Print this page
5719 Add support for LSI Fury adapters
Split |
Close |
Expand all |
Collapse all |
--- old/usr/src/uts/common/io/mr_sas/mr_sas.c
+++ new/usr/src/uts/common/io/mr_sas/mr_sas.c
1 1 /*
2 2 * mr_sas.c: source for mr_sas driver
3 3 *
4 4 * Solaris MegaRAID device driver for SAS2.0 controllers
5 5 * Copyright (c) 2008-2012, LSI Logic Corporation.
6 6 * All rights reserved.
7 7 *
8 8 * Version:
9 9 * Author:
10 10 * Swaminathan K S
11 11 * Arun Chandrashekhar
12 12 * Manju R
13 13 * Rasheed
14 14 * Shakeel Bukhari
15 15 *
16 16 * Redistribution and use in source and binary forms, with or without
17 17 * modification, are permitted provided that the following conditions are met:
18 18 *
19 19 * 1. Redistributions of source code must retain the above copyright notice,
20 20 * this list of conditions and the following disclaimer.
21 21 *
22 22 * 2. Redistributions in binary form must reproduce the above copyright notice,
23 23 * this list of conditions and the following disclaimer in the documentation
24 24 * and/or other materials provided with the distribution.
25 25 *
26 26 * 3. Neither the name of the author nor the names of its contributors may be
27 27 * used to endorse or promote products derived from this software without
28 28 * specific prior written permission.
29 29 *
30 30 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
31 31 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
32 32 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
33 33 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
34 34 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
35 35 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
36 36 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
37 37 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
↓ open down ↓ |
37 lines elided |
↑ open up ↑ |
38 38 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
39 39 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
40 40 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
41 41 * DAMAGE.
42 42 */
43 43
44 44 /*
45 45 * Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved.
46 46 * Copyright (c) 2011 Bayard G. Bell. All rights reserved.
47 47 * Copyright 2013 Nexenta Systems, Inc. All rights reserved.
48 + * Copyright 2015 Garrett D'Amore <garrett@damore.org>
48 49 */
49 50
50 51 #include <sys/types.h>
51 52 #include <sys/param.h>
52 53 #include <sys/file.h>
53 54 #include <sys/errno.h>
54 55 #include <sys/open.h>
55 56 #include <sys/cred.h>
56 57 #include <sys/modctl.h>
57 58 #include <sys/conf.h>
58 59 #include <sys/devops.h>
59 60 #include <sys/cmn_err.h>
60 61 #include <sys/kmem.h>
61 62 #include <sys/stat.h>
62 63 #include <sys/mkdev.h>
63 64 #include <sys/pci.h>
64 65 #include <sys/scsi/scsi.h>
65 66 #include <sys/ddi.h>
66 67 #include <sys/sunddi.h>
67 68 #include <sys/atomic.h>
68 69 #include <sys/signal.h>
69 70 #include <sys/byteorder.h>
70 71 #include <sys/sdt.h>
71 72 #include <sys/fs/dv_node.h> /* devfs_clean */
72 73
73 74 #include "mr_sas.h"
74 75
75 76 /*
76 77 * FMA header files
77 78 */
78 79 #include <sys/ddifm.h>
79 80 #include <sys/fm/protocol.h>
80 81 #include <sys/fm/util.h>
81 82 #include <sys/fm/io/ddi.h>
82 83
83 84 /* Macros to help Skinny and stock 2108/MFI live together. */
84 85 #define WR_IB_PICK_QPORT(addr, instance) \
85 86 if ((instance)->skinny) { \
86 87 WR_IB_LOW_QPORT((addr), (instance)); \
87 88 WR_IB_HIGH_QPORT(0, (instance)); \
88 89 } else { \
89 90 WR_IB_QPORT((addr), (instance)); \
90 91 }
91 92
92 93 /*
93 94 * Local static data
94 95 */
95 96 static void *mrsas_state = NULL;
96 97 static volatile boolean_t mrsas_relaxed_ordering = B_TRUE;
97 98 volatile int debug_level_g = CL_NONE;
98 99 static volatile int msi_enable = 1;
99 100 static volatile int ctio_enable = 1;
100 101
101 102 /* Default Timeout value to issue online controller reset */
102 103 volatile int debug_timeout_g = 0xF0; /* 0xB4; */
103 104 /* Simulate consecutive firmware fault */
104 105 static volatile int debug_fw_faults_after_ocr_g = 0;
105 106 #ifdef OCRDEBUG
106 107 /* Simulate three consecutive timeout for an IO */
107 108 static volatile int debug_consecutive_timeout_after_ocr_g = 0;
108 109 #endif
109 110
110 111 #pragma weak scsi_hba_open
111 112 #pragma weak scsi_hba_close
112 113 #pragma weak scsi_hba_ioctl
113 114
114 115 /* Local static prototypes. */
115 116 static int mrsas_getinfo(dev_info_t *, ddi_info_cmd_t, void *, void **);
116 117 static int mrsas_attach(dev_info_t *, ddi_attach_cmd_t);
117 118 #ifdef __sparc
118 119 static int mrsas_reset(dev_info_t *, ddi_reset_cmd_t);
119 120 #else
120 121 static int mrsas_quiesce(dev_info_t *);
121 122 #endif
122 123 static int mrsas_detach(dev_info_t *, ddi_detach_cmd_t);
123 124 static int mrsas_open(dev_t *, int, int, cred_t *);
124 125 static int mrsas_close(dev_t, int, int, cred_t *);
125 126 static int mrsas_ioctl(dev_t, int, intptr_t, int, cred_t *, int *);
126 127
127 128 static int mrsas_tran_tgt_init(dev_info_t *, dev_info_t *,
128 129 scsi_hba_tran_t *, struct scsi_device *);
129 130 static struct scsi_pkt *mrsas_tran_init_pkt(struct scsi_address *, register
130 131 struct scsi_pkt *, struct buf *, int, int, int, int,
131 132 int (*)(), caddr_t);
132 133 static int mrsas_tran_start(struct scsi_address *,
133 134 register struct scsi_pkt *);
134 135 static int mrsas_tran_abort(struct scsi_address *, struct scsi_pkt *);
135 136 static int mrsas_tran_reset(struct scsi_address *, int);
136 137 static int mrsas_tran_getcap(struct scsi_address *, char *, int);
137 138 static int mrsas_tran_setcap(struct scsi_address *, char *, int, int);
138 139 static void mrsas_tran_destroy_pkt(struct scsi_address *,
139 140 struct scsi_pkt *);
140 141 static void mrsas_tran_dmafree(struct scsi_address *, struct scsi_pkt *);
141 142 static void mrsas_tran_sync_pkt(struct scsi_address *, struct scsi_pkt *);
142 143 static int mrsas_tran_quiesce(dev_info_t *dip);
143 144 static int mrsas_tran_unquiesce(dev_info_t *dip);
144 145 static uint_t mrsas_isr();
145 146 static uint_t mrsas_softintr();
146 147 static void mrsas_undo_resources(dev_info_t *, struct mrsas_instance *);
147 148
148 149 static void free_space_for_mfi(struct mrsas_instance *);
149 150 static uint32_t read_fw_status_reg_ppc(struct mrsas_instance *);
150 151 static void issue_cmd_ppc(struct mrsas_cmd *, struct mrsas_instance *);
151 152 static int issue_cmd_in_poll_mode_ppc(struct mrsas_instance *,
152 153 struct mrsas_cmd *);
153 154 static int issue_cmd_in_sync_mode_ppc(struct mrsas_instance *,
154 155 struct mrsas_cmd *);
155 156 static void enable_intr_ppc(struct mrsas_instance *);
156 157 static void disable_intr_ppc(struct mrsas_instance *);
157 158 static int intr_ack_ppc(struct mrsas_instance *);
158 159 static void flush_cache(struct mrsas_instance *instance);
159 160 void display_scsi_inquiry(caddr_t);
160 161 static int start_mfi_aen(struct mrsas_instance *instance);
161 162 static int handle_drv_ioctl(struct mrsas_instance *instance,
162 163 struct mrsas_ioctl *ioctl, int mode);
163 164 static int handle_mfi_ioctl(struct mrsas_instance *instance,
164 165 struct mrsas_ioctl *ioctl, int mode);
165 166 static int handle_mfi_aen(struct mrsas_instance *instance,
166 167 struct mrsas_aen *aen);
167 168 static struct mrsas_cmd *build_cmd(struct mrsas_instance *,
168 169 struct scsi_address *, struct scsi_pkt *, uchar_t *);
169 170 static int alloc_additional_dma_buffer(struct mrsas_instance *);
170 171 static void complete_cmd_in_sync_mode(struct mrsas_instance *,
171 172 struct mrsas_cmd *);
172 173 static int mrsas_kill_adapter(struct mrsas_instance *);
173 174 static int mrsas_issue_init_mfi(struct mrsas_instance *);
174 175 static int mrsas_reset_ppc(struct mrsas_instance *);
175 176 static uint32_t mrsas_initiate_ocr_if_fw_is_faulty(struct mrsas_instance *);
176 177 static int wait_for_outstanding(struct mrsas_instance *instance);
177 178 static int register_mfi_aen(struct mrsas_instance *instance,
178 179 uint32_t seq_num, uint32_t class_locale_word);
179 180 static int issue_mfi_pthru(struct mrsas_instance *instance, struct
180 181 mrsas_ioctl *ioctl, struct mrsas_cmd *cmd, int mode);
181 182 static int issue_mfi_dcmd(struct mrsas_instance *instance, struct
182 183 mrsas_ioctl *ioctl, struct mrsas_cmd *cmd, int mode);
183 184 static int issue_mfi_smp(struct mrsas_instance *instance, struct
184 185 mrsas_ioctl *ioctl, struct mrsas_cmd *cmd, int mode);
185 186 static int issue_mfi_stp(struct mrsas_instance *instance, struct
186 187 mrsas_ioctl *ioctl, struct mrsas_cmd *cmd, int mode);
187 188 static int abort_aen_cmd(struct mrsas_instance *instance,
188 189 struct mrsas_cmd *cmd_to_abort);
189 190
190 191 static void mrsas_rem_intrs(struct mrsas_instance *instance);
191 192 static int mrsas_add_intrs(struct mrsas_instance *instance, int intr_type);
192 193
193 194 static void mrsas_tran_tgt_free(dev_info_t *, dev_info_t *,
194 195 scsi_hba_tran_t *, struct scsi_device *);
195 196 static int mrsas_tran_bus_config(dev_info_t *, uint_t,
196 197 ddi_bus_config_op_t, void *, dev_info_t **);
197 198 static int mrsas_parse_devname(char *, int *, int *);
198 199 static int mrsas_config_all_devices(struct mrsas_instance *);
199 200 static int mrsas_config_ld(struct mrsas_instance *, uint16_t,
200 201 uint8_t, dev_info_t **);
201 202 static int mrsas_name_node(dev_info_t *, char *, int);
202 203 static void mrsas_issue_evt_taskq(struct mrsas_eventinfo *);
203 204 static void free_additional_dma_buffer(struct mrsas_instance *);
204 205 static void io_timeout_checker(void *);
205 206 static void mrsas_fm_init(struct mrsas_instance *);
206 207 static void mrsas_fm_fini(struct mrsas_instance *);
207 208
208 209 static struct mrsas_function_template mrsas_function_template_ppc = {
209 210 .read_fw_status_reg = read_fw_status_reg_ppc,
210 211 .issue_cmd = issue_cmd_ppc,
211 212 .issue_cmd_in_sync_mode = issue_cmd_in_sync_mode_ppc,
212 213 .issue_cmd_in_poll_mode = issue_cmd_in_poll_mode_ppc,
213 214 .enable_intr = enable_intr_ppc,
214 215 .disable_intr = disable_intr_ppc,
215 216 .intr_ack = intr_ack_ppc,
216 217 .init_adapter = mrsas_init_adapter_ppc
217 218 };
218 219
219 220
220 221 static struct mrsas_function_template mrsas_function_template_fusion = {
221 222 .read_fw_status_reg = tbolt_read_fw_status_reg,
222 223 .issue_cmd = tbolt_issue_cmd,
223 224 .issue_cmd_in_sync_mode = tbolt_issue_cmd_in_sync_mode,
224 225 .issue_cmd_in_poll_mode = tbolt_issue_cmd_in_poll_mode,
225 226 .enable_intr = tbolt_enable_intr,
226 227 .disable_intr = tbolt_disable_intr,
227 228 .intr_ack = tbolt_intr_ack,
228 229 .init_adapter = mrsas_init_adapter_tbolt
229 230 };
230 231
231 232
/*
 * Generic DMA attributes for the HBA: 32-bit addressing, 8-byte
 * alignment, up to MRSAS_MAX_SGE_CNT scatter/gather elements, and
 * 512-byte device granularity.  attach() copies this and overrides
 * dma_attr_sgllen with the instance's max_num_sge before the HBA
 * transport is attached.
 */
ddi_dma_attr_t mrsas_generic_dma_attr = {
	DMA_ATTR_V0,		/* dma_attr_version */
	0,			/* low DMA address range */
	0xFFFFFFFFU,		/* high DMA address range */
	0xFFFFFFFFU,		/* DMA counter register */
	8,			/* DMA address alignment */
	0x07,			/* DMA burstsizes */
	1,			/* min DMA size */
	0xFFFFFFFFU,		/* max DMA size */
	0xFFFFFFFFU,		/* segment boundary */
	MRSAS_MAX_SGE_CNT,	/* dma_attr_sglen */
	512,			/* granularity of device */
	0			/* bus specific DMA flags */
};
246 247
/* Upper bound on a single transfer (16 MB) for MFI-class adapters. */
int32_t mrsas_max_cap_maxxfer = 0x1000000;

/*
 * Fix for: Thunderbolt controller IO timeout when IO write size is 1MEG,
 * Limit size to 256K
 */
uint32_t mrsas_tbolt_max_cap_maxxfer = (512 * 512);
254 255
255 256 /*
256 257 * cb_ops contains base level routines
257 258 */
258 259 static struct cb_ops mrsas_cb_ops = {
259 260 mrsas_open, /* open */
260 261 mrsas_close, /* close */
261 262 nodev, /* strategy */
262 263 nodev, /* print */
263 264 nodev, /* dump */
264 265 nodev, /* read */
265 266 nodev, /* write */
266 267 mrsas_ioctl, /* ioctl */
267 268 nodev, /* devmap */
268 269 nodev, /* mmap */
269 270 nodev, /* segmap */
270 271 nochpoll, /* poll */
271 272 nodev, /* cb_prop_op */
272 273 0, /* streamtab */
273 274 D_NEW | D_HOTPLUG, /* cb_flag */
274 275 CB_REV, /* cb_rev */
275 276 nodev, /* cb_aread */
276 277 nodev /* cb_awrite */
277 278 };
278 279
279 280 /*
280 281 * dev_ops contains configuration routines
281 282 */
/*
 * Autoconfiguration entry points (dev_ops).  getinfo/attach/detach are
 * implemented; reset is provided on SPARC only, while other platforms
 * supply a quiesce routine (for fast reboot) instead.
 */
static struct dev_ops mrsas_ops = {
	DEVO_REV,		/* rev, */
	0,			/* refcnt */
	mrsas_getinfo,		/* getinfo */
	nulldev,		/* identify */
	nulldev,		/* probe */
	mrsas_attach,		/* attach */
	mrsas_detach,		/* detach */
#ifdef __sparc
	mrsas_reset,		/* reset */
#else /* __sparc */
	nodev,			/* reset: not used on non-SPARC */
#endif /* __sparc */
	&mrsas_cb_ops,		/* char/block ops */
	NULL,			/* bus ops */
	NULL,			/* power */
#ifdef __sparc
	ddi_quiesce_not_needed	/* quiesce */
#else /* __sparc */
	mrsas_quiesce		/* quiesce */
#endif /* __sparc */
};
304 305
/* Loadable-module linkage: this module is a device driver. */
static struct modldrv modldrv = {
	&mod_driverops,		/* module type - driver */
	MRSAS_VERSION,		/* module description string */
	&mrsas_ops,		/* driver ops */
};
310 311
/* Module linkage handed to mod_install()/mod_remove()/mod_info(). */
static struct modlinkage modlinkage = {
	MODREV_1, /* ml_rev - must be MODREV_1 */
	&modldrv, /* ml_linkage */
	NULL /* end of driver linkage */
};
316 317
/*
 * Register-access attributes: the controller's registers are
 * little-endian and must be accessed with strict ordering.
 */
static struct ddi_device_acc_attr endian_attr = {
	DDI_DEVICE_ATTR_V1,	/* supports access-error policy field */
	DDI_STRUCTURE_LE_ACC,	/* device data is little-endian */
	DDI_STRICTORDER_ACC,	/* no reordering of register accesses */
	DDI_DEFAULT_ACC		/* default access-error policy */
};
323 324
/* Use the LSI Fast Path for the 2208 (tbolt) commands. */
/* Cleared in attach() when the "mrsas-enable-fp" property is "no". */
unsigned int enable_fp = 1;
326 327
327 328
328 329 /*
329 330 * ************************************************************************** *
330 331 * *
331 332 * common entry points - for loadable kernel modules *
332 333 * *
333 334 * ************************************************************************** *
334 335 */
335 336
336 337 /*
337 338 * _init - initialize a loadable module
338 339 * @void
339 340 *
340 341 * The driver should perform any one-time resource allocation or data
341 342 * initialization during driver loading in _init(). For example, the driver
342 343 * should initialize any mutexes global to the driver in this routine.
343 344 * The driver should not, however, use _init() to allocate or initialize
344 345 * anything that has to do with a particular instance of the device.
345 346 * Per-instance initialization must be done in attach().
346 347 */
347 348 int
348 349 _init(void)
349 350 {
350 351 int ret;
351 352
352 353 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
353 354
354 355 ret = ddi_soft_state_init(&mrsas_state,
355 356 sizeof (struct mrsas_instance), 0);
356 357
357 358 if (ret != DDI_SUCCESS) {
358 359 cmn_err(CE_WARN, "mr_sas: could not init state");
359 360 return (ret);
360 361 }
361 362
362 363 if ((ret = scsi_hba_init(&modlinkage)) != DDI_SUCCESS) {
363 364 cmn_err(CE_WARN, "mr_sas: could not init scsi hba");
364 365 ddi_soft_state_fini(&mrsas_state);
365 366 return (ret);
366 367 }
367 368
368 369 ret = mod_install(&modlinkage);
369 370
370 371 if (ret != DDI_SUCCESS) {
371 372 cmn_err(CE_WARN, "mr_sas: mod_install failed");
372 373 scsi_hba_fini(&modlinkage);
373 374 ddi_soft_state_fini(&mrsas_state);
374 375 }
375 376
376 377 return (ret);
377 378 }
378 379
379 380 /*
380 381 * _info - returns information about a loadable module.
381 382 * @void
382 383 *
383 384 * _info() is called to return module information. This is a typical entry
384 385 * point that does predefined role. It simply calls mod_info().
385 386 */
386 387 int
387 388 _info(struct modinfo *modinfop)
388 389 {
389 390 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
390 391
391 392 return (mod_info(&modlinkage, modinfop));
392 393 }
393 394
394 395 /*
395 396 * _fini - prepare a loadable module for unloading
396 397 * @void
397 398 *
398 399 * In _fini(), the driver should release any resources that were allocated in
399 400 * _init(). The driver must remove itself from the system module list.
400 401 */
401 402 int
402 403 _fini(void)
403 404 {
404 405 int ret;
405 406
406 407 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
407 408
408 409 if ((ret = mod_remove(&modlinkage)) != DDI_SUCCESS) {
409 410 con_log(CL_ANN1,
410 411 (CE_WARN, "_fini: mod_remove() failed, error 0x%X", ret));
411 412 return (ret);
412 413 }
413 414
414 415 scsi_hba_fini(&modlinkage);
415 416 con_log(CL_DLEVEL1, (CE_NOTE, "_fini: scsi_hba_fini() done."));
416 417
417 418 ddi_soft_state_fini(&mrsas_state);
418 419 con_log(CL_DLEVEL1, (CE_NOTE, "_fini: ddi_soft_state_fini() done."));
419 420
420 421 return (ret);
421 422 }
422 423
423 424
424 425 /*
425 426 * ************************************************************************** *
426 427 * *
427 428 * common entry points - for autoconfiguration *
428 429 * *
429 430 * ************************************************************************** *
430 431 */
431 432 /*
432 433 * attach - adds a device to the system as part of initialization
433 434 * @dip:
434 435 * @cmd:
435 436 *
436 437 * The kernel calls a driver's attach() entry point to attach an instance of
437 438 * a device (for MegaRAID, it is instance of a controller) or to resume
438 439 * operation for an instance of a device that has been suspended or has been
439 440 * shut down by the power management framework
440 441 * The attach() entry point typically includes the following types of
441 442 * processing:
442 443 * - allocate a soft-state structure for the device instance (for MegaRAID,
443 444 * controller instance)
444 445 * - initialize per-instance mutexes
445 446 * - initialize condition variables
446 447 * - register the device's interrupts (for MegaRAID, controller's interrupts)
447 448 * - map the registers and memory of the device instance (for MegaRAID,
448 449 * controller instance)
449 450 * - create minor device nodes for the device instance (for MegaRAID,
450 451 * controller instance)
451 452 * - report that the device instance (for MegaRAID, controller instance) has
452 453 * attached
453 454 */
454 455 static int
455 456 mrsas_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
456 457 {
457 458 int instance_no;
458 459 int nregs;
459 460 int i = 0;
460 461 uint8_t irq;
461 462 uint16_t vendor_id;
462 463 uint16_t device_id;
463 464 uint16_t subsysvid;
464 465 uint16_t subsysid;
465 466 uint16_t command;
466 467 off_t reglength = 0;
467 468 int intr_types = 0;
468 469 char *data;
469 470
470 471 scsi_hba_tran_t *tran;
471 472 ddi_dma_attr_t tran_dma_attr;
472 473 struct mrsas_instance *instance;
473 474
474 475 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
475 476
476 477 /* CONSTCOND */
477 478 ASSERT(NO_COMPETING_THREADS);
478 479
479 480 instance_no = ddi_get_instance(dip);
480 481
481 482 /*
482 483 * check to see whether this device is in a DMA-capable slot.
483 484 */
484 485 if (ddi_slaveonly(dip) == DDI_SUCCESS) {
485 486 cmn_err(CE_WARN,
486 487 "mr_sas%d: Device in slave-only slot, unused",
487 488 instance_no);
488 489 return (DDI_FAILURE);
489 490 }
490 491
491 492 switch (cmd) {
492 493 case DDI_ATTACH:
493 494 /* allocate the soft state for the instance */
494 495 if (ddi_soft_state_zalloc(mrsas_state, instance_no)
495 496 != DDI_SUCCESS) {
496 497 cmn_err(CE_WARN,
497 498 "mr_sas%d: Failed to allocate soft state",
498 499 instance_no);
499 500 return (DDI_FAILURE);
500 501 }
501 502
502 503 instance = (struct mrsas_instance *)ddi_get_soft_state
503 504 (mrsas_state, instance_no);
504 505
505 506 if (instance == NULL) {
506 507 cmn_err(CE_WARN,
507 508 "mr_sas%d: Bad soft state", instance_no);
508 509 ddi_soft_state_free(mrsas_state, instance_no);
509 510 return (DDI_FAILURE);
510 511 }
511 512
512 513 instance->unroll.softs = 1;
513 514
514 515 /* Setup the PCI configuration space handles */
515 516 if (pci_config_setup(dip, &instance->pci_handle) !=
516 517 DDI_SUCCESS) {
517 518 cmn_err(CE_WARN,
518 519 "mr_sas%d: pci config setup failed ",
519 520 instance_no);
520 521
521 522 ddi_soft_state_free(mrsas_state, instance_no);
522 523 return (DDI_FAILURE);
523 524 }
524 525
525 526 if (ddi_dev_nregs(dip, &nregs) != DDI_SUCCESS) {
526 527 cmn_err(CE_WARN,
527 528 "mr_sas: failed to get registers.");
528 529
529 530 pci_config_teardown(&instance->pci_handle);
530 531 ddi_soft_state_free(mrsas_state, instance_no);
531 532 return (DDI_FAILURE);
532 533 }
533 534
534 535 vendor_id = pci_config_get16(instance->pci_handle,
535 536 PCI_CONF_VENID);
536 537 device_id = pci_config_get16(instance->pci_handle,
537 538 PCI_CONF_DEVID);
538 539
539 540 subsysvid = pci_config_get16(instance->pci_handle,
540 541 PCI_CONF_SUBVENID);
541 542 subsysid = pci_config_get16(instance->pci_handle,
542 543 PCI_CONF_SUBSYSID);
543 544
544 545 pci_config_put16(instance->pci_handle, PCI_CONF_COMM,
545 546 (pci_config_get16(instance->pci_handle,
546 547 PCI_CONF_COMM) | PCI_COMM_ME));
547 548 irq = pci_config_get8(instance->pci_handle,
548 549 PCI_CONF_ILINE);
549 550
550 551 con_log(CL_DLEVEL1, (CE_CONT, "mr_sas%d: "
551 552 "0x%x:0x%x 0x%x:0x%x, irq:%d drv-ver:%s",
552 553 instance_no, vendor_id, device_id, subsysvid,
553 554 subsysid, irq, MRSAS_VERSION));
554 555
555 556 /* enable bus-mastering */
556 557 command = pci_config_get16(instance->pci_handle,
557 558 PCI_CONF_COMM);
558 559
559 560 if (!(command & PCI_COMM_ME)) {
560 561 command |= PCI_COMM_ME;
561 562
562 563 pci_config_put16(instance->pci_handle,
563 564 PCI_CONF_COMM, command);
564 565
565 566 con_log(CL_ANN, (CE_CONT, "mr_sas%d: "
↓ open down ↓ |
508 lines elided |
↑ open up ↑ |
566 567 "enable bus-mastering", instance_no));
567 568 } else {
568 569 con_log(CL_DLEVEL1, (CE_CONT, "mr_sas%d: "
569 570 "bus-mastering already set", instance_no));
570 571 }
571 572
572 573 /* initialize function pointers */
573 574 switch (device_id) {
574 575 case PCI_DEVICE_ID_LSI_TBOLT:
575 576 case PCI_DEVICE_ID_LSI_INVADER:
577 + case PCI_DEVICE_ID_LSI_FURY:
576 578 con_log(CL_ANN, (CE_NOTE,
577 579 "mr_sas: 2208 T.B. device detected"));
578 580
579 581 instance->func_ptr =
580 582 &mrsas_function_template_fusion;
581 583 instance->tbolt = 1;
582 584 break;
583 585
584 586 case PCI_DEVICE_ID_LSI_SKINNY:
585 587 case PCI_DEVICE_ID_LSI_SKINNY_NEW:
586 588 /*
587 589 * FALLTHRU to PPC-style functions, but mark this
588 590 * instance as Skinny, because the register set is
589 591 * slightly different (See WR_IB_PICK_QPORT), and
590 592 * certain other features are available to a Skinny
591 593 * HBA.
592 594 */
593 595 instance->skinny = 1;
594 596 /* FALLTHRU */
595 597
596 598 case PCI_DEVICE_ID_LSI_2108VDE:
597 599 case PCI_DEVICE_ID_LSI_2108V:
598 600 con_log(CL_ANN, (CE_NOTE,
599 601 "mr_sas: 2108 Liberator device detected"));
600 602
601 603 instance->func_ptr =
602 604 &mrsas_function_template_ppc;
603 605 break;
604 606
605 607 default:
606 608 cmn_err(CE_WARN,
607 609 "mr_sas: Invalid device detected");
608 610
609 611 pci_config_teardown(&instance->pci_handle);
610 612 ddi_soft_state_free(mrsas_state, instance_no);
611 613 return (DDI_FAILURE);
612 614 }
613 615
614 616 instance->baseaddress = pci_config_get32(
615 617 instance->pci_handle, PCI_CONF_BASE0);
616 618 instance->baseaddress &= 0x0fffc;
617 619
618 620 instance->dip = dip;
619 621 instance->vendor_id = vendor_id;
620 622 instance->device_id = device_id;
621 623 instance->subsysvid = subsysvid;
622 624 instance->subsysid = subsysid;
623 625 instance->instance = instance_no;
624 626
625 627 /* Initialize FMA */
626 628 instance->fm_capabilities = ddi_prop_get_int(
627 629 DDI_DEV_T_ANY, instance->dip, DDI_PROP_DONTPASS,
628 630 "fm-capable", DDI_FM_EREPORT_CAPABLE |
629 631 DDI_FM_ACCCHK_CAPABLE | DDI_FM_DMACHK_CAPABLE
630 632 | DDI_FM_ERRCB_CAPABLE);
631 633
632 634 mrsas_fm_init(instance);
633 635
634 636 /* Setup register map */
635 637 if ((ddi_dev_regsize(instance->dip,
636 638 REGISTER_SET_IO_2108, ®length) != DDI_SUCCESS) ||
637 639 reglength < MINIMUM_MFI_MEM_SZ) {
638 640 goto fail_attach;
639 641 }
640 642 if (reglength > DEFAULT_MFI_MEM_SZ) {
641 643 reglength = DEFAULT_MFI_MEM_SZ;
642 644 con_log(CL_DLEVEL1, (CE_NOTE,
643 645 "mr_sas: register length to map is 0x%lx bytes",
644 646 reglength));
645 647 }
646 648 if (ddi_regs_map_setup(instance->dip,
647 649 REGISTER_SET_IO_2108, &instance->regmap, 0,
648 650 reglength, &endian_attr, &instance->regmap_handle)
649 651 != DDI_SUCCESS) {
650 652 cmn_err(CE_WARN,
651 653 "mr_sas: couldn't map control registers");
652 654 goto fail_attach;
653 655 }
654 656
655 657 instance->unroll.regs = 1;
656 658
657 659 /*
658 660 * Disable Interrupt Now.
659 661 * Setup Software interrupt
660 662 */
661 663 instance->func_ptr->disable_intr(instance);
662 664
663 665 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, dip, 0,
664 666 "mrsas-enable-msi", &data) == DDI_SUCCESS) {
665 667 if (strncmp(data, "no", 3) == 0) {
666 668 msi_enable = 0;
667 669 con_log(CL_ANN1, (CE_WARN,
668 670 "msi_enable = %d disabled", msi_enable));
669 671 }
670 672 ddi_prop_free(data);
671 673 }
672 674
673 675 con_log(CL_DLEVEL1, (CE_NOTE, "msi_enable = %d", msi_enable));
674 676
675 677 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, dip, 0,
676 678 "mrsas-enable-fp", &data) == DDI_SUCCESS) {
677 679 if (strncmp(data, "no", 3) == 0) {
678 680 enable_fp = 0;
679 681 cmn_err(CE_NOTE,
680 682 "enable_fp = %d, Fast-Path disabled.\n",
681 683 enable_fp);
682 684 }
683 685
684 686 ddi_prop_free(data);
685 687 }
686 688
687 689 con_log(CL_DLEVEL1, (CE_NOTE, "enable_fp = %d\n", enable_fp));
688 690
689 691 /* Check for all supported interrupt types */
690 692 if (ddi_intr_get_supported_types(
691 693 dip, &intr_types) != DDI_SUCCESS) {
692 694 cmn_err(CE_WARN,
693 695 "ddi_intr_get_supported_types() failed");
694 696 goto fail_attach;
695 697 }
696 698
697 699 con_log(CL_DLEVEL1, (CE_NOTE,
698 700 "ddi_intr_get_supported_types() ret: 0x%x", intr_types));
699 701
700 702 /* Initialize and Setup Interrupt handler */
701 703 if (msi_enable && (intr_types & DDI_INTR_TYPE_MSIX)) {
702 704 if (mrsas_add_intrs(instance, DDI_INTR_TYPE_MSIX) !=
703 705 DDI_SUCCESS) {
704 706 cmn_err(CE_WARN,
705 707 "MSIX interrupt query failed");
706 708 goto fail_attach;
707 709 }
708 710 instance->intr_type = DDI_INTR_TYPE_MSIX;
709 711 } else if (msi_enable && (intr_types & DDI_INTR_TYPE_MSI)) {
710 712 if (mrsas_add_intrs(instance, DDI_INTR_TYPE_MSI) !=
711 713 DDI_SUCCESS) {
712 714 cmn_err(CE_WARN,
713 715 "MSI interrupt query failed");
714 716 goto fail_attach;
715 717 }
716 718 instance->intr_type = DDI_INTR_TYPE_MSI;
717 719 } else if (intr_types & DDI_INTR_TYPE_FIXED) {
718 720 msi_enable = 0;
719 721 if (mrsas_add_intrs(instance, DDI_INTR_TYPE_FIXED) !=
720 722 DDI_SUCCESS) {
721 723 cmn_err(CE_WARN,
722 724 "FIXED interrupt query failed");
723 725 goto fail_attach;
724 726 }
725 727 instance->intr_type = DDI_INTR_TYPE_FIXED;
726 728 } else {
727 729 cmn_err(CE_WARN, "Device cannot "
728 730 "suppport either FIXED or MSI/X "
729 731 "interrupts");
730 732 goto fail_attach;
731 733 }
732 734
733 735 instance->unroll.intr = 1;
734 736
735 737 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, dip, 0,
736 738 "mrsas-enable-ctio", &data) == DDI_SUCCESS) {
737 739 if (strncmp(data, "no", 3) == 0) {
738 740 ctio_enable = 0;
739 741 con_log(CL_ANN1, (CE_WARN,
740 742 "ctio_enable = %d disabled", ctio_enable));
741 743 }
742 744 ddi_prop_free(data);
743 745 }
744 746
745 747 con_log(CL_DLEVEL1, (CE_WARN, "ctio_enable = %d", ctio_enable));
746 748
747 749 /* setup the mfi based low level driver */
748 750 if (mrsas_init_adapter(instance) != DDI_SUCCESS) {
749 751 cmn_err(CE_WARN, "mr_sas: "
750 752 "could not initialize the low level driver");
751 753
752 754 goto fail_attach;
753 755 }
754 756
755 757 /* Initialize all Mutex */
756 758 INIT_LIST_HEAD(&instance->completed_pool_list);
757 759 mutex_init(&instance->completed_pool_mtx, NULL,
758 760 MUTEX_DRIVER, DDI_INTR_PRI(instance->intr_pri));
759 761
760 762 mutex_init(&instance->sync_map_mtx, NULL,
761 763 MUTEX_DRIVER, DDI_INTR_PRI(instance->intr_pri));
762 764
763 765 mutex_init(&instance->app_cmd_pool_mtx, NULL,
764 766 MUTEX_DRIVER, DDI_INTR_PRI(instance->intr_pri));
765 767
766 768 mutex_init(&instance->config_dev_mtx, NULL,
767 769 MUTEX_DRIVER, DDI_INTR_PRI(instance->intr_pri));
768 770
769 771 mutex_init(&instance->cmd_pend_mtx, NULL,
770 772 MUTEX_DRIVER, DDI_INTR_PRI(instance->intr_pri));
771 773
772 774 mutex_init(&instance->ocr_flags_mtx, NULL,
773 775 MUTEX_DRIVER, DDI_INTR_PRI(instance->intr_pri));
774 776
775 777 mutex_init(&instance->int_cmd_mtx, NULL,
776 778 MUTEX_DRIVER, DDI_INTR_PRI(instance->intr_pri));
777 779 cv_init(&instance->int_cmd_cv, NULL, CV_DRIVER, NULL);
778 780
779 781 mutex_init(&instance->cmd_pool_mtx, NULL,
780 782 MUTEX_DRIVER, DDI_INTR_PRI(instance->intr_pri));
781 783
782 784 mutex_init(&instance->reg_write_mtx, NULL,
783 785 MUTEX_DRIVER, DDI_INTR_PRI(instance->intr_pri));
784 786
785 787 if (instance->tbolt) {
786 788 mutex_init(&instance->cmd_app_pool_mtx, NULL,
787 789 MUTEX_DRIVER, DDI_INTR_PRI(instance->intr_pri));
788 790
789 791 mutex_init(&instance->chip_mtx, NULL,
790 792 MUTEX_DRIVER, DDI_INTR_PRI(instance->intr_pri));
791 793
792 794 }
793 795
794 796 instance->unroll.mutexs = 1;
795 797
796 798 instance->timeout_id = (timeout_id_t)-1;
797 799
798 800 /* Register our soft-isr for highlevel interrupts. */
799 801 instance->isr_level = instance->intr_pri;
800 802 if (!(instance->tbolt)) {
801 803 if (instance->isr_level == HIGH_LEVEL_INTR) {
802 804 if (ddi_add_softintr(dip,
803 805 DDI_SOFTINT_HIGH,
804 806 &instance->soft_intr_id, NULL, NULL,
805 807 mrsas_softintr, (caddr_t)instance) !=
806 808 DDI_SUCCESS) {
807 809 cmn_err(CE_WARN,
808 810 "Software ISR did not register");
809 811
810 812 goto fail_attach;
811 813 }
812 814
813 815 instance->unroll.soft_isr = 1;
814 816
815 817 }
816 818 }
817 819
818 820 instance->softint_running = 0;
819 821
820 822 /* Allocate a transport structure */
821 823 tran = scsi_hba_tran_alloc(dip, SCSI_HBA_CANSLEEP);
822 824
823 825 if (tran == NULL) {
824 826 cmn_err(CE_WARN,
825 827 "scsi_hba_tran_alloc failed");
826 828 goto fail_attach;
827 829 }
828 830
829 831 instance->tran = tran;
830 832 instance->unroll.tran = 1;
831 833
832 834 tran->tran_hba_private = instance;
833 835 tran->tran_tgt_init = mrsas_tran_tgt_init;
834 836 tran->tran_tgt_probe = scsi_hba_probe;
835 837 tran->tran_tgt_free = mrsas_tran_tgt_free;
836 838 tran->tran_init_pkt = mrsas_tran_init_pkt;
837 839 if (instance->tbolt)
838 840 tran->tran_start = mrsas_tbolt_tran_start;
839 841 else
840 842 tran->tran_start = mrsas_tran_start;
841 843 tran->tran_abort = mrsas_tran_abort;
842 844 tran->tran_reset = mrsas_tran_reset;
843 845 tran->tran_getcap = mrsas_tran_getcap;
844 846 tran->tran_setcap = mrsas_tran_setcap;
845 847 tran->tran_destroy_pkt = mrsas_tran_destroy_pkt;
846 848 tran->tran_dmafree = mrsas_tran_dmafree;
847 849 tran->tran_sync_pkt = mrsas_tran_sync_pkt;
848 850 tran->tran_quiesce = mrsas_tran_quiesce;
849 851 tran->tran_unquiesce = mrsas_tran_unquiesce;
850 852 tran->tran_bus_config = mrsas_tran_bus_config;
851 853
852 854 if (mrsas_relaxed_ordering)
853 855 mrsas_generic_dma_attr.dma_attr_flags |=
854 856 DDI_DMA_RELAXED_ORDERING;
855 857
856 858
857 859 tran_dma_attr = mrsas_generic_dma_attr;
858 860 tran_dma_attr.dma_attr_sgllen = instance->max_num_sge;
859 861
860 862 /* Attach this instance of the hba */
861 863 if (scsi_hba_attach_setup(dip, &tran_dma_attr, tran, 0)
862 864 != DDI_SUCCESS) {
863 865 cmn_err(CE_WARN,
864 866 "scsi_hba_attach failed");
865 867
866 868 goto fail_attach;
867 869 }
868 870 instance->unroll.tranSetup = 1;
869 871 con_log(CL_ANN1,
870 872 (CE_CONT, "scsi_hba_attach_setup() done."));
871 873
872 874 /* create devctl node for cfgadm command */
873 875 if (ddi_create_minor_node(dip, "devctl",
874 876 S_IFCHR, INST2DEVCTL(instance_no),
875 877 DDI_NT_SCSI_NEXUS, 0) == DDI_FAILURE) {
876 878 cmn_err(CE_WARN,
877 879 "mr_sas: failed to create devctl node.");
878 880
879 881 goto fail_attach;
880 882 }
881 883
882 884 instance->unroll.devctl = 1;
883 885
884 886 /* create scsi node for cfgadm command */
885 887 if (ddi_create_minor_node(dip, "scsi", S_IFCHR,
886 888 INST2SCSI(instance_no), DDI_NT_SCSI_ATTACHMENT_POINT, 0) ==
887 889 DDI_FAILURE) {
888 890 cmn_err(CE_WARN,
889 891 "mr_sas: failed to create scsi node.");
890 892
891 893 goto fail_attach;
892 894 }
893 895
894 896 instance->unroll.scsictl = 1;
895 897
896 898 (void) sprintf(instance->iocnode, "%d:lsirdctl",
897 899 instance_no);
898 900
899 901 /*
900 902 * Create a node for applications
901 903 * for issuing ioctl to the driver.
902 904 */
903 905 if (ddi_create_minor_node(dip, instance->iocnode,
904 906 S_IFCHR, INST2LSIRDCTL(instance_no), DDI_PSEUDO, 0) ==
905 907 DDI_FAILURE) {
906 908 cmn_err(CE_WARN,
907 909 "mr_sas: failed to create ioctl node.");
908 910
909 911 goto fail_attach;
910 912 }
911 913
912 914 instance->unroll.ioctl = 1;
913 915
914 916 /* Create a taskq to handle dr events */
915 917 if ((instance->taskq = ddi_taskq_create(dip,
916 918 "mrsas_dr_taskq", 1, TASKQ_DEFAULTPRI, 0)) == NULL) {
917 919 cmn_err(CE_WARN,
918 920 "mr_sas: failed to create taskq ");
919 921 instance->taskq = NULL;
920 922 goto fail_attach;
921 923 }
922 924 instance->unroll.taskq = 1;
923 925 con_log(CL_ANN1, (CE_CONT, "ddi_taskq_create() done."));
924 926
925 927 /* enable interrupt */
926 928 instance->func_ptr->enable_intr(instance);
927 929
928 930 /* initiate AEN */
929 931 if (start_mfi_aen(instance)) {
930 932 cmn_err(CE_WARN,
931 933 "mr_sas: failed to initiate AEN.");
932 934 goto fail_attach;
933 935 }
934 936 instance->unroll.aenPend = 1;
935 937 con_log(CL_ANN1,
936 938 (CE_CONT, "AEN started for instance %d.", instance_no));
937 939
938 940 /* Finally! We are on the air. */
939 941 ddi_report_dev(dip);
940 942
941 943 /* FMA handle checking. */
942 944 if (mrsas_check_acc_handle(instance->regmap_handle) !=
943 945 DDI_SUCCESS) {
944 946 goto fail_attach;
945 947 }
946 948 if (mrsas_check_acc_handle(instance->pci_handle) !=
947 949 DDI_SUCCESS) {
948 950 goto fail_attach;
949 951 }
950 952
951 953 instance->mr_ld_list =
952 954 kmem_zalloc(MRDRV_MAX_LD * sizeof (struct mrsas_ld),
953 955 KM_SLEEP);
954 956 instance->unroll.ldlist_buff = 1;
955 957
956 958 #ifdef PDSUPPORT
957 959 if (instance->tbolt || instance->skinny) {
958 960 instance->mr_tbolt_pd_max = MRSAS_TBOLT_PD_TGT_MAX;
959 961 instance->mr_tbolt_pd_list =
960 962 kmem_zalloc(MRSAS_TBOLT_GET_PD_MAX(instance) *
961 963 sizeof (struct mrsas_tbolt_pd), KM_SLEEP);
962 964 ASSERT(instance->mr_tbolt_pd_list);
963 965 for (i = 0; i < instance->mr_tbolt_pd_max; i++) {
964 966 instance->mr_tbolt_pd_list[i].lun_type =
965 967 MRSAS_TBOLT_PD_LUN;
966 968 instance->mr_tbolt_pd_list[i].dev_id =
967 969 (uint8_t)i;
968 970 }
969 971
970 972 instance->unroll.pdlist_buff = 1;
971 973 }
972 974 #endif
973 975 break;
974 976 case DDI_PM_RESUME:
975 977 con_log(CL_ANN, (CE_NOTE, "mr_sas: DDI_PM_RESUME"));
976 978 break;
977 979 case DDI_RESUME:
978 980 con_log(CL_ANN, (CE_NOTE, "mr_sas: DDI_RESUME"));
979 981 break;
980 982 default:
981 983 con_log(CL_ANN,
982 984 (CE_WARN, "mr_sas: invalid attach cmd=%x", cmd));
983 985 return (DDI_FAILURE);
984 986 }
985 987
986 988
987 989 con_log(CL_DLEVEL1,
988 990 (CE_NOTE, "mrsas_attach() return SUCCESS instance_num %d",
989 991 instance_no));
990 992 return (DDI_SUCCESS);
991 993
992 994 fail_attach:
993 995
994 996 mrsas_undo_resources(dip, instance);
995 997
996 998 mrsas_fm_ereport(instance, DDI_FM_DEVICE_NO_RESPONSE);
997 999 ddi_fm_service_impact(instance->dip, DDI_SERVICE_LOST);
998 1000
999 1001 mrsas_fm_fini(instance);
1000 1002
1001 1003 pci_config_teardown(&instance->pci_handle);
1002 1004 ddi_soft_state_free(mrsas_state, instance_no);
1003 1005
1004 1006 con_log(CL_ANN, (CE_WARN, "mr_sas: return failure from mrsas_attach"));
1005 1007
1006 1008 cmn_err(CE_WARN, "mrsas_attach() return FAILURE instance_num %d",
1007 1009 instance_no);
1008 1010
1009 1011 return (DDI_FAILURE);
1010 1012 }
1011 1013
1012 1014 /*
1013 1015 * getinfo - gets device information
1014 1016 * @dip:
1015 1017 * @cmd:
1016 1018 * @arg:
1017 1019 * @resultp:
1018 1020 *
1019 1021 * The system calls getinfo() to obtain configuration information that only
1020 1022 * the driver knows. The mapping of minor numbers to device instance is
1021 1023 * entirely under the control of the driver. The system sometimes needs to ask
1022 1024 * the driver which device a particular dev_t represents.
1023 1025 * Given the device number return the devinfo pointer from the scsi_device
1024 1026 * structure.
1025 1027 */
1026 1028 /*ARGSUSED*/
1027 1029 static int
1028 1030 mrsas_getinfo(dev_info_t *dip, ddi_info_cmd_t cmd, void *arg, void **resultp)
1029 1031 {
1030 1032 int rval;
1031 1033 int mrsas_minor = getminor((dev_t)arg);
1032 1034
1033 1035 struct mrsas_instance *instance;
1034 1036
1035 1037 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
1036 1038
1037 1039 switch (cmd) {
1038 1040 case DDI_INFO_DEVT2DEVINFO:
1039 1041 instance = (struct mrsas_instance *)
1040 1042 ddi_get_soft_state(mrsas_state,
1041 1043 MINOR2INST(mrsas_minor));
1042 1044
1043 1045 if (instance == NULL) {
1044 1046 *resultp = NULL;
1045 1047 rval = DDI_FAILURE;
1046 1048 } else {
1047 1049 *resultp = instance->dip;
1048 1050 rval = DDI_SUCCESS;
1049 1051 }
1050 1052 break;
1051 1053 case DDI_INFO_DEVT2INSTANCE:
1052 1054 *resultp = (void *)(intptr_t)
1053 1055 (MINOR2INST(getminor((dev_t)arg)));
1054 1056 rval = DDI_SUCCESS;
1055 1057 break;
1056 1058 default:
1057 1059 *resultp = NULL;
1058 1060 rval = DDI_FAILURE;
1059 1061 }
1060 1062
1061 1063 return (rval);
1062 1064 }
1063 1065
1064 1066 /*
1065 1067 * detach - detaches a device from the system
1066 1068 * @dip: pointer to the device's dev_info structure
1067 1069 * @cmd: type of detach
1068 1070 *
1069 1071 * A driver's detach() entry point is called to detach an instance of a device
1070 1072 * that is bound to the driver. The entry point is called with the instance of
1071 1073 * the device node to be detached and with DDI_DETACH, which is specified as
1072 1074 * the cmd argument to the entry point.
1073 1075 * This routine is called during driver unload. We free all the allocated
1074 1076 * resources and call the corresponding LLD so that it can also release all
1075 1077 * its resources.
1076 1078 */
static int
mrsas_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
	int	instance_no;

	struct mrsas_instance	*instance;

	con_log(CL_ANN, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));


	/* CONSTCOND */
	ASSERT(NO_COMPETING_THREADS);

	instance_no = ddi_get_instance(dip);

	/* Look up the per-instance soft state attached in mrsas_attach(). */
	instance = (struct mrsas_instance *)ddi_get_soft_state(mrsas_state,
	    instance_no);

	if (!instance) {
		cmn_err(CE_WARN,
		    "mr_sas:%d could not get instance in detach",
		    instance_no);

		return (DDI_FAILURE);
	}

	con_log(CL_ANN, (CE_NOTE,
	    "mr_sas%d: detaching device 0x%4x:0x%4x:0x%4x:0x%4x",
	    instance_no, instance->vendor_id, instance->device_id,
	    instance->subsysvid, instance->subsysid));

	switch (cmd) {
	case DDI_DETACH:
		con_log(CL_ANN, (CE_NOTE,
		    "mrsas_detach: DDI_DETACH"));

		/*
		 * Cancel the config-poll timer.  config_dev_mtx is
		 * dropped around untimeout() — presumably because the
		 * timeout handler takes the same mutex and untimeout()
		 * waits for an in-flight handler (TODO confirm) — then
		 * re-taken to clear the unroll flag consistently.
		 */
		mutex_enter(&instance->config_dev_mtx);
		if (instance->timeout_id != (timeout_id_t)-1) {
			mutex_exit(&instance->config_dev_mtx);
			(void) untimeout(instance->timeout_id);
			instance->timeout_id = (timeout_id_t)-1;
			mutex_enter(&instance->config_dev_mtx);
			instance->unroll.timer = 0;
		}
		mutex_exit(&instance->config_dev_mtx);

		/*
		 * Detach from the SCSA framework first; if that fails
		 * the instance must stay attached, so bail out before
		 * tearing anything else down.
		 */
		if (instance->unroll.tranSetup == 1) {
			if (scsi_hba_detach(dip) != DDI_SUCCESS) {
				cmn_err(CE_WARN,
				    "mr_sas2%d: failed to detach",
				    instance_no);
				return (DDI_FAILURE);
			}
			instance->unroll.tranSetup = 0;
			con_log(CL_ANN1,
			    (CE_CONT, "scsi_hba_dettach() done."));
		}

		/* Ask the firmware to flush its caches before teardown. */
		flush_cache(instance);

		/* Release everything tracked by the unroll flags. */
		mrsas_undo_resources(dip, instance);

		mrsas_fm_fini(instance);

		pci_config_teardown(&instance->pci_handle);
		ddi_soft_state_free(mrsas_state, instance_no);
		break;

	case DDI_PM_SUSPEND:
		/* Power-management suspend: nothing to do here. */
		con_log(CL_ANN, (CE_NOTE,
		    "mrsas_detach: DDI_PM_SUSPEND"));

		break;
	case DDI_SUSPEND:
		/* System suspend: nothing to do here. */
		con_log(CL_ANN, (CE_NOTE,
		    "mrsas_detach: DDI_SUSPEND"));

		break;
	default:
		con_log(CL_ANN, (CE_WARN,
		    "invalid detach command:0x%x", cmd));
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}
1163 1165
1164 1166
/*
 * Release every resource recorded in instance->unroll, in roughly the
 * reverse order of acquisition in mrsas_attach().  Called both from the
 * attach failure path and from DDI_DETACH; each unroll flag is cleared
 * after its resource is freed so the routine is safe to re-enter.
 */
static void
mrsas_undo_resources(dev_info_t *dip, struct mrsas_instance *instance)
{
	int	instance_no;

	con_log(CL_ANN, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));


	instance_no = ddi_get_instance(dip);


	/* Remove the minor nodes created for ioctl/cfgadm access. */
	if (instance->unroll.ioctl == 1) {
		ddi_remove_minor_node(dip, instance->iocnode);
		instance->unroll.ioctl = 0;
	}

	if (instance->unroll.scsictl == 1) {
		ddi_remove_minor_node(dip, "scsi");
		instance->unroll.scsictl = 0;
	}

	if (instance->unroll.devctl == 1) {
		ddi_remove_minor_node(dip, "devctl");
		instance->unroll.devctl = 0;
	}

	/*
	 * Detach from the SCSA framework.  On failure we abandon the
	 * rest of the teardown — the HBA is still registered, so freeing
	 * the resources below would leave SCSA with dangling pointers.
	 */
	if (instance->unroll.tranSetup == 1) {
		if (scsi_hba_detach(dip) != DDI_SUCCESS) {
			cmn_err(CE_WARN,
			    "mr_sas2%d: failed to detach", instance_no);
			return;	/* DDI_FAILURE */
		}
		instance->unroll.tranSetup = 0;
		con_log(CL_ANN1, (CE_CONT, "scsi_hba_dettach() done."));
	}

	if (instance->unroll.tran == 1) {
		scsi_hba_tran_free(instance->tran);
		instance->unroll.tran = 0;
		con_log(CL_ANN1, (CE_CONT, "scsi_hba_tran_free() done."));
	}

	/* Abort the outstanding RAID-map sync command (Thunderbolt only). */
	if (instance->unroll.syncCmd == 1) {
		if (instance->tbolt) {
			if (abort_syncmap_cmd(instance,
			    instance->map_update_cmd)) {
				cmn_err(CE_WARN, "mrsas_detach: "
				    "failed to abort previous syncmap command");
			}

			instance->unroll.syncCmd = 0;
			con_log(CL_ANN1, (CE_CONT, "sync cmd aborted, done."));
		}
	}

	/* Abort the outstanding asynchronous-event-notification command. */
	if (instance->unroll.aenPend == 1) {
		if (abort_aen_cmd(instance, instance->aen_cmd))
			cmn_err(CE_WARN, "mrsas_detach: "
			    "failed to abort prevous AEN command");

		instance->unroll.aenPend = 0;
		con_log(CL_ANN1, (CE_CONT, "aen cmd aborted, done."));
		/* This means the controller is fully initialized and running */
		/* Shutdown should be a last command to controller. */
		/* shutdown_controller(); */
	}


	/* Cancel the config-poll timer if it is still armed. */
	if (instance->unroll.timer == 1) {
		if (instance->timeout_id != (timeout_id_t)-1) {
			(void) untimeout(instance->timeout_id);
			instance->timeout_id = (timeout_id_t)-1;

			instance->unroll.timer = 0;
		}
	}

	/* Quiesce the hardware before destroying locks and handlers. */
	instance->func_ptr->disable_intr(instance);


	if (instance->unroll.mutexs == 1) {
		mutex_destroy(&instance->cmd_pool_mtx);
		mutex_destroy(&instance->app_cmd_pool_mtx);
		mutex_destroy(&instance->cmd_pend_mtx);
		mutex_destroy(&instance->completed_pool_mtx);
		mutex_destroy(&instance->sync_map_mtx);
		mutex_destroy(&instance->int_cmd_mtx);
		cv_destroy(&instance->int_cmd_cv);
		mutex_destroy(&instance->config_dev_mtx);
		mutex_destroy(&instance->ocr_flags_mtx);
		mutex_destroy(&instance->reg_write_mtx);

		/* These two locks exist only on Thunderbolt-class chips. */
		if (instance->tbolt) {
			mutex_destroy(&instance->cmd_app_pool_mtx);
			mutex_destroy(&instance->chip_mtx);
		}

		instance->unroll.mutexs = 0;
		con_log(CL_ANN1, (CE_CONT, "Destroy mutex & cv, done."));
	}


	if (instance->unroll.soft_isr == 1) {
		ddi_remove_softintr(instance->soft_intr_id);
		instance->unroll.soft_isr = 0;
	}

	if (instance->unroll.intr == 1) {
		mrsas_rem_intrs(instance);
		instance->unroll.intr = 0;
	}


	/* Destroy the DR-event taskq. */
	if (instance->unroll.taskq == 1) {
		if (instance->taskq) {
			ddi_taskq_destroy(instance->taskq);
			instance->unroll.taskq = 0;
		}

	}

	/*
	 * free dma memory allocated for
	 * cmds/frames/queues/driver version etc
	 */
	if (instance->unroll.verBuff == 1) {
		(void) mrsas_free_dma_obj(instance, instance->drv_ver_dma_obj);
		instance->unroll.verBuff = 0;
	}

	/* Free the physical-device list (Thunderbolt/Skinny PD support). */
	if (instance->unroll.pdlist_buff == 1) {
		if (instance->mr_tbolt_pd_list != NULL) {
			kmem_free(instance->mr_tbolt_pd_list,
			    MRSAS_TBOLT_GET_PD_MAX(instance) *
			    sizeof (struct mrsas_tbolt_pd));
		}

		instance->mr_tbolt_pd_list = NULL;
		instance->unroll.pdlist_buff = 0;
	}

	/* Free the logical-drive list. */
	if (instance->unroll.ldlist_buff == 1) {
		if (instance->mr_ld_list != NULL) {
			kmem_free(instance->mr_ld_list, MRDRV_MAX_LD
			    * sizeof (struct mrsas_ld));
		}

		instance->mr_ld_list = NULL;
		instance->unroll.ldlist_buff = 0;
	}

	/* Command/frame pools differ by chip generation (MPI2 vs MFI). */
	if (instance->tbolt) {
		if (instance->unroll.alloc_space_mpi2 == 1) {
			free_space_for_mpi2(instance);
			instance->unroll.alloc_space_mpi2 = 0;
		}
	} else {
		if (instance->unroll.alloc_space_mfi == 1) {
			free_space_for_mfi(instance);
			instance->unroll.alloc_space_mfi = 0;
		}
	}

	/* Finally unmap the register space. */
	if (instance->unroll.regs == 1) {
		ddi_regs_map_free(&instance->regmap_handle);
		instance->unroll.regs = 0;
		con_log(CL_ANN1, (CE_CONT, "ddi_regs_map_free() done."));
	}
}
1334 1336
1335 1337
1336 1338
1337 1339 /*
1338 1340 * ************************************************************************** *
1339 1341 * *
1340 1342 * common entry points - for character driver types *
1341 1343 * *
1342 1344 * ************************************************************************** *
1343 1345 */
1344 1346 /*
1345 1347 * open - gets access to a device
1346 1348 * @dev:
1347 1349 * @openflags:
1348 1350 * @otyp:
1349 1351 * @credp:
1350 1352 *
1351 1353 * Access to a device by one or more application programs is controlled
1352 1354 * through the open() and close() entry points. The primary function of
1353 1355 * open() is to verify that the open request is allowed.
1354 1356 */
1355 1357 static int
1356 1358 mrsas_open(dev_t *dev, int openflags, int otyp, cred_t *credp)
1357 1359 {
1358 1360 int rval = 0;
1359 1361
1360 1362 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
1361 1363
1362 1364 /* Check root permissions */
1363 1365 if (drv_priv(credp) != 0) {
1364 1366 con_log(CL_ANN, (CE_WARN,
1365 1367 "mr_sas: Non-root ioctl access denied!"));
1366 1368 return (EPERM);
1367 1369 }
1368 1370
1369 1371 /* Verify we are being opened as a character device */
1370 1372 if (otyp != OTYP_CHR) {
1371 1373 con_log(CL_ANN, (CE_WARN,
1372 1374 "mr_sas: ioctl node must be a char node"));
1373 1375 return (EINVAL);
1374 1376 }
1375 1377
1376 1378 if (ddi_get_soft_state(mrsas_state, MINOR2INST(getminor(*dev)))
1377 1379 == NULL) {
1378 1380 return (ENXIO);
1379 1381 }
1380 1382
1381 1383 if (scsi_hba_open) {
1382 1384 rval = scsi_hba_open(dev, openflags, otyp, credp);
1383 1385 }
1384 1386
1385 1387 return (rval);
1386 1388 }
1387 1389
1388 1390 /*
1389 1391 * close - gives up access to a device
1390 1392 * @dev:
1391 1393 * @openflags:
1392 1394 * @otyp:
1393 1395 * @credp:
1394 1396 *
1395 1397 * close() should perform any cleanup necessary to finish using the minor
1396 1398 * device, and prepare the device (and driver) to be opened again.
1397 1399 */
1398 1400 static int
1399 1401 mrsas_close(dev_t dev, int openflags, int otyp, cred_t *credp)
1400 1402 {
1401 1403 int rval = 0;
1402 1404
1403 1405 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
1404 1406
1405 1407 /* no need for locks! */
1406 1408
1407 1409 if (scsi_hba_close) {
1408 1410 rval = scsi_hba_close(dev, openflags, otyp, credp);
1409 1411 }
1410 1412
1411 1413 return (rval);
1412 1414 }
1413 1415
1414 1416 /*
1415 1417 * ioctl - performs a range of I/O commands for character drivers
1416 1418 * @dev:
1417 1419 * @cmd:
1418 1420 * @arg:
1419 1421 * @mode:
1420 1422 * @credp:
1421 1423 * @rvalp:
1422 1424 *
1423 1425 * ioctl() routine must make sure that user data is copied into or out of the
1424 1426 * kernel address space explicitly using copyin(), copyout(), ddi_copyin(),
1425 1427 * and ddi_copyout(), as appropriate.
1426 1428 * This is a wrapper routine to serialize access to the actual ioctl routine.
1427 1429 * ioctl() should return 0 on success, or the appropriate error number. The
1428 1430 * driver may also set the value returned to the calling process through rvalp.
1429 1431 */
1430 1432
1431 1433 static int
1432 1434 mrsas_ioctl(dev_t dev, int cmd, intptr_t arg, int mode, cred_t *credp,
1433 1435 int *rvalp)
1434 1436 {
1435 1437 int rval = 0;
1436 1438
1437 1439 struct mrsas_instance *instance;
1438 1440 struct mrsas_ioctl *ioctl;
1439 1441 struct mrsas_aen aen;
1440 1442 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
1441 1443
1442 1444 instance = ddi_get_soft_state(mrsas_state, MINOR2INST(getminor(dev)));
1443 1445
1444 1446 if (instance == NULL) {
1445 1447 /* invalid minor number */
1446 1448 con_log(CL_ANN, (CE_WARN, "mr_sas: adapter not found."));
1447 1449 return (ENXIO);
1448 1450 }
1449 1451
1450 1452 ioctl = (struct mrsas_ioctl *)kmem_zalloc(sizeof (struct mrsas_ioctl),
1451 1453 KM_SLEEP);
1452 1454 ASSERT(ioctl);
1453 1455
1454 1456 switch ((uint_t)cmd) {
1455 1457 case MRSAS_IOCTL_FIRMWARE:
1456 1458 if (ddi_copyin((void *)arg, ioctl,
1457 1459 sizeof (struct mrsas_ioctl), mode)) {
1458 1460 con_log(CL_ANN, (CE_WARN, "mrsas_ioctl: "
1459 1461 "ERROR IOCTL copyin"));
1460 1462 kmem_free(ioctl, sizeof (struct mrsas_ioctl));
1461 1463 return (EFAULT);
1462 1464 }
1463 1465
1464 1466 if (ioctl->control_code == MRSAS_DRIVER_IOCTL_COMMON) {
1465 1467 rval = handle_drv_ioctl(instance, ioctl, mode);
1466 1468 } else {
1467 1469 rval = handle_mfi_ioctl(instance, ioctl, mode);
1468 1470 }
1469 1471
1470 1472 if (ddi_copyout((void *)ioctl, (void *)arg,
1471 1473 (sizeof (struct mrsas_ioctl) - 1), mode)) {
1472 1474 con_log(CL_ANN, (CE_WARN,
1473 1475 "mrsas_ioctl: copy_to_user failed"));
1474 1476 rval = 1;
1475 1477 }
1476 1478
1477 1479 break;
1478 1480 case MRSAS_IOCTL_AEN:
1479 1481 if (ddi_copyin((void *) arg, &aen,
1480 1482 sizeof (struct mrsas_aen), mode)) {
1481 1483 con_log(CL_ANN, (CE_WARN,
1482 1484 "mrsas_ioctl: ERROR AEN copyin"));
1483 1485 kmem_free(ioctl, sizeof (struct mrsas_ioctl));
1484 1486 return (EFAULT);
1485 1487 }
1486 1488
1487 1489 rval = handle_mfi_aen(instance, &aen);
1488 1490
1489 1491 if (ddi_copyout((void *) &aen, (void *)arg,
1490 1492 sizeof (struct mrsas_aen), mode)) {
1491 1493 con_log(CL_ANN, (CE_WARN,
1492 1494 "mrsas_ioctl: copy_to_user failed"));
1493 1495 rval = 1;
1494 1496 }
1495 1497
1496 1498 break;
1497 1499 default:
1498 1500 rval = scsi_hba_ioctl(dev, cmd, arg,
1499 1501 mode, credp, rvalp);
1500 1502
1501 1503 con_log(CL_DLEVEL1, (CE_NOTE, "mrsas_ioctl: "
1502 1504 "scsi_hba_ioctl called, ret = %x.", rval));
1503 1505 }
1504 1506
1505 1507 kmem_free(ioctl, sizeof (struct mrsas_ioctl));
1506 1508 return (rval);
1507 1509 }
1508 1510
1509 1511 /*
1510 1512 * ************************************************************************** *
1511 1513 * *
1512 1514 * common entry points - for block driver types *
1513 1515 * *
1514 1516 * ************************************************************************** *
1515 1517 */
1516 1518 #ifdef __sparc
1517 1519 /*
1518 1520 * reset - TBD
1519 1521 * @dip:
1520 1522 * @cmd:
1521 1523 *
1522 1524 * TBD
1523 1525 */
1524 1526 /*ARGSUSED*/
1525 1527 static int
1526 1528 mrsas_reset(dev_info_t *dip, ddi_reset_cmd_t cmd)
1527 1529 {
1528 1530 int instance_no;
1529 1531
1530 1532 struct mrsas_instance *instance;
1531 1533
1532 1534 instance_no = ddi_get_instance(dip);
1533 1535 instance = (struct mrsas_instance *)ddi_get_soft_state
1534 1536 (mrsas_state, instance_no);
1535 1537
1536 1538 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
1537 1539
1538 1540 if (!instance) {
1539 1541 con_log(CL_ANN, (CE_WARN, "mr_sas:%d could not get adapter "
1540 1542 "in reset", instance_no));
1541 1543 return (DDI_FAILURE);
1542 1544 }
1543 1545
1544 1546 instance->func_ptr->disable_intr(instance);
1545 1547
1546 1548 con_log(CL_ANN1, (CE_CONT, "flushing cache for instance %d",
1547 1549 instance_no));
1548 1550
1549 1551 flush_cache(instance);
1550 1552
1551 1553 return (DDI_SUCCESS);
1552 1554 }
1553 1555 #else /* __sparc */
1554 1556 /*ARGSUSED*/
1555 1557 static int
1556 1558 mrsas_quiesce(dev_info_t *dip)
1557 1559 {
1558 1560 int instance_no;
1559 1561
1560 1562 struct mrsas_instance *instance;
1561 1563
1562 1564 instance_no = ddi_get_instance(dip);
1563 1565 instance = (struct mrsas_instance *)ddi_get_soft_state
1564 1566 (mrsas_state, instance_no);
1565 1567
1566 1568 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
1567 1569
1568 1570 if (!instance) {
1569 1571 con_log(CL_ANN1, (CE_WARN, "mr_sas:%d could not get adapter "
1570 1572 "in quiesce", instance_no));
1571 1573 return (DDI_FAILURE);
1572 1574 }
1573 1575 if (instance->deadadapter || instance->adapterresetinprogress) {
1574 1576 con_log(CL_ANN1, (CE_WARN, "mr_sas:%d adapter is not in "
1575 1577 "healthy state", instance_no));
1576 1578 return (DDI_FAILURE);
1577 1579 }
1578 1580
1579 1581 if (abort_aen_cmd(instance, instance->aen_cmd)) {
1580 1582 con_log(CL_ANN1, (CE_WARN, "mrsas_quiesce: "
1581 1583 "failed to abort prevous AEN command QUIESCE"));
1582 1584 }
1583 1585
1584 1586 if (instance->tbolt) {
1585 1587 if (abort_syncmap_cmd(instance,
1586 1588 instance->map_update_cmd)) {
1587 1589 cmn_err(CE_WARN,
1588 1590 "mrsas_detach: failed to abort "
1589 1591 "previous syncmap command");
1590 1592 return (DDI_FAILURE);
1591 1593 }
1592 1594 }
1593 1595
1594 1596 instance->func_ptr->disable_intr(instance);
1595 1597
1596 1598 con_log(CL_ANN1, (CE_CONT, "flushing cache for instance %d",
1597 1599 instance_no));
1598 1600
1599 1601 flush_cache(instance);
1600 1602
1601 1603 if (wait_for_outstanding(instance)) {
1602 1604 con_log(CL_ANN1,
1603 1605 (CE_CONT, "wait_for_outstanding: return FAIL.\n"));
1604 1606 return (DDI_FAILURE);
1605 1607 }
1606 1608 return (DDI_SUCCESS);
1607 1609 }
1608 1610 #endif /* __sparc */
1609 1611
1610 1612 /*
1611 1613 * ************************************************************************** *
1612 1614 * *
1613 1615 * entry points (SCSI HBA) *
1614 1616 * *
1615 1617 * ************************************************************************** *
1616 1618 */
1617 1619 /*
1618 1620 * tran_tgt_init - initialize a target device instance
1619 1621 * @hba_dip:
1620 1622 * @tgt_dip:
1621 1623 * @tran:
1622 1624 * @sd:
1623 1625 *
1624 1626 * The tran_tgt_init() entry point enables the HBA to allocate and initialize
1625 1627 * any per-target resources. tran_tgt_init() also enables the HBA to qualify
1626 1628 * the device's address as valid and supportable for that particular HBA.
1627 1629 * By returning DDI_FAILURE, the instance of the target driver for that device
1628 1630 * is not probed or attached.
1629 1631 */
1630 1632 /*ARGSUSED*/
1631 1633 static int
1632 1634 mrsas_tran_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
1633 1635 scsi_hba_tran_t *tran, struct scsi_device *sd)
1634 1636 {
1635 1637 struct mrsas_instance *instance;
1636 1638 uint16_t tgt = sd->sd_address.a_target;
1637 1639 uint8_t lun = sd->sd_address.a_lun;
1638 1640 dev_info_t *child = NULL;
1639 1641
1640 1642 con_log(CL_DLEVEL2, (CE_NOTE, "mrsas_tgt_init target %d lun %d",
1641 1643 tgt, lun));
1642 1644
1643 1645 instance = ADDR2MR(&sd->sd_address);
1644 1646
1645 1647 if (ndi_dev_is_persistent_node(tgt_dip) == 0) {
1646 1648 /*
1647 1649 * If no persistent node exists, we don't allow .conf node
1648 1650 * to be created.
1649 1651 */
1650 1652 if ((child = mrsas_find_child(instance, tgt, lun)) != NULL) {
1651 1653 con_log(CL_DLEVEL2,
1652 1654 (CE_NOTE, "mrsas_tgt_init find child ="
1653 1655 " %p t = %d l = %d", (void *)child, tgt, lun));
1654 1656 if (ndi_merge_node(tgt_dip, mrsas_name_node) !=
1655 1657 DDI_SUCCESS)
1656 1658 /* Create this .conf node */
1657 1659 return (DDI_SUCCESS);
1658 1660 }
1659 1661 con_log(CL_DLEVEL2, (CE_NOTE, "mrsas_tgt_init in ndi_per "
1660 1662 "DDI_FAILURE t = %d l = %d", tgt, lun));
1661 1663 return (DDI_FAILURE);
1662 1664
1663 1665 }
1664 1666
1665 1667 con_log(CL_DLEVEL2, (CE_NOTE, "mrsas_tgt_init dev_dip %p tgt_dip %p",
1666 1668 (void *)instance->mr_ld_list[tgt].dip, (void *)tgt_dip));
1667 1669
1668 1670 if (tgt < MRDRV_MAX_LD && lun == 0) {
1669 1671 if (instance->mr_ld_list[tgt].dip == NULL &&
1670 1672 strcmp(ddi_driver_name(sd->sd_dev), "sd") == 0) {
1671 1673 mutex_enter(&instance->config_dev_mtx);
1672 1674 instance->mr_ld_list[tgt].dip = tgt_dip;
1673 1675 instance->mr_ld_list[tgt].lun_type = MRSAS_LD_LUN;
1674 1676 instance->mr_ld_list[tgt].flag = MRDRV_TGT_VALID;
1675 1677 mutex_exit(&instance->config_dev_mtx);
1676 1678 }
1677 1679 }
1678 1680
1679 1681 #ifdef PDSUPPORT
1680 1682 else if (instance->tbolt || instance->skinny) {
1681 1683 if (instance->mr_tbolt_pd_list[tgt].dip == NULL) {
1682 1684 mutex_enter(&instance->config_dev_mtx);
1683 1685 instance->mr_tbolt_pd_list[tgt].dip = tgt_dip;
1684 1686 instance->mr_tbolt_pd_list[tgt].flag =
1685 1687 MRDRV_TGT_VALID;
1686 1688 mutex_exit(&instance->config_dev_mtx);
1687 1689 con_log(CL_ANN1, (CE_NOTE, "mrsas_tran_tgt_init:"
1688 1690 "t%xl%x", tgt, lun));
1689 1691 }
1690 1692 }
1691 1693 #endif
1692 1694
1693 1695 return (DDI_SUCCESS);
1694 1696 }
1695 1697
/*
 * tran_tgt_free: undo mrsas_tran_tgt_init's bookkeeping — drop the dip
 * recorded for this target so the slot can be reused.
 */
/*ARGSUSED*/
static void
mrsas_tran_tgt_free(dev_info_t *hba_dip, dev_info_t *tgt_dip,
    scsi_hba_tran_t *hba_tran, struct scsi_device *sd)
{
	struct mrsas_instance *instance;
	int tgt = sd->sd_address.a_target;
	int lun = sd->sd_address.a_lun;

	instance = ADDR2MR(&sd->sd_address);

	con_log(CL_DLEVEL2, (CE_NOTE, "tgt_free t = %d l = %d", tgt, lun));

	/* Clear the LD slot only if it still points at this child. */
	if (tgt < MRDRV_MAX_LD && lun == 0) {
		if (instance->mr_ld_list[tgt].dip == tgt_dip) {
			mutex_enter(&instance->config_dev_mtx);
			instance->mr_ld_list[tgt].dip = NULL;
			mutex_exit(&instance->config_dev_mtx);
		}
	}

#ifdef PDSUPPORT
	/*
	 * NOTE(review): unlike the LD branch, this clears the PD slot
	 * without checking dip == tgt_dip, and tgt indexes
	 * mr_tbolt_pd_list[] with no upper-bound check — confirm callers
	 * keep tgt within MRSAS_TBOLT_GET_PD_MAX(instance).
	 */
	else if (instance->tbolt || instance->skinny) {
		mutex_enter(&instance->config_dev_mtx);
		instance->mr_tbolt_pd_list[tgt].dip = NULL;
		mutex_exit(&instance->config_dev_mtx);
		con_log(CL_ANN1, (CE_NOTE, "tgt_free: Setting dip = NULL"
		    "for tgt:%x", tgt));
	}
#endif

}
1728 1730
1729 1731 dev_info_t *
1730 1732 mrsas_find_child(struct mrsas_instance *instance, uint16_t tgt, uint8_t lun)
1731 1733 {
1732 1734 dev_info_t *child = NULL;
1733 1735 char addr[SCSI_MAXNAMELEN];
1734 1736 char tmp[MAXNAMELEN];
1735 1737
1736 1738 (void) sprintf(addr, "%x,%x", tgt, lun);
1737 1739 for (child = ddi_get_child(instance->dip); child;
1738 1740 child = ddi_get_next_sibling(child)) {
1739 1741
1740 1742 if (ndi_dev_is_persistent_node(child) == 0) {
1741 1743 continue;
1742 1744 }
1743 1745
1744 1746 if (mrsas_name_node(child, tmp, MAXNAMELEN) !=
1745 1747 DDI_SUCCESS) {
1746 1748 continue;
1747 1749 }
1748 1750
1749 1751 if (strcmp(addr, tmp) == 0) {
1750 1752 break;
1751 1753 }
1752 1754 }
1753 1755 con_log(CL_DLEVEL2, (CE_NOTE, "mrsas_find_child: return child = %p",
1754 1756 (void *)child));
1755 1757 return (child);
1756 1758 }
1757 1759
1758 1760 /*
1759 1761 * mrsas_name_node -
1760 1762 * @dip:
1761 1763 * @name:
1762 1764 * @len:
1763 1765 */
1764 1766 static int
1765 1767 mrsas_name_node(dev_info_t *dip, char *name, int len)
1766 1768 {
1767 1769 int tgt, lun;
1768 1770
1769 1771 tgt = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
1770 1772 DDI_PROP_DONTPASS, "target", -1);
1771 1773 con_log(CL_DLEVEL2, (CE_NOTE,
1772 1774 "mrsas_name_node: dip %p tgt %d", (void *)dip, tgt));
1773 1775 if (tgt == -1) {
1774 1776 return (DDI_FAILURE);
1775 1777 }
1776 1778 lun = ddi_prop_get_int(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
1777 1779 "lun", -1);
1778 1780 con_log(CL_DLEVEL2,
1779 1781 (CE_NOTE, "mrsas_name_node: tgt %d lun %d", tgt, lun));
1780 1782 if (lun == -1) {
1781 1783 return (DDI_FAILURE);
1782 1784 }
1783 1785 (void) snprintf(name, len, "%x,%x", tgt, lun);
1784 1786 return (DDI_SUCCESS);
1785 1787 }
1786 1788
1787 1789 /*
1788 1790 * tran_init_pkt - allocate & initialize a scsi_pkt structure
1789 1791 * @ap:
1790 1792 * @pkt:
1791 1793 * @bp:
1792 1794 * @cmdlen:
1793 1795 * @statuslen:
1794 1796 * @tgtlen:
1795 1797 * @flags:
1796 1798 * @callback:
1797 1799 *
1798 1800 * The tran_init_pkt() entry point allocates and initializes a scsi_pkt
1799 1801 * structure and DMA resources for a target driver request. The
1800 1802 * tran_init_pkt() entry point is called when the target driver calls the
1801 1803 * SCSA function scsi_init_pkt(). Each call of the tran_init_pkt() entry point
1802 1804 * is a request to perform one or more of three possible services:
1803 1805 * - allocation and initialization of a scsi_pkt structure
1804 1806 * - allocation of DMA resources for data transfer
1805 1807 * - reallocation of DMA resources for the next portion of the data transfer
1806 1808 */
1807 1809 static struct scsi_pkt *
1808 1810 mrsas_tran_init_pkt(struct scsi_address *ap, register struct scsi_pkt *pkt,
1809 1811 struct buf *bp, int cmdlen, int statuslen, int tgtlen,
1810 1812 int flags, int (*callback)(), caddr_t arg)
1811 1813 {
1812 1814 struct scsa_cmd *acmd;
1813 1815 struct mrsas_instance *instance;
1814 1816 struct scsi_pkt *new_pkt;
1815 1817
1816 1818 con_log(CL_DLEVEL1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
1817 1819
1818 1820 instance = ADDR2MR(ap);
1819 1821
1820 1822 /* step #1 : pkt allocation */
1821 1823 if (pkt == NULL) {
1822 1824 pkt = scsi_hba_pkt_alloc(instance->dip, ap, cmdlen, statuslen,
1823 1825 tgtlen, sizeof (struct scsa_cmd), callback, arg);
1824 1826 if (pkt == NULL) {
1825 1827 return (NULL);
1826 1828 }
1827 1829
1828 1830 acmd = PKT2CMD(pkt);
1829 1831
1830 1832 /*
1831 1833 * Initialize the new pkt - we redundantly initialize
1832 1834 * all the fields for illustrative purposes.
1833 1835 */
1834 1836 acmd->cmd_pkt = pkt;
1835 1837 acmd->cmd_flags = 0;
1836 1838 acmd->cmd_scblen = statuslen;
1837 1839 acmd->cmd_cdblen = cmdlen;
1838 1840 acmd->cmd_dmahandle = NULL;
1839 1841 acmd->cmd_ncookies = 0;
1840 1842 acmd->cmd_cookie = 0;
1841 1843 acmd->cmd_cookiecnt = 0;
1842 1844 acmd->cmd_nwin = 0;
1843 1845
1844 1846 pkt->pkt_address = *ap;
1845 1847 pkt->pkt_comp = (void (*)())NULL;
1846 1848 pkt->pkt_flags = 0;
1847 1849 pkt->pkt_time = 0;
1848 1850 pkt->pkt_resid = 0;
1849 1851 pkt->pkt_state = 0;
1850 1852 pkt->pkt_statistics = 0;
1851 1853 pkt->pkt_reason = 0;
1852 1854 new_pkt = pkt;
1853 1855 } else {
1854 1856 acmd = PKT2CMD(pkt);
1855 1857 new_pkt = NULL;
1856 1858 }
1857 1859
1858 1860 /* step #2 : dma allocation/move */
1859 1861 if (bp && bp->b_bcount != 0) {
1860 1862 if (acmd->cmd_dmahandle == NULL) {
1861 1863 if (mrsas_dma_alloc(instance, pkt, bp, flags,
1862 1864 callback) == DDI_FAILURE) {
1863 1865 if (new_pkt) {
1864 1866 scsi_hba_pkt_free(ap, new_pkt);
1865 1867 }
1866 1868 return ((struct scsi_pkt *)NULL);
1867 1869 }
1868 1870 } else {
1869 1871 if (mrsas_dma_move(instance, pkt, bp) == DDI_FAILURE) {
1870 1872 return ((struct scsi_pkt *)NULL);
1871 1873 }
1872 1874 }
1873 1875 }
1874 1876
1875 1877 return (pkt);
1876 1878 }
1877 1879
1878 1880 /*
1879 1881 * tran_start - transport a SCSI command to the addressed target
1880 1882 * @ap:
1881 1883 * @pkt:
1882 1884 *
1883 1885 * The tran_start() entry point for a SCSI HBA driver is called to transport a
1884 1886 * SCSI command to the addressed target. The SCSI command is described
1885 1887 * entirely within the scsi_pkt structure, which the target driver allocated
1886 1888 * through the HBA driver's tran_init_pkt() entry point. If the command
1887 1889 * involves a data transfer, DMA resources must also have been allocated for
1888 1890 * the scsi_pkt structure.
1889 1891 *
1890 1892 * Return Values :
1891 1893 * TRAN_BUSY - request queue is full, no more free scbs
1892 1894 * TRAN_ACCEPT - pkt has been submitted to the instance
1893 1895 */
static int
mrsas_tran_start(struct scsi_address *ap, register struct scsi_pkt *pkt)
{
	uchar_t cmd_done = 0;

	struct mrsas_instance *instance = ADDR2MR(ap);
	struct mrsas_cmd *cmd;

	con_log(CL_DLEVEL1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
	/* A dead adapter accepts no further I/O: fail the request fatally. */
	if (instance->deadadapter == 1) {
		con_log(CL_ANN1, (CE_WARN,
		    "mrsas_tran_start: return TRAN_FATAL_ERROR "
		    "for IO, as the HBA doesnt take any more IOs"));
		if (pkt) {
			pkt->pkt_reason = CMD_DEV_GONE;
			pkt->pkt_statistics = STAT_DISCON;
		}
		return (TRAN_FATAL_ERROR);
	}

	/* While an adapter reset is in progress, ask SCSA to retry later. */
	if (instance->adapterresetinprogress) {
		con_log(CL_ANN1, (CE_NOTE, "mrsas_tran_start: Reset flag set, "
		    "returning mfi_pkt and setting TRAN_BUSY\n"));
		return (TRAN_BUSY);
	}

	con_log(CL_ANN1, (CE_CONT, "chkpnt:%s:%d:SCSI CDB[0]=0x%x time:%x",
	    __func__, __LINE__, pkt->pkt_cdbp[0], pkt->pkt_time));

	pkt->pkt_reason = CMD_CMPLT;
	*pkt->pkt_scbp = STATUS_GOOD; /* clear arq scsi_status */

	cmd = build_cmd(instance, ap, pkt, &cmd_done);

	/*
	 * Check if the command is already completed by the mrsas_build_cmd()
	 * routine. In which case the busy_flag would be clear and scb will be
	 * NULL and appropriate reason provided in pkt_reason field
	 */
	if (cmd_done) {
		pkt->pkt_reason = CMD_CMPLT;
		pkt->pkt_scbp[0] = STATUS_GOOD;
		pkt->pkt_state |= STATE_GOT_BUS | STATE_GOT_TARGET
		    | STATE_SENT_CMD;
		/* Complete in place unless the caller asked to poll. */
		if (((pkt->pkt_flags & FLAG_NOINTR) == 0) && pkt->pkt_comp) {
			(*pkt->pkt_comp)(pkt);
		}

		return (TRAN_ACCEPT);
	}

	/* No command available from the pool: back-pressure SCSA. */
	if (cmd == NULL) {
		return (TRAN_BUSY);
	}

	if ((pkt->pkt_flags & FLAG_NOINTR) == 0) {
		/*
		 * Interrupt-driven path: hand the frame to firmware and
		 * return; completion arrives via mrsas_isr()/softint.
		 */
		if (instance->fw_outstanding > instance->max_fw_cmds) {
			con_log(CL_ANN, (CE_CONT, "mr_sas:Firmware busy"));
			DTRACE_PROBE2(start_tran_err,
			    uint16_t, instance->fw_outstanding,
			    uint16_t, instance->max_fw_cmds);
			mrsas_return_mfi_pkt(instance, cmd);
			return (TRAN_BUSY);
		}

		/* Synchronize the Cmd frame for the controller */
		(void) ddi_dma_sync(cmd->frame_dma_obj.dma_handle, 0, 0,
		    DDI_DMA_SYNC_FORDEV);
		con_log(CL_ANN, (CE_CONT, "issue_cmd_ppc: SCSI CDB[0]=0x%x"
		    "cmd->index:%x\n", pkt->pkt_cdbp[0], cmd->index));
		instance->func_ptr->issue_cmd(cmd, instance);

	} else {
		/*
		 * Polled (FLAG_NOINTR) path: issue synchronously, then
		 * translate the MFI status in the frame header into
		 * pkt_reason / SCSI status for the target driver.
		 */
		struct mrsas_header *hdr = &cmd->frame->hdr;

		instance->func_ptr->issue_cmd_in_poll_mode(instance, cmd);

		pkt->pkt_reason = CMD_CMPLT;
		pkt->pkt_statistics = 0;
		pkt->pkt_state |= STATE_XFERRED_DATA | STATE_GOT_STATUS;

		switch (ddi_get8(cmd->frame_dma_obj.acc_handle,
		    &hdr->cmd_status)) {
		case MFI_STAT_OK:
			pkt->pkt_scbp[0] = STATUS_GOOD;
			break;

		case MFI_STAT_SCSI_DONE_WITH_ERROR:
			con_log(CL_ANN, (CE_CONT,
			    "mrsas_tran_start: scsi done with error"));
			pkt->pkt_reason = CMD_CMPLT;
			pkt->pkt_statistics = 0;

			/* Check condition: sense data should follow. */
			((struct scsi_status *)pkt->pkt_scbp)->sts_chk = 1;
			break;

		case MFI_STAT_DEVICE_NOT_FOUND:
			con_log(CL_ANN, (CE_CONT,
			    "mrsas_tran_start: device not found error"));
			pkt->pkt_reason = CMD_DEV_GONE;
			pkt->pkt_statistics = STAT_DISCON;
			break;

		default:
			/* Any other firmware status is reported as busy. */
			((struct scsi_status *)pkt->pkt_scbp)->sts_busy = 1;
		}

		(void) mrsas_common_check(instance, cmd);
		DTRACE_PROBE2(start_nointr_done, uint8_t, hdr->cmd,
		    uint8_t, hdr->cmd_status);
		mrsas_return_mfi_pkt(instance, cmd);

		if (pkt->pkt_comp) {
			(*pkt->pkt_comp)(pkt);
		}

	}

	return (TRAN_ACCEPT);
}
2014 2016
2015 2017 /*
2016 2018 * tran_abort - Abort any commands that are currently in transport
2017 2019 * @ap:
2018 2020 * @pkt:
2019 2021 *
2020 2022 * The tran_abort() entry point for a SCSI HBA driver is called to abort any
2021 2023 * commands that are currently in transport for a particular target. This entry
2022 2024 * point is called when a target driver calls scsi_abort(). The tran_abort()
2023 2025 * entry point should attempt to abort the command denoted by the pkt
2024 2026 * parameter. If the pkt parameter is NULL, tran_abort() should attempt to
2025 2027 * abort all outstanding commands in the transport layer for the particular
2026 2028 * target or logical unit.
2027 2029 */
2028 2030 /*ARGSUSED*/
2029 2031 static int
2030 2032 mrsas_tran_abort(struct scsi_address *ap, struct scsi_pkt *pkt)
2031 2033 {
2032 2034 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
2033 2035
2034 2036 /* abort command not supported by H/W */
2035 2037
2036 2038 return (DDI_FAILURE);
2037 2039 }
2038 2040
2039 2041 /*
2040 2042 * tran_reset - reset either the SCSI bus or target
2041 2043 * @ap:
2042 2044 * @level:
2043 2045 *
2044 2046 * The tran_reset() entry point for a SCSI HBA driver is called to reset either
2045 2047 * the SCSI bus or a particular SCSI target device. This entry point is called
2046 2048 * when a target driver calls scsi_reset(). The tran_reset() entry point must
2047 2049 * reset the SCSI bus if level is RESET_ALL. If level is RESET_TARGET, just the
2048 2050 * particular target or logical unit must be reset.
2049 2051 */
2050 2052 /*ARGSUSED*/
2051 2053 static int
2052 2054 mrsas_tran_reset(struct scsi_address *ap, int level)
2053 2055 {
2054 2056 struct mrsas_instance *instance = ADDR2MR(ap);
2055 2057
2056 2058 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
2057 2059
2058 2060 if (wait_for_outstanding(instance)) {
2059 2061 con_log(CL_ANN1,
2060 2062 (CE_CONT, "wait_for_outstanding: return FAIL.\n"));
2061 2063 return (DDI_FAILURE);
2062 2064 } else {
2063 2065 return (DDI_SUCCESS);
2064 2066 }
2065 2067 }
2066 2068
2067 2069 /*
2068 2070 * tran_getcap - get one of a set of SCSA-defined capabilities
2069 2071 * @ap:
2070 2072 * @cap:
2071 2073 * @whom:
2072 2074 *
2073 2075 * The target driver can request the current setting of the capability for a
2074 2076 * particular target by setting the whom parameter to nonzero. A whom value of
2075 2077 * zero indicates a request for the current setting of the general capability
2076 2078 * for the SCSI bus or for adapter hardware. The tran_getcap() should return -1
2077 2079 * for undefined capabilities or the current value of the requested capability.
2078 2080 */
2079 2081 /*ARGSUSED*/
2080 2082 static int
2081 2083 mrsas_tran_getcap(struct scsi_address *ap, char *cap, int whom)
2082 2084 {
2083 2085 int rval = 0;
2084 2086
2085 2087 struct mrsas_instance *instance = ADDR2MR(ap);
2086 2088
2087 2089 con_log(CL_DLEVEL2, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
2088 2090
2089 2091 /* we do allow inquiring about capabilities for other targets */
2090 2092 if (cap == NULL) {
2091 2093 return (-1);
2092 2094 }
2093 2095
2094 2096 switch (scsi_hba_lookup_capstr(cap)) {
2095 2097 case SCSI_CAP_DMA_MAX:
2096 2098 if (instance->tbolt) {
2097 2099 /* Limit to 256k max transfer */
2098 2100 rval = mrsas_tbolt_max_cap_maxxfer;
2099 2101 } else {
2100 2102 /* Limit to 16MB max transfer */
2101 2103 rval = mrsas_max_cap_maxxfer;
2102 2104 }
2103 2105 break;
2104 2106 case SCSI_CAP_MSG_OUT:
2105 2107 rval = 1;
2106 2108 break;
2107 2109 case SCSI_CAP_DISCONNECT:
2108 2110 rval = 0;
2109 2111 break;
2110 2112 case SCSI_CAP_SYNCHRONOUS:
2111 2113 rval = 0;
2112 2114 break;
2113 2115 case SCSI_CAP_WIDE_XFER:
2114 2116 rval = 1;
2115 2117 break;
2116 2118 case SCSI_CAP_TAGGED_QING:
2117 2119 rval = 1;
2118 2120 break;
2119 2121 case SCSI_CAP_UNTAGGED_QING:
2120 2122 rval = 1;
2121 2123 break;
2122 2124 case SCSI_CAP_PARITY:
2123 2125 rval = 1;
2124 2126 break;
2125 2127 case SCSI_CAP_INITIATOR_ID:
2126 2128 rval = instance->init_id;
2127 2129 break;
2128 2130 case SCSI_CAP_ARQ:
2129 2131 rval = 1;
2130 2132 break;
2131 2133 case SCSI_CAP_LINKED_CMDS:
2132 2134 rval = 0;
2133 2135 break;
2134 2136 case SCSI_CAP_RESET_NOTIFICATION:
2135 2137 rval = 1;
2136 2138 break;
2137 2139 case SCSI_CAP_GEOMETRY:
2138 2140 rval = -1;
2139 2141
2140 2142 break;
2141 2143 default:
2142 2144 con_log(CL_DLEVEL2, (CE_NOTE, "Default cap coming 0x%x",
2143 2145 scsi_hba_lookup_capstr(cap)));
2144 2146 rval = -1;
2145 2147 break;
2146 2148 }
2147 2149
2148 2150 return (rval);
2149 2151 }
2150 2152
2151 2153 /*
2152 2154 * tran_setcap - set one of a set of SCSA-defined capabilities
2153 2155 * @ap:
2154 2156 * @cap:
2155 2157 * @value:
2156 2158 * @whom:
2157 2159 *
2158 2160 * The target driver might request that the new value be set for a particular
2159 2161 * target by setting the whom parameter to nonzero. A whom value of zero
2160 2162 * means that request is to set the new value for the SCSI bus or for adapter
2161 2163 * hardware in general.
2162 2164 * The tran_setcap() should return the following values as appropriate:
2163 2165 * - -1 for undefined capabilities
2164 2166 * - 0 if the HBA driver cannot set the capability to the requested value
2165 2167 * - 1 if the HBA driver is able to set the capability to the requested value
2166 2168 */
2167 2169 /*ARGSUSED*/
2168 2170 static int
2169 2171 mrsas_tran_setcap(struct scsi_address *ap, char *cap, int value, int whom)
2170 2172 {
2171 2173 int rval = 1;
2172 2174
2173 2175 con_log(CL_DLEVEL2, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
2174 2176
2175 2177 /* We don't allow setting capabilities for other targets */
2176 2178 if (cap == NULL || whom == 0) {
2177 2179 return (-1);
2178 2180 }
2179 2181
2180 2182 switch (scsi_hba_lookup_capstr(cap)) {
2181 2183 case SCSI_CAP_DMA_MAX:
2182 2184 case SCSI_CAP_MSG_OUT:
2183 2185 case SCSI_CAP_PARITY:
2184 2186 case SCSI_CAP_LINKED_CMDS:
2185 2187 case SCSI_CAP_RESET_NOTIFICATION:
2186 2188 case SCSI_CAP_DISCONNECT:
2187 2189 case SCSI_CAP_SYNCHRONOUS:
2188 2190 case SCSI_CAP_UNTAGGED_QING:
2189 2191 case SCSI_CAP_WIDE_XFER:
2190 2192 case SCSI_CAP_INITIATOR_ID:
2191 2193 case SCSI_CAP_ARQ:
2192 2194 /*
2193 2195 * None of these are settable via
2194 2196 * the capability interface.
2195 2197 */
2196 2198 break;
2197 2199 case SCSI_CAP_TAGGED_QING:
2198 2200 rval = 1;
2199 2201 break;
2200 2202 case SCSI_CAP_SECTOR_SIZE:
2201 2203 rval = 1;
2202 2204 break;
2203 2205
2204 2206 case SCSI_CAP_TOTAL_SECTORS:
2205 2207 rval = 1;
2206 2208 break;
2207 2209 default:
2208 2210 rval = -1;
2209 2211 break;
2210 2212 }
2211 2213
2212 2214 return (rval);
2213 2215 }
2214 2216
2215 2217 /*
2216 2218 * tran_destroy_pkt - deallocate scsi_pkt structure
2217 2219 * @ap:
2218 2220 * @pkt:
2219 2221 *
2220 2222 * The tran_destroy_pkt() entry point is the HBA driver function that
2221 2223 * deallocates scsi_pkt structures. The tran_destroy_pkt() entry point is
2222 2224 * called when the target driver calls scsi_destroy_pkt(). The
2223 2225 * tran_destroy_pkt() entry point must free any DMA resources that have been
2224 2226 * allocated for the packet. An implicit DMA synchronization occurs if the
2225 2227 * DMA resources are freed and any cached data remains after the completion
2226 2228 * of the transfer.
2227 2229 */
2228 2230 static void
2229 2231 mrsas_tran_destroy_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
2230 2232 {
2231 2233 struct scsa_cmd *acmd = PKT2CMD(pkt);
2232 2234
2233 2235 con_log(CL_DLEVEL2, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
2234 2236
2235 2237 if (acmd->cmd_flags & CFLAG_DMAVALID) {
2236 2238 acmd->cmd_flags &= ~CFLAG_DMAVALID;
2237 2239
2238 2240 (void) ddi_dma_unbind_handle(acmd->cmd_dmahandle);
2239 2241
2240 2242 ddi_dma_free_handle(&acmd->cmd_dmahandle);
2241 2243
2242 2244 acmd->cmd_dmahandle = NULL;
2243 2245 }
2244 2246
2245 2247 /* free the pkt */
2246 2248 scsi_hba_pkt_free(ap, pkt);
2247 2249 }
2248 2250
2249 2251 /*
2250 2252 * tran_dmafree - deallocates DMA resources
2251 2253 * @ap:
2252 2254 * @pkt:
2253 2255 *
2254 2256 * The tran_dmafree() entry point deallocates DMAQ resources that have been
2255 2257 * allocated for a scsi_pkt structure. The tran_dmafree() entry point is
2256 2258 * called when the target driver calls scsi_dmafree(). The tran_dmafree() must
2257 2259 * free only DMA resources allocated for a scsi_pkt structure, not the
2258 2260 * scsi_pkt itself. When DMA resources are freed, a DMA synchronization is
2259 2261 * implicitly performed.
2260 2262 */
2261 2263 /*ARGSUSED*/
2262 2264 static void
2263 2265 mrsas_tran_dmafree(struct scsi_address *ap, struct scsi_pkt *pkt)
2264 2266 {
2265 2267 register struct scsa_cmd *acmd = PKT2CMD(pkt);
2266 2268
2267 2269 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
2268 2270
2269 2271 if (acmd->cmd_flags & CFLAG_DMAVALID) {
2270 2272 acmd->cmd_flags &= ~CFLAG_DMAVALID;
2271 2273
2272 2274 (void) ddi_dma_unbind_handle(acmd->cmd_dmahandle);
2273 2275
2274 2276 ddi_dma_free_handle(&acmd->cmd_dmahandle);
2275 2277
2276 2278 acmd->cmd_dmahandle = NULL;
2277 2279 }
2278 2280 }
2279 2281
2280 2282 /*
2281 2283 * tran_sync_pkt - synchronize the DMA object allocated
2282 2284 * @ap:
2283 2285 * @pkt:
2284 2286 *
2285 2287 * The tran_sync_pkt() entry point synchronizes the DMA object allocated for
2286 2288 * the scsi_pkt structure before or after a DMA transfer. The tran_sync_pkt()
2287 2289 * entry point is called when the target driver calls scsi_sync_pkt(). If the
2288 2290 * data transfer direction is a DMA read from device to memory, tran_sync_pkt()
2289 2291 * must synchronize the CPU's view of the data. If the data transfer direction
2290 2292 * is a DMA write from memory to device, tran_sync_pkt() must synchronize the
2291 2293 * device's view of the data.
2292 2294 */
2293 2295 /*ARGSUSED*/
2294 2296 static void
2295 2297 mrsas_tran_sync_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
2296 2298 {
2297 2299 register struct scsa_cmd *acmd = PKT2CMD(pkt);
2298 2300
2299 2301 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
2300 2302
2301 2303 if (acmd->cmd_flags & CFLAG_DMAVALID) {
2302 2304 (void) ddi_dma_sync(acmd->cmd_dmahandle, acmd->cmd_dma_offset,
2303 2305 acmd->cmd_dma_len, (acmd->cmd_flags & CFLAG_DMASEND) ?
2304 2306 DDI_DMA_SYNC_FORDEV : DDI_DMA_SYNC_FORCPU);
2305 2307 }
2306 2308 }
2307 2309
2308 2310 /*ARGSUSED*/
2309 2311 static int
2310 2312 mrsas_tran_quiesce(dev_info_t *dip)
2311 2313 {
2312 2314 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
2313 2315
2314 2316 return (1);
2315 2317 }
2316 2318
2317 2319 /*ARGSUSED*/
2318 2320 static int
2319 2321 mrsas_tran_unquiesce(dev_info_t *dip)
2320 2322 {
2321 2323 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
2322 2324
2323 2325 return (1);
2324 2326 }
2325 2327
2326 2328
2327 2329 /*
2328 2330 * mrsas_isr(caddr_t)
2329 2331 *
2330 2332 * The Interrupt Service Routine
2331 2333 *
2332 2334 * Collect status for all completed commands and do callback
2333 2335 *
2334 2336 */
static uint_t
mrsas_isr(struct mrsas_instance *instance)
{
	int need_softintr;
	uint32_t producer;
	uint32_t consumer;
	uint32_t context;
	int retval;

	struct mrsas_cmd *cmd;
	struct mrsas_header *hdr;
	struct scsi_pkt *pkt;

	con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
	ASSERT(instance);
	/* Thunderbolt/Fury adapters use their own completion machinery. */
	if (instance->tbolt) {
		mutex_enter(&instance->chip_mtx);
		/* Shared (FIXED) interrupt: verify this one is really ours. */
		if ((instance->intr_type == DDI_INTR_TYPE_FIXED) &&
		    !(instance->func_ptr->intr_ack(instance))) {
			mutex_exit(&instance->chip_mtx);
			return (DDI_INTR_UNCLAIMED);
		}
		retval = mr_sas_tbolt_process_outstanding_cmd(instance);
		mutex_exit(&instance->chip_mtx);
		return (retval);
	} else {
		if ((instance->intr_type == DDI_INTR_TYPE_FIXED) &&
		    !instance->func_ptr->intr_ack(instance)) {
			return (DDI_INTR_UNCLAIMED);
		}
	}

	/* Pull the reply queue and its indices into CPU view. */
	(void) ddi_dma_sync(instance->mfi_internal_dma_obj.dma_handle,
	    0, 0, DDI_DMA_SYNC_FORCPU);

	if (mrsas_check_dma_handle(instance->mfi_internal_dma_obj.dma_handle)
	    != DDI_SUCCESS) {
		mrsas_fm_ereport(instance, DDI_FM_DEVICE_NO_RESPONSE);
		ddi_fm_service_impact(instance->dip, DDI_SERVICE_LOST);
		con_log(CL_ANN1, (CE_WARN,
		    "mr_sas_isr(): FMA check, returning DDI_INTR_UNCLAIMED"));
		/*
		 * NOTE(review): the message above says UNCLAIMED but the
		 * code returns DDI_INTR_CLAIMED -- confirm which is intended.
		 */
		return (DDI_INTR_CLAIMED);
	}
	con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));

#ifdef OCRDEBUG
	if (debug_consecutive_timeout_after_ocr_g == 1) {
		con_log(CL_ANN1, (CE_NOTE,
		    "simulating consecutive timeout after ocr"));
		return (DDI_INTR_CLAIMED);
	}
#endif

	/* Lock order here: completed_pool_mtx before cmd_pend_mtx. */
	mutex_enter(&instance->completed_pool_mtx);
	mutex_enter(&instance->cmd_pend_mtx);

	producer = ddi_get32(instance->mfi_internal_dma_obj.acc_handle,
	    instance->producer);
	consumer = ddi_get32(instance->mfi_internal_dma_obj.acc_handle,
	    instance->consumer);

	con_log(CL_ANN, (CE_CONT, " producer %x consumer %x ",
	    producer, consumer));
	if (producer == consumer) {
		/* Empty reply queue: spurious or already-serviced. */
		con_log(CL_ANN, (CE_WARN, "producer == consumer case"));
		DTRACE_PROBE2(isr_pc_err, uint32_t, producer,
		    uint32_t, consumer);
		mutex_exit(&instance->cmd_pend_mtx);
		mutex_exit(&instance->completed_pool_mtx);
		return (DDI_INTR_CLAIMED);
	}

	/*
	 * Move each completed command off the pending list onto the
	 * completed pool; the soft interrupt performs the callbacks.
	 */
	while (consumer != producer) {
		context = ddi_get32(instance->mfi_internal_dma_obj.acc_handle,
		    &instance->reply_queue[consumer]);
		cmd = instance->cmd_list[context];

		if (cmd->sync_cmd == MRSAS_TRUE) {
			hdr = (struct mrsas_header *)&cmd->frame->hdr;
			if (hdr) {
				mlist_del_init(&cmd->list);
			}
		} else {
			pkt = cmd->pkt;
			if (pkt) {
				mlist_del_init(&cmd->list);
			}
		}

		mlist_add_tail(&cmd->list, &instance->completed_pool_list);

		consumer++;
		/* The reply queue holds max_fw_cmds + 1 entries: wrap. */
		if (consumer == (instance->max_fw_cmds + 1)) {
			consumer = 0;
		}
	}
	/* Publish the advanced consumer index back for the firmware. */
	ddi_put32(instance->mfi_internal_dma_obj.acc_handle,
	    instance->consumer, consumer);
	mutex_exit(&instance->cmd_pend_mtx);
	mutex_exit(&instance->completed_pool_mtx);

	(void) ddi_dma_sync(instance->mfi_internal_dma_obj.dma_handle,
	    0, 0, DDI_DMA_SYNC_FORDEV);

	/* Only kick the softint when it is not already running. */
	if (instance->softint_running) {
		need_softintr = 0;
	} else {
		need_softintr = 1;
	}

	if (instance->isr_level == HIGH_LEVEL_INTR) {
		if (need_softintr) {
			ddi_trigger_softintr(instance->soft_intr_id);
		}
	} else {
		/*
		 * Not a high-level interrupt, therefore call the soft level
		 * interrupt explicitly
		 */
		(void) mrsas_softintr(instance);
	}

	return (DDI_INTR_CLAIMED);
}
2459 2461
2460 2462
2461 2463 /*
2462 2464 * ************************************************************************** *
2463 2465 * *
2464 2466 * libraries *
2465 2467 * *
2466 2468 * ************************************************************************** *
2467 2469 */
2468 2470 /*
2469 2471 * get_mfi_pkt : Get a command from the free pool
2470 2472 * After successful allocation, the caller of this routine
2471 2473 * must clear the frame buffer (memset to zero) before
2472 2474 * using the packet further.
2473 2475 *
2474 2476 * ***** Note *****
2475 2477 * After clearing the frame buffer the context id of the
2476 2478 * frame buffer SHOULD be restored back.
2477 2479 */
2478 2480 struct mrsas_cmd *
2479 2481 mrsas_get_mfi_pkt(struct mrsas_instance *instance)
2480 2482 {
2481 2483 mlist_t *head = &instance->cmd_pool_list;
2482 2484 struct mrsas_cmd *cmd = NULL;
2483 2485
2484 2486 mutex_enter(&instance->cmd_pool_mtx);
2485 2487
2486 2488 if (!mlist_empty(head)) {
2487 2489 cmd = mlist_entry(head->next, struct mrsas_cmd, list);
2488 2490 mlist_del_init(head->next);
2489 2491 }
2490 2492 if (cmd != NULL) {
2491 2493 cmd->pkt = NULL;
2492 2494 cmd->retry_count_for_ocr = 0;
2493 2495 cmd->drv_pkt_time = 0;
2494 2496
2495 2497 }
2496 2498 mutex_exit(&instance->cmd_pool_mtx);
2497 2499
2498 2500 return (cmd);
2499 2501 }
2500 2502
2501 2503 static struct mrsas_cmd *
2502 2504 get_mfi_app_pkt(struct mrsas_instance *instance)
2503 2505 {
2504 2506 mlist_t *head = &instance->app_cmd_pool_list;
2505 2507 struct mrsas_cmd *cmd = NULL;
2506 2508
2507 2509 mutex_enter(&instance->app_cmd_pool_mtx);
2508 2510
2509 2511 if (!mlist_empty(head)) {
2510 2512 cmd = mlist_entry(head->next, struct mrsas_cmd, list);
2511 2513 mlist_del_init(head->next);
2512 2514 }
2513 2515 if (cmd != NULL) {
2514 2516 cmd->pkt = NULL;
2515 2517 cmd->retry_count_for_ocr = 0;
2516 2518 cmd->drv_pkt_time = 0;
2517 2519 }
2518 2520
2519 2521 mutex_exit(&instance->app_cmd_pool_mtx);
2520 2522
2521 2523 return (cmd);
2522 2524 }
2523 2525 /*
2524 2526 * return_mfi_pkt : Return a cmd to free command pool
2525 2527 */
2526 2528 void
2527 2529 mrsas_return_mfi_pkt(struct mrsas_instance *instance, struct mrsas_cmd *cmd)
2528 2530 {
2529 2531 mutex_enter(&instance->cmd_pool_mtx);
2530 2532 /* use mlist_add_tail for debug assistance */
2531 2533 mlist_add_tail(&cmd->list, &instance->cmd_pool_list);
2532 2534
2533 2535 mutex_exit(&instance->cmd_pool_mtx);
2534 2536 }
2535 2537
2536 2538 static void
2537 2539 return_mfi_app_pkt(struct mrsas_instance *instance, struct mrsas_cmd *cmd)
2538 2540 {
2539 2541 mutex_enter(&instance->app_cmd_pool_mtx);
2540 2542
2541 2543 mlist_add(&cmd->list, &instance->app_cmd_pool_list);
2542 2544
2543 2545 mutex_exit(&instance->app_cmd_pool_mtx);
2544 2546 }
/*
 * push_pending_mfi_pkt: queue a command on the pending list and arm the
 * per-second I/O timeout checker if it is not already running.
 */
void
push_pending_mfi_pkt(struct mrsas_instance *instance, struct mrsas_cmd *cmd)
{
	struct scsi_pkt *pkt;
	struct mrsas_header *hdr;
	con_log(CL_DLEVEL2, (CE_NOTE, "push_pending_pkt(): Called\n"));
	mutex_enter(&instance->cmd_pend_mtx);
	/* Re-queue at the tail so the list remains in issue order. */
	mlist_del_init(&cmd->list);
	mlist_add_tail(&cmd->list, &instance->cmd_pend_list);
	if (cmd->sync_cmd == MRSAS_TRUE) {
		/* Internal (sync) command: timeout comes from the MFI frame. */
		hdr = (struct mrsas_header *)&cmd->frame->hdr;
		if (hdr) {
			con_log(CL_ANN1, (CE_CONT,
			    "push_pending_mfi_pkt: "
			    "cmd %p index %x "
			    "time %llx",
			    (void *)cmd, cmd->index,
			    gethrtime()));
			/* Wait for specified interval */
			cmd->drv_pkt_time = ddi_get16(
			    cmd->frame_dma_obj.acc_handle, &hdr->timeout);
			/* Enforce the global floor on the timeout. */
			if (cmd->drv_pkt_time < debug_timeout_g)
				cmd->drv_pkt_time = (uint16_t)debug_timeout_g;
			con_log(CL_ANN1, (CE_CONT,
			    "push_pending_pkt(): "
			    "Called IO Timeout Value %x\n",
			    cmd->drv_pkt_time));
		}
		/* Start the timeout checker on first use (-1 == not armed). */
		if (hdr && instance->timeout_id == (timeout_id_t)-1) {
			instance->timeout_id = timeout(io_timeout_checker,
			    (void *) instance, drv_usectohz(MRSAS_1_SECOND));
		}
	} else {
		/* SCSA I/O: always use the global debug timeout. */
		pkt = cmd->pkt;
		if (pkt) {
			con_log(CL_ANN1, (CE_CONT,
			    "push_pending_mfi_pkt: "
			    "cmd %p index %x pkt %p, "
			    "time %llx",
			    (void *)cmd, cmd->index, (void *)pkt,
			    gethrtime()));
			cmd->drv_pkt_time = (uint16_t)debug_timeout_g;
		}
		if (pkt && instance->timeout_id == (timeout_id_t)-1) {
			instance->timeout_id = timeout(io_timeout_checker,
			    (void *) instance, drv_usectohz(MRSAS_1_SECOND));
		}
	}

	mutex_exit(&instance->cmd_pend_mtx);

}
2597 2599
/*
 * mrsas_print_pending_cmds: debug aid that walks the pending-command list
 * and logs a summary for each entry (full detail for the first one).
 * Always returns DDI_SUCCESS.
 */
int
mrsas_print_pending_cmds(struct mrsas_instance *instance)
{
	mlist_t *head = &instance->cmd_pend_list;
	mlist_t *tmp = head;
	struct mrsas_cmd *cmd = NULL;
	struct mrsas_header *hdr;
	unsigned int flag = 1;
	struct scsi_pkt *pkt;
	int saved_level;
	int cmd_count = 0;

	/* Temporarily raise verbosity so the con_log() calls are emitted. */
	saved_level = debug_level_g;
	debug_level_g = CL_ANN1;

	cmn_err(CE_NOTE, "mrsas_print_pending_cmds(): Called\n");

	while (flag) {
		/* cmd_pend_mtx is held only while stepping the list. */
		mutex_enter(&instance->cmd_pend_mtx);
		tmp = tmp->next;
		if (tmp == head) {
			mutex_exit(&instance->cmd_pend_mtx);
			flag = 0;
			con_log(CL_ANN1, (CE_CONT, "mrsas_print_pending_cmds():"
			    " NO MORE CMDS PENDING....\n"));
			break;
		} else {
			cmd = mlist_entry(tmp, struct mrsas_cmd, list);
			mutex_exit(&instance->cmd_pend_mtx);
			/*
			 * NOTE(review): cmd is dereferenced after the mutex
			 * is dropped; assumes the list is stable here (e.g.
			 * during reset handling) -- confirm with callers.
			 */
			if (cmd) {
				if (cmd->sync_cmd == MRSAS_TRUE) {
					/* Internal command: no scsi_pkt. */
					hdr = (struct mrsas_header *)
					    &cmd->frame->hdr;
					if (hdr) {
						con_log(CL_ANN1, (CE_CONT,
						    "print: cmd %p index 0x%x "
						    "drv_pkt_time 0x%x (NO-PKT)"
						    " hdr %p\n", (void *)cmd,
						    cmd->index,
						    cmd->drv_pkt_time,
						    (void *)hdr));
					}
				} else {
					pkt = cmd->pkt;
					if (pkt) {
						con_log(CL_ANN1, (CE_CONT,
						    "print: cmd %p index 0x%x "
						    "drv_pkt_time 0x%x pkt %p \n",
						    (void *)cmd, cmd->index,
						    cmd->drv_pkt_time, (void *)pkt));
					}
				}

				/* First command gets the full detail dump. */
				if (++cmd_count == 1) {
					mrsas_print_cmd_details(instance, cmd,
					    0xDD);
				} else {
					mrsas_print_cmd_details(instance, cmd,
					    1);
				}

			}
		}
	}
	con_log(CL_ANN1, (CE_CONT, "mrsas_print_pending_cmds(): Done\n"));


	debug_level_g = saved_level;

	return (DDI_SUCCESS);
}
2669 2671
2670 2672
/*
 * mrsas_complete_pending_cmds: fail back every command still on the
 * pending list -- SCSA I/Os complete as CMD_DEV_GONE, internal sync
 * commands wake their waiters with MFI_STAT_INVALID_STATUS. Used when
 * the adapter can no longer service them. Always returns DDI_SUCCESS.
 */
int
mrsas_complete_pending_cmds(struct mrsas_instance *instance)
{

	struct mrsas_cmd *cmd = NULL;
	struct scsi_pkt *pkt;
	struct mrsas_header *hdr;

	struct mlist_head *pos, *next;

	con_log(CL_ANN1, (CE_NOTE,
	    "mrsas_complete_pending_cmds(): Called"));

	mutex_enter(&instance->cmd_pend_mtx);
	/* _safe variant: entries are unlinked while iterating. */
	mlist_for_each_safe(pos, next, &instance->cmd_pend_list) {
		cmd = mlist_entry(pos, struct mrsas_cmd, list);
		if (cmd) {
			pkt = cmd->pkt;
			if (pkt) { /* for IO */
				/* Fail the I/O back to SCSA as device-gone. */
				if (((pkt->pkt_flags & FLAG_NOINTR)
				    == 0) && pkt->pkt_comp) {
					pkt->pkt_reason
					    = CMD_DEV_GONE;
					pkt->pkt_statistics
					    = STAT_DISCON;
					con_log(CL_ANN1, (CE_CONT,
					    "fail and posting to scsa "
					    "cmd %p index %x"
					    " pkt %p "
					    "time : %llx",
					    (void *)cmd, cmd->index,
					    (void *)pkt, gethrtime()));
					(*pkt->pkt_comp)(pkt);
				}
			} else { /* for DCMDS */
				if (cmd->sync_cmd == MRSAS_TRUE) {
					hdr = (struct mrsas_header *)&cmd->frame->hdr;
					con_log(CL_ANN1, (CE_CONT,
					    "posting invalid status to application "
					    "cmd %p index %x"
					    " hdr %p "
					    "time : %llx",
					    (void *)cmd, cmd->index,
					    (void *)hdr, gethrtime()));
					/*
					 * NOTE(review): cmd_status is written
					 * directly rather than via ddi_put8()
					 * on the frame access handle --
					 * confirm this is intentional.
					 */
					hdr->cmd_status = MFI_STAT_INVALID_STATUS;
					complete_cmd_in_sync_mode(instance, cmd);
				}
			}
			mlist_del_init(&cmd->list);
		} else {
			con_log(CL_ANN1, (CE_CONT,
			    "mrsas_complete_pending_cmds:"
			    "NULL command\n"));
		}
		con_log(CL_ANN1, (CE_CONT,
		    "mrsas_complete_pending_cmds:"
		    "looping for more commands\n"));
	}
	mutex_exit(&instance->cmd_pend_mtx);

	con_log(CL_ANN1, (CE_CONT, "mrsas_complete_pending_cmds(): DONE\n"));
	return (DDI_SUCCESS);
}
2734 2736
/*
 * mrsas_print_cmd_details: log a debug description of one command.
 * detail == 0xDD requests a full dump (including the MPI2 frame on
 * Thunderbolt/Fury adapters) at temporarily raised verbosity.
 */
void
mrsas_print_cmd_details(struct mrsas_instance *instance, struct mrsas_cmd *cmd,
    int detail)
{
	struct scsi_pkt *pkt = cmd->pkt;
	Mpi2RaidSCSIIORequest_t *scsi_io = cmd->scsi_io_request;
	int i;
	int saved_level;
	ddi_acc_handle_t acc_handle =
	    instance->mpi2_frame_pool_dma_obj.acc_handle;

	/* saved_level is set and restored only in the 0xDD (full) case. */
	if (detail == 0xDD) {
		saved_level = debug_level_g;
		debug_level_g = CL_ANN1;
	}


	if (instance->tbolt) {
		con_log(CL_ANN1, (CE_CONT, "print_cmd_details: cmd %p "
		    "cmd->index 0x%x SMID 0x%x timer 0x%x sec\n",
		    (void *)cmd, cmd->index, cmd->SMID, cmd->drv_pkt_time));
	} else {
		con_log(CL_ANN1, (CE_CONT, "print_cmd_details: cmd %p "
		    "cmd->index 0x%x timer 0x%x sec\n",
		    (void *)cmd, cmd->index, cmd->drv_pkt_time));
	}

	if (pkt) {
		con_log(CL_ANN1, (CE_CONT, "scsi_pkt CDB[0]=0x%x",
		    pkt->pkt_cdbp[0]));
	} else {
		con_log(CL_ANN1, (CE_CONT, "NO-PKT"));
	}

	/* MPI2 frame contents exist only on Thunderbolt/Fury adapters. */
	if ((detail == 0xDD) && instance->tbolt) {
		con_log(CL_ANN1, (CE_CONT, "RAID_SCSI_IO_REQUEST\n"));
		con_log(CL_ANN1, (CE_CONT, "DevHandle=0x%X Function=0x%X "
		    "IoFlags=0x%X SGLFlags=0x%X DataLength=0x%X\n",
		    ddi_get16(acc_handle, &scsi_io->DevHandle),
		    ddi_get8(acc_handle, &scsi_io->Function),
		    ddi_get16(acc_handle, &scsi_io->IoFlags),
		    ddi_get16(acc_handle, &scsi_io->SGLFlags),
		    ddi_get32(acc_handle, &scsi_io->DataLength)));

		for (i = 0; i < 32; i++) {
			con_log(CL_ANN1, (CE_CONT, "CDB[%d]=0x%x ", i,
			    ddi_get8(acc_handle, &scsi_io->CDB.CDB32[i])));
		}

		con_log(CL_ANN1, (CE_CONT, "RAID-CONTEXT\n"));
		con_log(CL_ANN1, (CE_CONT, "status=0x%X extStatus=0x%X "
		    "ldTargetId=0x%X timeoutValue=0x%X regLockFlags=0x%X "
		    "RAIDFlags=0x%X regLockRowLBA=0x%" PRIu64
		    " regLockLength=0x%X spanArm=0x%X\n",
		    ddi_get8(acc_handle, &scsi_io->RaidContext.status),
		    ddi_get8(acc_handle, &scsi_io->RaidContext.extStatus),
		    ddi_get16(acc_handle, &scsi_io->RaidContext.ldTargetId),
		    ddi_get16(acc_handle, &scsi_io->RaidContext.timeoutValue),
		    ddi_get8(acc_handle, &scsi_io->RaidContext.regLockFlags),
		    ddi_get8(acc_handle, &scsi_io->RaidContext.RAIDFlags),
		    ddi_get64(acc_handle, &scsi_io->RaidContext.regLockRowLBA),
		    ddi_get32(acc_handle, &scsi_io->RaidContext.regLockLength),
		    ddi_get8(acc_handle, &scsi_io->RaidContext.spanArm)));
	}

	/* Restore the caller's verbosity level. */
	if (detail == 0xDD) {
		debug_level_g = saved_level;
	}
}
2804 2806
2805 2807
/*
 * mrsas_issue_pending_cmds - re-issue every command parked on
 * cmd_pend_list, typically after an online controller reset (OCR).
 *
 * For each pending command the packet timeout is topped up to at least
 * debug_timeout_g and the per-command OCR retry counter is bumped.  A
 * command that has already been retried more than IO_RETRY_COUNT times
 * causes the adapter to be killed (tbolt or legacy variant) and the
 * function returns DDI_FAILURE immediately; otherwise every command is
 * re-issued (sync commands via issue_cmd_in_sync_mode, the rest via
 * issue_cmd) and DDI_SUCCESS is returned.
 *
 * NOTE(review): cmd_pend_mtx is taken and dropped per list node, and the
 * initial tmp = head->next read happens before the lock is first taken —
 * presumably safe because this runs single-threaded during OCR recovery;
 * confirm against the callers.
 */
int
mrsas_issue_pending_cmds(struct mrsas_instance *instance)
{
	mlist_t *head = &instance->cmd_pend_list;
	mlist_t *tmp = head->next;
	struct mrsas_cmd *cmd = NULL;
	struct scsi_pkt *pkt;

	con_log(CL_ANN1, (CE_NOTE, "mrsas_issue_pending_cmds(): Called"));
	while (tmp != head) {
		/* advance under the lock; tmp may be re-linked by completion */
		mutex_enter(&instance->cmd_pend_mtx);
		cmd = mlist_entry(tmp, struct mrsas_cmd, list);
		tmp = tmp->next;
		mutex_exit(&instance->cmd_pend_mtx);
		if (cmd) {
			con_log(CL_ANN1, (CE_CONT,
			    "mrsas_issue_pending_cmds(): "
			    "Got a cmd: cmd %p index 0x%x drv_pkt_time 0x%x ",
			    (void *)cmd, cmd->index, cmd->drv_pkt_time));

			/* Reset command timeout value */
			if (cmd->drv_pkt_time < debug_timeout_g)
				cmd->drv_pkt_time = (uint16_t)debug_timeout_g;

			cmd->retry_count_for_ocr++;

			cmn_err(CE_CONT, "cmd retry count = %d\n",
			    cmd->retry_count_for_ocr);

			/* too many OCR retries: give up and kill the HBA */
			if (cmd->retry_count_for_ocr > IO_RETRY_COUNT) {
				cmn_err(CE_WARN, "mrsas_issue_pending_cmds(): "
				    "cmd->retry_count exceeded limit >%d\n",
				    IO_RETRY_COUNT);
				mrsas_print_cmd_details(instance, cmd, 0xDD);

				cmn_err(CE_WARN,
				    "mrsas_issue_pending_cmds():"
				    "Calling KILL Adapter\n");
				if (instance->tbolt)
					mrsas_tbolt_kill_adapter(instance);
				else
					(void) mrsas_kill_adapter(instance);
				return (DDI_FAILURE);
			}

			pkt = cmd->pkt;
			if (pkt) {
				con_log(CL_ANN1, (CE_CONT,
				    "PENDING PKT-CMD ISSUE: cmd %p index %x "
				    "pkt %p time %llx",
				    (void *)cmd, cmd->index,
				    (void *)pkt,
				    gethrtime()));

			} else {
				/* internal (packet-less) command */
				cmn_err(CE_CONT,
				    "mrsas_issue_pending_cmds(): NO-PKT, "
				    "cmd %p index 0x%x drv_pkt_time 0x%x ",
				    (void *)cmd, cmd->index, cmd->drv_pkt_time);
			}


			if (cmd->sync_cmd == MRSAS_TRUE) {
				cmn_err(CE_CONT, "mrsas_issue_pending_cmds(): "
				    "SYNC_CMD == TRUE \n");
				instance->func_ptr->issue_cmd_in_sync_mode(
				    instance, cmd);
			} else {
				instance->func_ptr->issue_cmd(cmd, instance);
			}
		} else {
			con_log(CL_ANN1, (CE_CONT,
			    "mrsas_issue_pending_cmds: NULL command\n"));
		}
		con_log(CL_ANN1, (CE_CONT,
		    "mrsas_issue_pending_cmds:"
		    "looping for more commands"));
	}
	con_log(CL_ANN1, (CE_CONT, "mrsas_issue_pending_cmds(): DONE\n"));
	return (DDI_SUCCESS);
}
2887 2889
2888 2890
2889 2891
2890 2892 /*
2891 2893 * destroy_mfi_frame_pool
2892 2894 */
2893 2895 void
2894 2896 destroy_mfi_frame_pool(struct mrsas_instance *instance)
2895 2897 {
2896 2898 int i;
2897 2899 uint32_t max_cmd = instance->max_fw_cmds;
2898 2900
2899 2901 struct mrsas_cmd *cmd;
2900 2902
2901 2903 /* return all frames to pool */
2902 2904
2903 2905 for (i = 0; i < max_cmd; i++) {
2904 2906
2905 2907 cmd = instance->cmd_list[i];
2906 2908
2907 2909 if (cmd->frame_dma_obj_status == DMA_OBJ_ALLOCATED)
2908 2910 (void) mrsas_free_dma_obj(instance, cmd->frame_dma_obj);
2909 2911
2910 2912 cmd->frame_dma_obj_status = DMA_OBJ_FREED;
2911 2913 }
2912 2914
2913 2915 }
2914 2916
/*
 * create_mfi_frame_pool - allocate one DMA frame buffer per command.
 *
 * Each command gets a single contiguous, 64-byte aligned, single-cookie
 * DMA allocation of tot_frame_size bytes laid out as:
 *
 *	[0 .. MRMFI_FRAME_SIZE + sgl_sz)	MFI frame + IEEE SGL area
 *	[tot_frame_size - SENSE_LENGTH ..)	sense buffer
 *
 * On success every cmd has frame, frame_phys_addr, sense and
 * sense_phys_addr populated and its index stored in the frame context.
 * Returns DDI_SUCCESS, or a non-zero value (DDI_FAILURE/ENOMEM) after
 * undoing any partial allocation via destroy_mfi_frame_pool().
 */
int
create_mfi_frame_pool(struct mrsas_instance *instance)
{
	int		i = 0;
	int		cookie_cnt;
	uint16_t	max_cmd;
	uint16_t	sge_sz;
	uint32_t	sgl_sz;
	uint32_t	tot_frame_size;
	struct mrsas_cmd	*cmd;
	int	retval = DDI_SUCCESS;

	max_cmd = instance->max_fw_cmds;
	sge_sz = sizeof (struct mrsas_sge_ieee);
	/* calculated the number of 64byte frames required for SGL */
	sgl_sz = sge_sz * instance->max_num_sge;
	tot_frame_size = sgl_sz + MRMFI_FRAME_SIZE + SENSE_LENGTH;

	con_log(CL_DLEVEL3, (CE_NOTE, "create_mfi_frame_pool: "
	    "sgl_sz %x tot_frame_size %x", sgl_sz, tot_frame_size));

	while (i < max_cmd) {
		cmd = instance->cmd_list[i];

		/* single-cookie, 64-byte aligned buffer per command */
		cmd->frame_dma_obj.size = tot_frame_size;
		cmd->frame_dma_obj.dma_attr = mrsas_generic_dma_attr;
		cmd->frame_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
		cmd->frame_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
		cmd->frame_dma_obj.dma_attr.dma_attr_sgllen = 1;
		cmd->frame_dma_obj.dma_attr.dma_attr_align = 64;

		cookie_cnt = mrsas_alloc_dma_obj(instance, &cmd->frame_dma_obj,
		    (uchar_t)DDI_STRUCTURE_LE_ACC);

		/* >1 cookie would break the phys-addr arithmetic below */
		if (cookie_cnt == -1 || cookie_cnt > 1) {
			cmn_err(CE_WARN,
			    "create_mfi_frame_pool: could not alloc.");
			retval = DDI_FAILURE;
			goto mrsas_undo_frame_pool;
		}

		bzero(cmd->frame_dma_obj.buffer, tot_frame_size);

		cmd->frame_dma_obj_status = DMA_OBJ_ALLOCATED;
		cmd->frame = (union mrsas_frame *)cmd->frame_dma_obj.buffer;
		cmd->frame_phys_addr =
		    cmd->frame_dma_obj.dma_cookie[0].dmac_address;

		/* sense buffer lives at the tail of the same allocation */
		cmd->sense = (uint8_t *)(((unsigned long)
		    cmd->frame_dma_obj.buffer) +
		    tot_frame_size - SENSE_LENGTH);
		cmd->sense_phys_addr =
		    cmd->frame_dma_obj.dma_cookie[0].dmac_address +
		    tot_frame_size - SENSE_LENGTH;

		if (!cmd->frame || !cmd->sense) {
			cmn_err(CE_WARN,
			    "mr_sas: pci_pool_alloc failed");
			retval = ENOMEM;
			goto mrsas_undo_frame_pool;
		}

		/* stash the command index in the frame's context field */
		ddi_put32(cmd->frame_dma_obj.acc_handle,
		    &cmd->frame->io.context, cmd->index);
		i++;

		con_log(CL_DLEVEL3, (CE_NOTE, "[%x]-%x",
		    cmd->index, cmd->frame_phys_addr));
	}

	return (DDI_SUCCESS);

mrsas_undo_frame_pool:
	/* free whatever was allocated before the failure */
	if (i > 0)
		destroy_mfi_frame_pool(instance);

	return (retval);
}
2996 2998
2997 2999 /*
2998 3000 * free_additional_dma_buffer
2999 3001 */
3000 3002 static void
3001 3003 free_additional_dma_buffer(struct mrsas_instance *instance)
3002 3004 {
3003 3005 if (instance->mfi_internal_dma_obj.status == DMA_OBJ_ALLOCATED) {
3004 3006 (void) mrsas_free_dma_obj(instance,
3005 3007 instance->mfi_internal_dma_obj);
3006 3008 instance->mfi_internal_dma_obj.status = DMA_OBJ_FREED;
3007 3009 }
3008 3010
3009 3011 if (instance->mfi_evt_detail_obj.status == DMA_OBJ_ALLOCATED) {
3010 3012 (void) mrsas_free_dma_obj(instance,
3011 3013 instance->mfi_evt_detail_obj);
3012 3014 instance->mfi_evt_detail_obj.status = DMA_OBJ_FREED;
3013 3015 }
3014 3016 }
3015 3017
/*
 * alloc_additional_dma_buffer - allocate the MFI reply queue plus the
 * internal data-transfer buffer, and the AEN event-detail buffer.
 *
 * The 2-page mfi_internal_dma_obj allocation is carved up as:
 *	[0..3]			producer index
 *	[4..7]			consumer index
 *	[8..reply_q_sz+8)	reply queue (max_fw_cmds + 1 entries)
 *	[reply_q_sz+8..)	internal_buf, used as the DCMD payload
 *				area (e.g. controller info in
 *				get_ctrl_info())
 *
 * Returns DDI_SUCCESS, or DDI_FAILURE after releasing the internal
 * buffer if the event-detail allocation fails.
 */
static int
alloc_additional_dma_buffer(struct mrsas_instance *instance)
{
	uint32_t	reply_q_sz;
	uint32_t	internal_buf_size = PAGESIZE*2;

	/* max cmds plus 1 + producer & consumer */
	reply_q_sz = sizeof (uint32_t) * (instance->max_fw_cmds + 1 + 2);

	instance->mfi_internal_dma_obj.size = internal_buf_size;
	instance->mfi_internal_dma_obj.dma_attr = mrsas_generic_dma_attr;
	instance->mfi_internal_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
	instance->mfi_internal_dma_obj.dma_attr.dma_attr_count_max =
	    0xFFFFFFFFU;
	instance->mfi_internal_dma_obj.dma_attr.dma_attr_sgllen = 1;

	if (mrsas_alloc_dma_obj(instance, &instance->mfi_internal_dma_obj,
	    (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
		cmn_err(CE_WARN,
		    "mr_sas: could not alloc reply queue");
		return (DDI_FAILURE);
	}

	bzero(instance->mfi_internal_dma_obj.buffer, internal_buf_size);

	instance->mfi_internal_dma_obj.status |= DMA_OBJ_ALLOCATED;

	/* carve the single allocation into its fixed-offset pieces */
	instance->producer = (uint32_t *)((unsigned long)
	    instance->mfi_internal_dma_obj.buffer);
	instance->consumer = (uint32_t *)((unsigned long)
	    instance->mfi_internal_dma_obj.buffer + 4);
	instance->reply_queue = (uint32_t *)((unsigned long)
	    instance->mfi_internal_dma_obj.buffer + 8);
	instance->internal_buf = (caddr_t)(((unsigned long)
	    instance->mfi_internal_dma_obj.buffer) + reply_q_sz + 8);
	instance->internal_buf_dmac_add =
	    instance->mfi_internal_dma_obj.dma_cookie[0].dmac_address +
	    (reply_q_sz + 8);
	instance->internal_buf_size = internal_buf_size -
	    (reply_q_sz + 8);

	/* allocate evt_detail */
	instance->mfi_evt_detail_obj.size = sizeof (struct mrsas_evt_detail);
	instance->mfi_evt_detail_obj.dma_attr = mrsas_generic_dma_attr;
	instance->mfi_evt_detail_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
	instance->mfi_evt_detail_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
	instance->mfi_evt_detail_obj.dma_attr.dma_attr_sgllen = 1;
	instance->mfi_evt_detail_obj.dma_attr.dma_attr_align = 1;

	if (mrsas_alloc_dma_obj(instance, &instance->mfi_evt_detail_obj,
	    (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
		cmn_err(CE_WARN, "alloc_additional_dma_buffer: "
		    "could not allocate data transfer buffer.");
		goto mrsas_undo_internal_buff;
	}

	bzero(instance->mfi_evt_detail_obj.buffer,
	    sizeof (struct mrsas_evt_detail));

	instance->mfi_evt_detail_obj.status |= DMA_OBJ_ALLOCATED;

	return (DDI_SUCCESS);

mrsas_undo_internal_buff:
	/* event-detail allocation failed: give back the internal buffer */
	if (instance->mfi_internal_dma_obj.status == DMA_OBJ_ALLOCATED) {
		(void) mrsas_free_dma_obj(instance,
		    instance->mfi_internal_dma_obj);
		instance->mfi_internal_dma_obj.status = DMA_OBJ_FREED;
	}

	return (DDI_FAILURE);
}
3091 3093
3092 3094
3093 3095 void
3094 3096 mrsas_free_cmd_pool(struct mrsas_instance *instance)
3095 3097 {
3096 3098 int i;
3097 3099 uint32_t max_cmd;
3098 3100 size_t sz;
3099 3101
3100 3102 /* already freed */
3101 3103 if (instance->cmd_list == NULL) {
3102 3104 return;
3103 3105 }
3104 3106
3105 3107 max_cmd = instance->max_fw_cmds;
3106 3108
3107 3109 /* size of cmd_list array */
3108 3110 sz = sizeof (struct mrsas_cmd *) * max_cmd;
3109 3111
3110 3112 /* First free each cmd */
3111 3113 for (i = 0; i < max_cmd; i++) {
3112 3114 if (instance->cmd_list[i] != NULL) {
3113 3115 kmem_free(instance->cmd_list[i],
3114 3116 sizeof (struct mrsas_cmd));
3115 3117 }
3116 3118
3117 3119 instance->cmd_list[i] = NULL;
3118 3120 }
3119 3121
3120 3122 /* Now, free cmd_list array */
3121 3123 if (instance->cmd_list != NULL)
3122 3124 kmem_free(instance->cmd_list, sz);
3123 3125
3124 3126 instance->cmd_list = NULL;
3125 3127
3126 3128 INIT_LIST_HEAD(&instance->cmd_pool_list);
3127 3129 INIT_LIST_HEAD(&instance->cmd_pend_list);
3128 3130 if (instance->tbolt) {
3129 3131 INIT_LIST_HEAD(&instance->cmd_app_pool_list);
3130 3132 } else {
3131 3133 INIT_LIST_HEAD(&instance->app_cmd_pool_list);
3132 3134 }
3133 3135
3134 3136 }
3135 3137
3136 3138
3137 3139 /*
3138 3140 * mrsas_alloc_cmd_pool
3139 3141 */
3140 3142 int
3141 3143 mrsas_alloc_cmd_pool(struct mrsas_instance *instance)
3142 3144 {
3143 3145 int i;
3144 3146 int count;
3145 3147 uint32_t max_cmd;
3146 3148 uint32_t reserve_cmd;
3147 3149 size_t sz;
3148 3150
3149 3151 struct mrsas_cmd *cmd;
3150 3152
3151 3153 max_cmd = instance->max_fw_cmds;
3152 3154 con_log(CL_ANN1, (CE_NOTE, "mrsas_alloc_cmd_pool: "
3153 3155 "max_cmd %x", max_cmd));
3154 3156
3155 3157
3156 3158 sz = sizeof (struct mrsas_cmd *) * max_cmd;
3157 3159
3158 3160 /*
3159 3161 * instance->cmd_list is an array of struct mrsas_cmd pointers.
3160 3162 * Allocate the dynamic array first and then allocate individual
3161 3163 * commands.
3162 3164 */
3163 3165 instance->cmd_list = kmem_zalloc(sz, KM_SLEEP);
3164 3166 ASSERT(instance->cmd_list);
3165 3167
3166 3168 /* create a frame pool and assign one frame to each cmd */
3167 3169 for (count = 0; count < max_cmd; count++) {
3168 3170 instance->cmd_list[count] =
3169 3171 kmem_zalloc(sizeof (struct mrsas_cmd), KM_SLEEP);
3170 3172 ASSERT(instance->cmd_list[count]);
3171 3173 }
3172 3174
3173 3175 /* add all the commands to command pool */
3174 3176
3175 3177 INIT_LIST_HEAD(&instance->cmd_pool_list);
3176 3178 INIT_LIST_HEAD(&instance->cmd_pend_list);
3177 3179 INIT_LIST_HEAD(&instance->app_cmd_pool_list);
3178 3180
3179 3181 /*
3180 3182 * When max_cmd is lower than MRSAS_APP_RESERVED_CMDS, how do I split
3181 3183 * into app_cmd and regular cmd? For now, just take
3182 3184 * max(1/8th of max, 4);
3183 3185 */
3184 3186 reserve_cmd = min(MRSAS_APP_RESERVED_CMDS,
3185 3187 max(max_cmd >> 3, MRSAS_APP_MIN_RESERVED_CMDS));
3186 3188
3187 3189 for (i = 0; i < reserve_cmd; i++) {
3188 3190 cmd = instance->cmd_list[i];
3189 3191 cmd->index = i;
3190 3192 mlist_add_tail(&cmd->list, &instance->app_cmd_pool_list);
3191 3193 }
3192 3194
3193 3195
3194 3196 for (i = reserve_cmd; i < max_cmd; i++) {
3195 3197 cmd = instance->cmd_list[i];
3196 3198 cmd->index = i;
3197 3199 mlist_add_tail(&cmd->list, &instance->cmd_pool_list);
3198 3200 }
3199 3201
3200 3202 return (DDI_SUCCESS);
3201 3203
3202 3204 mrsas_undo_cmds:
3203 3205 if (count > 0) {
3204 3206 /* free each cmd */
3205 3207 for (i = 0; i < count; i++) {
3206 3208 if (instance->cmd_list[i] != NULL) {
3207 3209 kmem_free(instance->cmd_list[i],
3208 3210 sizeof (struct mrsas_cmd));
3209 3211 }
3210 3212 instance->cmd_list[i] = NULL;
3211 3213 }
3212 3214 }
3213 3215
3214 3216 mrsas_undo_cmd_list:
3215 3217 if (instance->cmd_list != NULL)
3216 3218 kmem_free(instance->cmd_list, sz);
3217 3219 instance->cmd_list = NULL;
3218 3220
3219 3221 return (DDI_FAILURE);
3220 3222 }
3221 3223
3222 3224
3223 3225 /*
3224 3226 * free_space_for_mfi
3225 3227 */
3226 3228 static void
3227 3229 free_space_for_mfi(struct mrsas_instance *instance)
3228 3230 {
3229 3231
3230 3232 /* already freed */
3231 3233 if (instance->cmd_list == NULL) {
3232 3234 return;
3233 3235 }
3234 3236
3235 3237 /* Free additional dma buffer */
3236 3238 free_additional_dma_buffer(instance);
3237 3239
3238 3240 /* Free the MFI frame pool */
3239 3241 destroy_mfi_frame_pool(instance);
3240 3242
3241 3243 /* Free all the commands in the cmd_list */
3242 3244 /* Free the cmd_list buffer itself */
3243 3245 mrsas_free_cmd_pool(instance);
3244 3246 }
3245 3247
3246 3248 /*
3247 3249 * alloc_space_for_mfi
3248 3250 */
3249 3251 static int
3250 3252 alloc_space_for_mfi(struct mrsas_instance *instance)
3251 3253 {
3252 3254 /* Allocate command pool (memory for cmd_list & individual commands) */
3253 3255 if (mrsas_alloc_cmd_pool(instance)) {
3254 3256 cmn_err(CE_WARN, "error creating cmd pool");
3255 3257 return (DDI_FAILURE);
3256 3258 }
3257 3259
3258 3260 /* Allocate MFI Frame pool */
3259 3261 if (create_mfi_frame_pool(instance)) {
3260 3262 cmn_err(CE_WARN, "error creating frame DMA pool");
3261 3263 goto mfi_undo_cmd_pool;
3262 3264 }
3263 3265
3264 3266 /* Allocate additional DMA buffer */
3265 3267 if (alloc_additional_dma_buffer(instance)) {
3266 3268 cmn_err(CE_WARN, "error creating frame DMA pool");
3267 3269 goto mfi_undo_frame_pool;
3268 3270 }
3269 3271
3270 3272 return (DDI_SUCCESS);
3271 3273
3272 3274 mfi_undo_frame_pool:
3273 3275 destroy_mfi_frame_pool(instance);
3274 3276
3275 3277 mfi_undo_cmd_pool:
3276 3278 mrsas_free_cmd_pool(instance);
3277 3279
3278 3280 return (DDI_FAILURE);
3279 3281 }
3280 3282
3281 3283
3282 3284
/*
 * get_ctrl_info - fetch controller information from the firmware.
 *
 * Issues a MR_DCMD_CTRL_GET_INFO DCMD in polled mode, with the data
 * landing in instance->internal_buf, and copies the fields the driver
 * cares about (max_request_size, ld_present_count, on_off_properties,
 * product_name) into the caller's ctrl_info through the endian-aware
 * ddi_get* accessors.  On tbolt controllers the MFI frame is wrapped
 * into an MPT pass-through before issue.
 *
 * Returns 0 on success, -1 on DCMD failure or FMA check failure,
 * DDI_FAILURE if no command could be obtained.
 */
static int
get_ctrl_info(struct mrsas_instance *instance,
    struct mrsas_ctrl_info *ctrl_info)
{
	int ret = 0;

	struct mrsas_cmd		*cmd;
	struct mrsas_dcmd_frame	*dcmd;
	struct mrsas_ctrl_info	*ci;

	/* tbolt controllers use the RAID-message MFI packet pool */
	if (instance->tbolt) {
		cmd = get_raid_msg_mfi_pkt(instance);
	} else {
		cmd = mrsas_get_mfi_pkt(instance);
	}

	if (!cmd) {
		con_log(CL_ANN, (CE_WARN,
		    "Failed to get a cmd for ctrl info"));
		DTRACE_PROBE2(info_mfi_err, uint16_t, instance->fw_outstanding,
		    uint16_t, instance->max_fw_cmds);
		return (DDI_FAILURE);
	}

	/* Clear the frame buffer and assign back the context id */
	(void) memset((char *)&cmd->frame[0], 0, sizeof (union mrsas_frame));
	ddi_put32(cmd->frame_dma_obj.acc_handle, &cmd->frame->hdr.context,
	    cmd->index);

	dcmd = &cmd->frame->dcmd;

	/* DCMD payload lands in the pre-allocated internal buffer */
	ci = (struct mrsas_ctrl_info *)instance->internal_buf;

	if (!ci) {
		cmn_err(CE_WARN,
		    "Failed to alloc mem for ctrl info");
		mrsas_return_mfi_pkt(instance, cmd);
		return (DDI_FAILURE);
	}

	(void) memset(ci, 0, sizeof (struct mrsas_ctrl_info));

	/* for( i = 0; i < DCMD_MBOX_SZ; i++ ) dcmd->mbox.b[i] = 0; */
	(void) memset(dcmd->mbox.b, 0, DCMD_MBOX_SZ);

	/* build the polled-mode DCMD frame with a single read SGE */
	ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->cmd, MFI_CMD_OP_DCMD);
	ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->cmd_status,
	    MFI_CMD_STATUS_POLL_MODE);
	ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->sge_count, 1);
	ddi_put16(cmd->frame_dma_obj.acc_handle, &dcmd->flags,
	    MFI_FRAME_DIR_READ);
	ddi_put16(cmd->frame_dma_obj.acc_handle, &dcmd->timeout, 0);
	ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->data_xfer_len,
	    sizeof (struct mrsas_ctrl_info));
	ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->opcode,
	    MR_DCMD_CTRL_GET_INFO);
	ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->sgl.sge32[0].phys_addr,
	    instance->internal_buf_dmac_add);
	ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->sgl.sge32[0].length,
	    sizeof (struct mrsas_ctrl_info));

	cmd->frame_count = 1;

	if (instance->tbolt) {
		/* wrap the MFI frame into an MPT pass-through command */
		mr_sas_tbolt_build_mfi_cmd(instance, cmd);
	}

	if (!instance->func_ptr->issue_cmd_in_poll_mode(instance, cmd)) {
		ret = 0;

		/* copy out via ddi_get* to honor device endianness */
		ctrl_info->max_request_size = ddi_get32(
		    cmd->frame_dma_obj.acc_handle, &ci->max_request_size);

		ctrl_info->ld_present_count = ddi_get16(
		    cmd->frame_dma_obj.acc_handle, &ci->ld_present_count);

		ctrl_info->properties.on_off_properties = ddi_get32(
		    cmd->frame_dma_obj.acc_handle,
		    &ci->properties.on_off_properties);
		ddi_rep_get8(cmd->frame_dma_obj.acc_handle,
		    (uint8_t *)(ctrl_info->product_name),
		    (uint8_t *)(ci->product_name), 80 * sizeof (char),
		    DDI_DEV_AUTOINCR);
		/* should get more members of ci with ddi_get when needed */
	} else {
		cmn_err(CE_WARN, "get_ctrl_info: Ctrl info failed");
		ret = -1;
	}

	/* FMA access check downgrades success to failure on fault */
	if (mrsas_common_check(instance, cmd) != DDI_SUCCESS) {
		ret = -1;
	}
	if (instance->tbolt) {
		return_raid_msg_mfi_pkt(instance, cmd);
	} else {
		mrsas_return_mfi_pkt(instance, cmd);
	}

	return (ret);
}
3386 3388
/*
 * abort_aen_cmd - abort the outstanding asynchronous-event (AEN)
 * command, typically before re-registering for events or detaching.
 *
 * Builds an MFI_CMD_OP_ABORT frame referencing cmd_to_abort's index and
 * physical frame address, issues it in polled mode (wrapped for tbolt
 * controllers), clears instance->aen_cmd, and decrements the
 * outstanding-command count for the aborted AEN.
 *
 * Returns 0 on success, -1 if the polled issue failed, DDI_FAILURE if
 * no command could be obtained.
 */
static int
abort_aen_cmd(struct mrsas_instance *instance,
    struct mrsas_cmd *cmd_to_abort)
{
	int	ret = 0;

	struct mrsas_cmd		*cmd;
	struct mrsas_abort_frame	*abort_fr;

	con_log(CL_ANN1, (CE_NOTE, "chkpnt: abort_aen:%d", __LINE__));

	if (instance->tbolt) {
		cmd = get_raid_msg_mfi_pkt(instance);
	} else {
		cmd = mrsas_get_mfi_pkt(instance);
	}

	if (!cmd) {
		con_log(CL_ANN1, (CE_WARN,
		    "abort_aen_cmd():Failed to get a cmd for abort_aen_cmd"));
		DTRACE_PROBE2(abort_mfi_err, uint16_t, instance->fw_outstanding,
		    uint16_t, instance->max_fw_cmds);
		return (DDI_FAILURE);
	}

	/* Clear the frame buffer and assign back the context id */
	(void) memset((char *)&cmd->frame[0], 0, sizeof (union mrsas_frame));
	ddi_put32(cmd->frame_dma_obj.acc_handle, &cmd->frame->hdr.context,
	    cmd->index);

	abort_fr = &cmd->frame->abort;

	/* prepare and issue the abort frame */
	ddi_put8(cmd->frame_dma_obj.acc_handle,
	    &abort_fr->cmd, MFI_CMD_OP_ABORT);
	ddi_put8(cmd->frame_dma_obj.acc_handle, &abort_fr->cmd_status,
	    MFI_CMD_STATUS_SYNC_MODE);
	ddi_put16(cmd->frame_dma_obj.acc_handle, &abort_fr->flags, 0);
	ddi_put32(cmd->frame_dma_obj.acc_handle, &abort_fr->abort_context,
	    cmd_to_abort->index);
	ddi_put32(cmd->frame_dma_obj.acc_handle,
	    &abort_fr->abort_mfi_phys_addr_lo, cmd_to_abort->frame_phys_addr);
	ddi_put32(cmd->frame_dma_obj.acc_handle,
	    &abort_fr->abort_mfi_phys_addr_hi, 0);

	/* mark the AEN command as being aborted before issuing */
	instance->aen_cmd->abort_aen = 1;

	cmd->frame_count = 1;

	if (instance->tbolt) {
		mr_sas_tbolt_build_mfi_cmd(instance, cmd);
	}

	if (instance->func_ptr->issue_cmd_in_poll_mode(instance, cmd)) {
		con_log(CL_ANN1, (CE_WARN,
		    "abort_aen_cmd: issue_cmd_in_poll_mode failed"));
		ret = -1;
	} else {
		ret = 0;
	}

	/*
	 * NOTE(review): abort_aen was already set above; this second
	 * assignment looks redundant — confirm before removing.  Also,
	 * aen_cmd is cleared with 0 rather than NULL.
	 */
	instance->aen_cmd->abort_aen = 1;
	instance->aen_cmd = 0;

	if (instance->tbolt) {
		return_raid_msg_mfi_pkt(instance, cmd);
	} else {
		mrsas_return_mfi_pkt(instance, cmd);
	}

	/* the aborted AEN no longer counts as outstanding at the FW */
	atomic_add_16(&instance->fw_outstanding, (-1));

	return (ret);
}
3464 3466
3465 3467
/*
 * mrsas_build_init_cmd - fill in an MFI INIT frame for *cmd_ptr.
 *
 * The init frame points at a queue-info structure that is placed in the
 * frame's own SGL area (64 bytes past the frame start) — this frame
 * needs no SGL, so that space is reused.  The queue info carries the
 * physical addresses of the producer index, consumer index and reply
 * queue inside mfi_internal_dma_obj (offsets 0, 4 and 8 respectively;
 * see alloc_additional_dma_buffer()).  A DMA buffer holding the driver
 * version string is also allocated here and its address stored in the
 * frame, so fail_fw_init unwind paths must free drv_ver_dma_obj.
 *
 * Returns DDI_SUCCESS, or DDI_FAILURE if the driver-version buffer
 * could not be allocated.
 */
static int
mrsas_build_init_cmd(struct mrsas_instance *instance,
    struct mrsas_cmd **cmd_ptr)
{
	struct mrsas_cmd		*cmd;
	struct mrsas_init_frame		*init_frame;
	struct mrsas_init_queue_info	*initq_info;
	struct mrsas_drv_ver		drv_ver_info;


	/*
	 * Prepare a init frame. Note the init frame points to queue info
	 * structure. Each frame has SGL allocated after first 64 bytes. For
	 * this frame - since we don't need any SGL - we use SGL's space as
	 * queue info structure
	 */
	cmd = *cmd_ptr;


	/* Clear the frame buffer and assign back the context id */
	(void) memset((char *)&cmd->frame[0], 0, sizeof (union mrsas_frame));
	ddi_put32(cmd->frame_dma_obj.acc_handle, &cmd->frame->hdr.context,
	    cmd->index);

	init_frame = (struct mrsas_init_frame *)cmd->frame;
	initq_info = (struct mrsas_init_queue_info *)
	    ((unsigned long)init_frame + 64);

	(void) memset(init_frame, 0, MRMFI_FRAME_SIZE);
	(void) memset(initq_info, 0, sizeof (struct mrsas_init_queue_info));

	ddi_put32(cmd->frame_dma_obj.acc_handle, &initq_info->init_flags, 0);

	/* one more entry than the max commands the driver will issue */
	ddi_put32(cmd->frame_dma_obj.acc_handle,
	    &initq_info->reply_queue_entries, instance->max_fw_cmds + 1);

	/* producer index at offset 0 of the internal DMA object */
	ddi_put32(cmd->frame_dma_obj.acc_handle,
	    &initq_info->producer_index_phys_addr_hi, 0);
	ddi_put32(cmd->frame_dma_obj.acc_handle,
	    &initq_info->producer_index_phys_addr_lo,
	    instance->mfi_internal_dma_obj.dma_cookie[0].dmac_address);

	/* consumer index at offset 4 */
	ddi_put32(cmd->frame_dma_obj.acc_handle,
	    &initq_info->consumer_index_phys_addr_hi, 0);
	ddi_put32(cmd->frame_dma_obj.acc_handle,
	    &initq_info->consumer_index_phys_addr_lo,
	    instance->mfi_internal_dma_obj.dma_cookie[0].dmac_address + 4);

	/* reply queue starts at offset 8 */
	ddi_put32(cmd->frame_dma_obj.acc_handle,
	    &initq_info->reply_queue_start_phys_addr_hi, 0);
	ddi_put32(cmd->frame_dma_obj.acc_handle,
	    &initq_info->reply_queue_start_phys_addr_lo,
	    instance->mfi_internal_dma_obj.dma_cookie[0].dmac_address + 8);

	ddi_put8(cmd->frame_dma_obj.acc_handle,
	    &init_frame->cmd, MFI_CMD_OP_INIT);
	ddi_put8(cmd->frame_dma_obj.acc_handle, &init_frame->cmd_status,
	    MFI_CMD_STATUS_POLL_MODE);
	ddi_put16(cmd->frame_dma_obj.acc_handle, &init_frame->flags, 0);
	/* queue info lives in this same frame, 64 bytes in */
	ddi_put32(cmd->frame_dma_obj.acc_handle,
	    &init_frame->queue_info_new_phys_addr_lo,
	    cmd->frame_phys_addr + 64);
	ddi_put32(cmd->frame_dma_obj.acc_handle,
	    &init_frame->queue_info_new_phys_addr_hi, 0);


	/* fill driver version information */
	fill_up_drv_ver(&drv_ver_info);

	/* allocate the driver version data transfer buffer */
	instance->drv_ver_dma_obj.size = sizeof (drv_ver_info.drv_ver);
	instance->drv_ver_dma_obj.dma_attr = mrsas_generic_dma_attr;
	instance->drv_ver_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
	instance->drv_ver_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
	instance->drv_ver_dma_obj.dma_attr.dma_attr_sgllen = 1;
	instance->drv_ver_dma_obj.dma_attr.dma_attr_align = 1;

	if (mrsas_alloc_dma_obj(instance, &instance->drv_ver_dma_obj,
	    (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
		con_log(CL_ANN, (CE_WARN,
		    "init_mfi : Could not allocate driver version buffer."));
		return (DDI_FAILURE);
	}
	/* copy driver version to dma buffer */
	(void) memset(instance->drv_ver_dma_obj.buffer, 0,
	    sizeof (drv_ver_info.drv_ver));
	ddi_rep_put8(cmd->frame_dma_obj.acc_handle,
	    (uint8_t *)drv_ver_info.drv_ver,
	    (uint8_t *)instance->drv_ver_dma_obj.buffer,
	    sizeof (drv_ver_info.drv_ver), DDI_DEV_AUTOINCR);


	/* copy driver version physical address to init frame */
	ddi_put64(cmd->frame_dma_obj.acc_handle, &init_frame->driverversion,
	    instance->drv_ver_dma_obj.dma_cookie[0].dmac_address);

	ddi_put32(cmd->frame_dma_obj.acc_handle, &init_frame->data_xfer_len,
	    sizeof (struct mrsas_init_queue_info));

	cmd->frame_count = 1;

	*cmd_ptr = cmd;

	return (DDI_SUCCESS);
}
3571 3573
3572 3574
/*
 * mrsas_init_adapter_ppc - Initialize MFI interface adapter.
 *
 * Allocates all MFI memory (command pool, frame pool, additional DMA
 * buffers), builds and issues the MFI INIT frame in polled mode, then
 * probes the firmware status register for IEEE SGL support (bit
 * 0x04000000, honored only when ctio_enable is set).  On success the
 * unroll flags alloc_space_mfi and verBuff are set so detach can free
 * what was allocated here.
 *
 * Failure unwinding: fail_fw_init frees the driver-version DMA buffer
 * allocated by mrsas_build_init_cmd() and then falls through to
 * fail_undo_alloc_mfi_space, which returns the INIT command packet and
 * releases all MFI memory.
 */
int
mrsas_init_adapter_ppc(struct mrsas_instance *instance)
{
	struct mrsas_cmd		*cmd;

	/*
	 * allocate memory for mfi adapter(cmd pool, individual commands, mfi
	 * frames etc
	 */
	if (alloc_space_for_mfi(instance) != DDI_SUCCESS) {
		con_log(CL_ANN, (CE_NOTE,
		    "Error, failed to allocate memory for MFI adapter"));
		return (DDI_FAILURE);
	}

	/* Build INIT command */
	cmd = mrsas_get_mfi_pkt(instance);
	if (cmd == NULL) {
		DTRACE_PROBE2(init_adapter_mfi_err, uint16_t,
		    instance->fw_outstanding, uint16_t, instance->max_fw_cmds);
		return (DDI_FAILURE);
	}

	if (mrsas_build_init_cmd(instance, &cmd) != DDI_SUCCESS) {
		con_log(CL_ANN,
		    (CE_NOTE, "Error, failed to build INIT command"));

		goto fail_undo_alloc_mfi_space;
	}

	/*
	 * Disable interrupt before sending init frame ( see linux driver code)
	 * send INIT MFI frame in polled mode
	 */
	if (instance->func_ptr->issue_cmd_in_poll_mode(instance, cmd)) {
		con_log(CL_ANN, (CE_WARN, "failed to init firmware"));
		goto fail_fw_init;
	}

	if (mrsas_common_check(instance, cmd) != DDI_SUCCESS)
		goto fail_fw_init;
	mrsas_return_mfi_pkt(instance, cmd);

	/* bit 0x04000000 in the status register flags IEEE SGL support */
	if (ctio_enable &&
	    (instance->func_ptr->read_fw_status_reg(instance) & 0x04000000)) {
		con_log(CL_ANN, (CE_NOTE, "mr_sas: IEEE SGL's supported"));
		instance->flag_ieee = 1;
	} else {
		instance->flag_ieee = 0;
	}

	/* skinny (SAS2008-class) controllers always use IEEE SGLs */
	ASSERT(!instance->skinny || instance->flag_ieee);

	instance->unroll.alloc_space_mfi = 1;
	instance->unroll.verBuff = 1;

	return (DDI_SUCCESS);


fail_fw_init:
	(void) mrsas_free_dma_obj(instance, instance->drv_ver_dma_obj);

fail_undo_alloc_mfi_space:
	mrsas_return_mfi_pkt(instance, cmd);
	free_space_for_mfi(instance);

	return (DDI_FAILURE);

}
3645 3647
3646 3648 /*
3647 3649 * mrsas_init_adapter - Initialize adapter.
3648 3650 */
3649 3651 int
3650 3652 mrsas_init_adapter(struct mrsas_instance *instance)
3651 3653 {
3652 3654 struct mrsas_ctrl_info ctrl_info;
3653 3655
3654 3656
3655 3657 /* we expect the FW state to be READY */
3656 3658 if (mfi_state_transition_to_ready(instance)) {
3657 3659 con_log(CL_ANN, (CE_WARN, "mr_sas: F/W is not ready"));
3658 3660 return (DDI_FAILURE);
3659 3661 }
3660 3662
3661 3663 /* get various operational parameters from status register */
3662 3664 instance->max_num_sge =
3663 3665 (instance->func_ptr->read_fw_status_reg(instance) &
3664 3666 0xFF0000) >> 0x10;
3665 3667 instance->max_num_sge =
3666 3668 (instance->max_num_sge > MRSAS_MAX_SGE_CNT) ?
3667 3669 MRSAS_MAX_SGE_CNT : instance->max_num_sge;
3668 3670
3669 3671 /*
3670 3672 * Reduce the max supported cmds by 1. This is to ensure that the
3671 3673 * reply_q_sz (1 more than the max cmd that driver may send)
3672 3674 * does not exceed max cmds that the FW can support
3673 3675 */
3674 3676 instance->max_fw_cmds =
3675 3677 instance->func_ptr->read_fw_status_reg(instance) & 0xFFFF;
3676 3678 instance->max_fw_cmds = instance->max_fw_cmds - 1;
3677 3679
3678 3680
3679 3681
3680 3682 /* Initialize adapter */
3681 3683 if (instance->func_ptr->init_adapter(instance) != DDI_SUCCESS) {
3682 3684 con_log(CL_ANN,
3683 3685 (CE_WARN, "mr_sas: could not initialize adapter"));
3684 3686 return (DDI_FAILURE);
3685 3687 }
3686 3688
3687 3689 /* gather misc FW related information */
3688 3690 instance->disable_online_ctrl_reset = 0;
3689 3691
3690 3692 if (!get_ctrl_info(instance, &ctrl_info)) {
3691 3693 instance->max_sectors_per_req = ctrl_info.max_request_size;
3692 3694 con_log(CL_ANN1, (CE_NOTE,
3693 3695 "product name %s ld present %d",
3694 3696 ctrl_info.product_name, ctrl_info.ld_present_count));
3695 3697 } else {
3696 3698 instance->max_sectors_per_req = instance->max_num_sge *
3697 3699 PAGESIZE / 512;
3698 3700 }
3699 3701
3700 3702 if (ctrl_info.properties.on_off_properties & DISABLE_OCR_PROP_FLAG)
3701 3703 instance->disable_online_ctrl_reset = 1;
3702 3704
3703 3705 return (DDI_SUCCESS);
3704 3706
3705 3707 }
3706 3708
3707 3709
3708 3710
/*
 * mrsas_issue_init_mfi - send the MFI INIT frame to the firmware in
 * polled mode.
 *
 * Builds an init frame whose queue-info structure lives in the frame's
 * SGL area (first 64 bytes are the frame proper; no SGL is needed for
 * INIT, so the space is reused), points the producer/consumer/reply
 * queue addresses at mfi_internal_dma_obj, and issues the frame via
 * issue_cmd_in_poll_mode.
 *
 * Returns DDI_SUCCESS or DDI_FAILURE.
 */
static int
mrsas_issue_init_mfi(struct mrsas_instance *instance)
{
	struct mrsas_cmd		*cmd;
	struct mrsas_init_frame		*init_frame;
	struct mrsas_init_queue_info	*initq_info;

	/*
	 * Prepare a init frame. Note the init frame points to queue info
	 * structure. Each frame has SGL allocated after first 64 bytes. For
	 * this frame - since we don't need any SGL - we use SGL's space as
	 * queue info structure
	 */
	con_log(CL_ANN1, (CE_NOTE,
	    "mrsas_issue_init_mfi: entry\n"));
	cmd = get_mfi_app_pkt(instance);

	if (!cmd) {
		con_log(CL_ANN1, (CE_WARN,
		    "mrsas_issue_init_mfi: get_pkt failed\n"));
		return (DDI_FAILURE);
	}

	/* Clear the frame buffer and assign back the context id */
	(void) memset((char *)&cmd->frame[0], 0, sizeof (union mrsas_frame));
	ddi_put32(cmd->frame_dma_obj.acc_handle, &cmd->frame->hdr.context,
	    cmd->index);

	init_frame = (struct mrsas_init_frame *)cmd->frame;
	initq_info = (struct mrsas_init_queue_info *)
	    ((unsigned long)init_frame + 64);

	/*
	 * NOTE(review): this memset clears the whole frame again,
	 * including the hdr.context written just above.  Polled-mode
	 * INIT presumably does not rely on the context id — confirm
	 * before reordering.
	 */
	(void) memset(init_frame, 0, MRMFI_FRAME_SIZE);
	(void) memset(initq_info, 0, sizeof (struct mrsas_init_queue_info));

	ddi_put32(cmd->frame_dma_obj.acc_handle, &initq_info->init_flags, 0);

	/* reply queue holds one slot more than max outstanding cmds */
	ddi_put32(cmd->frame_dma_obj.acc_handle,
	    &initq_info->reply_queue_entries, instance->max_fw_cmds + 1);
	ddi_put32(cmd->frame_dma_obj.acc_handle,
	    &initq_info->producer_index_phys_addr_hi, 0);
	ddi_put32(cmd->frame_dma_obj.acc_handle,
	    &initq_info->producer_index_phys_addr_lo,
	    instance->mfi_internal_dma_obj.dma_cookie[0].dmac_address);
	ddi_put32(cmd->frame_dma_obj.acc_handle,
	    &initq_info->consumer_index_phys_addr_hi, 0);
	/* consumer index lives 4 bytes past the producer index */
	ddi_put32(cmd->frame_dma_obj.acc_handle,
	    &initq_info->consumer_index_phys_addr_lo,
	    instance->mfi_internal_dma_obj.dma_cookie[0].dmac_address + 4);

	ddi_put32(cmd->frame_dma_obj.acc_handle,
	    &initq_info->reply_queue_start_phys_addr_hi, 0);
	/* reply queue proper starts 8 bytes into the internal DMA buffer */
	ddi_put32(cmd->frame_dma_obj.acc_handle,
	    &initq_info->reply_queue_start_phys_addr_lo,
	    instance->mfi_internal_dma_obj.dma_cookie[0].dmac_address + 8);

	ddi_put8(cmd->frame_dma_obj.acc_handle,
	    &init_frame->cmd, MFI_CMD_OP_INIT);
	ddi_put8(cmd->frame_dma_obj.acc_handle, &init_frame->cmd_status,
	    MFI_CMD_STATUS_POLL_MODE);
	ddi_put16(cmd->frame_dma_obj.acc_handle, &init_frame->flags, 0);
	/* queue info was placed 64 bytes into the frame (SGL area) */
	ddi_put32(cmd->frame_dma_obj.acc_handle,
	    &init_frame->queue_info_new_phys_addr_lo,
	    cmd->frame_phys_addr + 64);
	ddi_put32(cmd->frame_dma_obj.acc_handle,
	    &init_frame->queue_info_new_phys_addr_hi, 0);

	ddi_put32(cmd->frame_dma_obj.acc_handle, &init_frame->data_xfer_len,
	    sizeof (struct mrsas_init_queue_info));

	cmd->frame_count = 1;

	/* issue the init frame in polled mode */
	if (instance->func_ptr->issue_cmd_in_poll_mode(instance, cmd)) {
		con_log(CL_ANN1, (CE_WARN,
		    "mrsas_issue_init_mfi():failed to "
		    "init firmware"));
		return_mfi_app_pkt(instance, cmd);
		return (DDI_FAILURE);
	}

	if (mrsas_common_check(instance, cmd) != DDI_SUCCESS) {
		return_mfi_app_pkt(instance, cmd);
		return (DDI_FAILURE);
	}

	return_mfi_app_pkt(instance, cmd);
	con_log(CL_ANN1, (CE_CONT, "mrsas_issue_init_mfi: Done"));

	return (DDI_SUCCESS);
}
3800 3802 /*
3801 3803 * mfi_state_transition_to_ready : Move the FW to READY state
3802 3804 *
3803 3805 * @reg_set : MFI register set
3804 3806 */
3805 3807 int
3806 3808 mfi_state_transition_to_ready(struct mrsas_instance *instance)
3807 3809 {
3808 3810 int i;
3809 3811 uint8_t max_wait;
3810 3812 uint32_t fw_ctrl = 0;
3811 3813 uint32_t fw_state;
3812 3814 uint32_t cur_state;
3813 3815 uint32_t cur_abs_reg_val;
3814 3816 uint32_t prev_abs_reg_val;
3815 3817 uint32_t status;
3816 3818
3817 3819 cur_abs_reg_val =
3818 3820 instance->func_ptr->read_fw_status_reg(instance);
3819 3821 fw_state =
3820 3822 cur_abs_reg_val & MFI_STATE_MASK;
3821 3823 con_log(CL_ANN1, (CE_CONT,
3822 3824 "mfi_state_transition_to_ready:FW state = 0x%x", fw_state));
3823 3825
3824 3826 while (fw_state != MFI_STATE_READY) {
3825 3827 con_log(CL_ANN, (CE_CONT,
3826 3828 "mfi_state_transition_to_ready:FW state%x", fw_state));
3827 3829
3828 3830 switch (fw_state) {
3829 3831 case MFI_STATE_FAULT:
3830 3832 con_log(CL_ANN, (CE_NOTE,
3831 3833 "mr_sas: FW in FAULT state!!"));
3832 3834
3833 3835 return (ENODEV);
3834 3836 case MFI_STATE_WAIT_HANDSHAKE:
3835 3837 /* set the CLR bit in IMR0 */
3836 3838 con_log(CL_ANN1, (CE_NOTE,
3837 3839 "mr_sas: FW waiting for HANDSHAKE"));
3838 3840 /*
3839 3841 * PCI_Hot Plug: MFI F/W requires
3840 3842 * (MFI_INIT_CLEAR_HANDSHAKE|MFI_INIT_HOTPLUG)
3841 3843 * to be set
3842 3844 */
3843 3845 /* WR_IB_MSG_0(MFI_INIT_CLEAR_HANDSHAKE, instance); */
3844 3846 if (!instance->tbolt && !instance->skinny) {
3845 3847 WR_IB_DOORBELL(MFI_INIT_CLEAR_HANDSHAKE |
3846 3848 MFI_INIT_HOTPLUG, instance);
3847 3849 } else {
3848 3850 WR_RESERVED0_REGISTER(MFI_INIT_CLEAR_HANDSHAKE |
3849 3851 MFI_INIT_HOTPLUG, instance);
3850 3852 }
3851 3853 max_wait = (instance->tbolt == 1) ? 180 : 2;
3852 3854 cur_state = MFI_STATE_WAIT_HANDSHAKE;
3853 3855 break;
3854 3856 case MFI_STATE_BOOT_MESSAGE_PENDING:
3855 3857 /* set the CLR bit in IMR0 */
3856 3858 con_log(CL_ANN1, (CE_NOTE,
3857 3859 "mr_sas: FW state boot message pending"));
3858 3860 /*
3859 3861 * PCI_Hot Plug: MFI F/W requires
3860 3862 * (MFI_INIT_CLEAR_HANDSHAKE|MFI_INIT_HOTPLUG)
3861 3863 * to be set
3862 3864 */
3863 3865 if (!instance->tbolt && !instance->skinny) {
3864 3866 WR_IB_DOORBELL(MFI_INIT_HOTPLUG, instance);
3865 3867 } else {
3866 3868 WR_RESERVED0_REGISTER(MFI_INIT_HOTPLUG,
3867 3869 instance);
3868 3870 }
3869 3871 max_wait = (instance->tbolt == 1) ? 180 : 10;
3870 3872 cur_state = MFI_STATE_BOOT_MESSAGE_PENDING;
3871 3873 break;
3872 3874 case MFI_STATE_OPERATIONAL:
3873 3875 /* bring it to READY state; assuming max wait 2 secs */
3874 3876 instance->func_ptr->disable_intr(instance);
3875 3877 con_log(CL_ANN1, (CE_NOTE,
3876 3878 "mr_sas: FW in OPERATIONAL state"));
3877 3879 /*
3878 3880 * PCI_Hot Plug: MFI F/W requires
3879 3881 * (MFI_INIT_READY | MFI_INIT_MFIMODE | MFI_INIT_ABORT)
3880 3882 * to be set
3881 3883 */
3882 3884 /* WR_IB_DOORBELL(MFI_INIT_READY, instance); */
3883 3885 if (!instance->tbolt && !instance->skinny) {
3884 3886 WR_IB_DOORBELL(MFI_RESET_FLAGS, instance);
3885 3887 } else {
3886 3888 WR_RESERVED0_REGISTER(MFI_RESET_FLAGS,
3887 3889 instance);
3888 3890
3889 3891 for (i = 0; i < (10 * 1000); i++) {
3890 3892 status =
3891 3893 RD_RESERVED0_REGISTER(instance);
3892 3894 if (status & 1) {
3893 3895 delay(1 *
3894 3896 drv_usectohz(MILLISEC));
3895 3897 } else {
3896 3898 break;
3897 3899 }
3898 3900 }
3899 3901
3900 3902 }
3901 3903 max_wait = (instance->tbolt == 1) ? 180 : 10;
3902 3904 cur_state = MFI_STATE_OPERATIONAL;
3903 3905 break;
3904 3906 case MFI_STATE_UNDEFINED:
3905 3907 /* this state should not last for more than 2 seconds */
3906 3908 con_log(CL_ANN1, (CE_NOTE, "FW state undefined"));
3907 3909
3908 3910 max_wait = (instance->tbolt == 1) ? 180 : 2;
3909 3911 cur_state = MFI_STATE_UNDEFINED;
3910 3912 break;
3911 3913 case MFI_STATE_BB_INIT:
3912 3914 max_wait = (instance->tbolt == 1) ? 180 : 2;
3913 3915 cur_state = MFI_STATE_BB_INIT;
3914 3916 break;
3915 3917 case MFI_STATE_FW_INIT:
3916 3918 max_wait = (instance->tbolt == 1) ? 180 : 2;
3917 3919 cur_state = MFI_STATE_FW_INIT;
3918 3920 break;
3919 3921 case MFI_STATE_FW_INIT_2:
3920 3922 max_wait = 180;
3921 3923 cur_state = MFI_STATE_FW_INIT_2;
3922 3924 break;
3923 3925 case MFI_STATE_DEVICE_SCAN:
3924 3926 max_wait = 180;
3925 3927 cur_state = MFI_STATE_DEVICE_SCAN;
3926 3928 prev_abs_reg_val = cur_abs_reg_val;
3927 3929 con_log(CL_NONE, (CE_NOTE,
3928 3930 "Device scan in progress ...\n"));
3929 3931 break;
3930 3932 case MFI_STATE_FLUSH_CACHE:
3931 3933 max_wait = 180;
3932 3934 cur_state = MFI_STATE_FLUSH_CACHE;
3933 3935 break;
3934 3936 default:
3935 3937 con_log(CL_ANN1, (CE_NOTE,
3936 3938 "mr_sas: Unknown state 0x%x", fw_state));
3937 3939 return (ENODEV);
3938 3940 }
3939 3941
3940 3942 /* the cur_state should not last for more than max_wait secs */
3941 3943 for (i = 0; i < (max_wait * MILLISEC); i++) {
3942 3944 /* fw_state = RD_OB_MSG_0(instance) & MFI_STATE_MASK; */
3943 3945 cur_abs_reg_val =
3944 3946 instance->func_ptr->read_fw_status_reg(instance);
3945 3947 fw_state = cur_abs_reg_val & MFI_STATE_MASK;
3946 3948
3947 3949 if (fw_state == cur_state) {
3948 3950 delay(1 * drv_usectohz(MILLISEC));
3949 3951 } else {
3950 3952 break;
3951 3953 }
3952 3954 }
3953 3955 if (fw_state == MFI_STATE_DEVICE_SCAN) {
3954 3956 if (prev_abs_reg_val != cur_abs_reg_val) {
3955 3957 continue;
3956 3958 }
3957 3959 }
3958 3960
3959 3961 /* return error if fw_state hasn't changed after max_wait */
3960 3962 if (fw_state == cur_state) {
3961 3963 con_log(CL_ANN1, (CE_WARN,
3962 3964 "FW state hasn't changed in %d secs", max_wait));
3963 3965 return (ENODEV);
3964 3966 }
3965 3967 };
3966 3968
3967 3969 /* This may also need to apply to Skinny, but for now, don't worry. */
3968 3970 if (!instance->tbolt && !instance->skinny) {
3969 3971 fw_ctrl = RD_IB_DOORBELL(instance);
3970 3972 con_log(CL_ANN1, (CE_CONT,
3971 3973 "mfi_state_transition_to_ready:FW ctrl = 0x%x", fw_ctrl));
3972 3974
3973 3975 /*
3974 3976 * Write 0xF to the doorbell register to do the following.
3975 3977 * - Abort all outstanding commands (bit 0).
3976 3978 * - Transition from OPERATIONAL to READY state (bit 1).
3977 3979 * - Discard (possible) low MFA posted in 64-bit mode (bit-2).
3978 3980 * - Set to release FW to continue running (i.e. BIOS handshake
3979 3981 * (bit 3).
3980 3982 */
3981 3983 WR_IB_DOORBELL(0xF, instance);
3982 3984 }
3983 3985
3984 3986 if (mrsas_check_acc_handle(instance->regmap_handle) != DDI_SUCCESS) {
3985 3987 return (EIO);
3986 3988 }
3987 3989
3988 3990 return (DDI_SUCCESS);
3989 3991 }
3990 3992
3991 3993 /*
3992 3994 * get_seq_num
3993 3995 */
3994 3996 static int
3995 3997 get_seq_num(struct mrsas_instance *instance,
3996 3998 struct mrsas_evt_log_info *eli)
3997 3999 {
3998 4000 int ret = DDI_SUCCESS;
3999 4001
4000 4002 dma_obj_t dcmd_dma_obj;
4001 4003 struct mrsas_cmd *cmd;
4002 4004 struct mrsas_dcmd_frame *dcmd;
4003 4005 struct mrsas_evt_log_info *eli_tmp;
4004 4006 if (instance->tbolt) {
4005 4007 cmd = get_raid_msg_mfi_pkt(instance);
4006 4008 } else {
4007 4009 cmd = mrsas_get_mfi_pkt(instance);
4008 4010 }
4009 4011
4010 4012 if (!cmd) {
4011 4013 cmn_err(CE_WARN, "mr_sas: failed to get a cmd");
4012 4014 DTRACE_PROBE2(seq_num_mfi_err, uint16_t,
4013 4015 instance->fw_outstanding, uint16_t, instance->max_fw_cmds);
4014 4016 return (ENOMEM);
4015 4017 }
4016 4018
4017 4019 /* Clear the frame buffer and assign back the context id */
4018 4020 (void) memset((char *)&cmd->frame[0], 0, sizeof (union mrsas_frame));
4019 4021 ddi_put32(cmd->frame_dma_obj.acc_handle, &cmd->frame->hdr.context,
4020 4022 cmd->index);
4021 4023
4022 4024 dcmd = &cmd->frame->dcmd;
4023 4025
4024 4026 /* allocate the data transfer buffer */
4025 4027 dcmd_dma_obj.size = sizeof (struct mrsas_evt_log_info);
4026 4028 dcmd_dma_obj.dma_attr = mrsas_generic_dma_attr;
4027 4029 dcmd_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
4028 4030 dcmd_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
4029 4031 dcmd_dma_obj.dma_attr.dma_attr_sgllen = 1;
4030 4032 dcmd_dma_obj.dma_attr.dma_attr_align = 1;
4031 4033
4032 4034 if (mrsas_alloc_dma_obj(instance, &dcmd_dma_obj,
4033 4035 (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
4034 4036 cmn_err(CE_WARN,
4035 4037 "get_seq_num: could not allocate data transfer buffer.");
4036 4038 return (DDI_FAILURE);
4037 4039 }
4038 4040
4039 4041 (void) memset(dcmd_dma_obj.buffer, 0,
4040 4042 sizeof (struct mrsas_evt_log_info));
4041 4043
4042 4044 (void) memset(dcmd->mbox.b, 0, DCMD_MBOX_SZ);
4043 4045
4044 4046 ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->cmd, MFI_CMD_OP_DCMD);
4045 4047 ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->cmd_status, 0);
4046 4048 ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->sge_count, 1);
4047 4049 ddi_put16(cmd->frame_dma_obj.acc_handle, &dcmd->flags,
4048 4050 MFI_FRAME_DIR_READ);
4049 4051 ddi_put16(cmd->frame_dma_obj.acc_handle, &dcmd->timeout, 0);
4050 4052 ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->data_xfer_len,
4051 4053 sizeof (struct mrsas_evt_log_info));
4052 4054 ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->opcode,
4053 4055 MR_DCMD_CTRL_EVENT_GET_INFO);
4054 4056 ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->sgl.sge32[0].length,
4055 4057 sizeof (struct mrsas_evt_log_info));
4056 4058 ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->sgl.sge32[0].phys_addr,
4057 4059 dcmd_dma_obj.dma_cookie[0].dmac_address);
4058 4060
4059 4061 cmd->sync_cmd = MRSAS_TRUE;
4060 4062 cmd->frame_count = 1;
4061 4063
4062 4064 if (instance->tbolt) {
4063 4065 mr_sas_tbolt_build_mfi_cmd(instance, cmd);
4064 4066 }
4065 4067
4066 4068 if (instance->func_ptr->issue_cmd_in_sync_mode(instance, cmd)) {
4067 4069 cmn_err(CE_WARN, "get_seq_num: "
4068 4070 "failed to issue MRSAS_DCMD_CTRL_EVENT_GET_INFO");
4069 4071 ret = DDI_FAILURE;
4070 4072 } else {
4071 4073 eli_tmp = (struct mrsas_evt_log_info *)dcmd_dma_obj.buffer;
4072 4074 eli->newest_seq_num = ddi_get32(cmd->frame_dma_obj.acc_handle,
4073 4075 &eli_tmp->newest_seq_num);
4074 4076 ret = DDI_SUCCESS;
4075 4077 }
4076 4078
4077 4079 if (mrsas_free_dma_obj(instance, dcmd_dma_obj) != DDI_SUCCESS)
4078 4080 ret = DDI_FAILURE;
4079 4081
4080 4082 if (instance->tbolt) {
4081 4083 return_raid_msg_mfi_pkt(instance, cmd);
4082 4084 } else {
4083 4085 mrsas_return_mfi_pkt(instance, cmd);
4084 4086 }
4085 4087
4086 4088 return (ret);
4087 4089 }
4088 4090
4089 4091 /*
4090 4092 * start_mfi_aen
4091 4093 */
4092 4094 static int
4093 4095 start_mfi_aen(struct mrsas_instance *instance)
4094 4096 {
4095 4097 int ret = 0;
4096 4098
4097 4099 struct mrsas_evt_log_info eli;
4098 4100 union mrsas_evt_class_locale class_locale;
4099 4101
4100 4102 /* get the latest sequence number from FW */
4101 4103 (void) memset(&eli, 0, sizeof (struct mrsas_evt_log_info));
4102 4104
4103 4105 if (get_seq_num(instance, &eli)) {
4104 4106 cmn_err(CE_WARN, "start_mfi_aen: failed to get seq num");
4105 4107 return (-1);
4106 4108 }
4107 4109
4108 4110 /* register AEN with FW for latest sequence number plus 1 */
4109 4111 class_locale.members.reserved = 0;
4110 4112 class_locale.members.locale = LE_16(MR_EVT_LOCALE_ALL);
4111 4113 class_locale.members.class = MR_EVT_CLASS_INFO;
4112 4114 class_locale.word = LE_32(class_locale.word);
4113 4115 ret = register_mfi_aen(instance, eli.newest_seq_num + 1,
4114 4116 class_locale.word);
4115 4117
4116 4118 if (ret) {
4117 4119 cmn_err(CE_WARN, "start_mfi_aen: aen registration failed");
4118 4120 return (-1);
4119 4121 }
4120 4122
4121 4123
4122 4124 return (ret);
4123 4125 }
4124 4126
4125 4127 /*
4126 4128 * flush_cache
4127 4129 */
4128 4130 static void
4129 4131 flush_cache(struct mrsas_instance *instance)
4130 4132 {
4131 4133 struct mrsas_cmd *cmd = NULL;
4132 4134 struct mrsas_dcmd_frame *dcmd;
4133 4135 if (instance->tbolt) {
4134 4136 cmd = get_raid_msg_mfi_pkt(instance);
4135 4137 } else {
4136 4138 cmd = mrsas_get_mfi_pkt(instance);
4137 4139 }
4138 4140
4139 4141 if (!cmd) {
4140 4142 con_log(CL_ANN1, (CE_WARN,
4141 4143 "flush_cache():Failed to get a cmd for flush_cache"));
4142 4144 DTRACE_PROBE2(flush_cache_err, uint16_t,
4143 4145 instance->fw_outstanding, uint16_t, instance->max_fw_cmds);
4144 4146 return;
4145 4147 }
4146 4148
4147 4149 /* Clear the frame buffer and assign back the context id */
4148 4150 (void) memset((char *)&cmd->frame[0], 0, sizeof (union mrsas_frame));
4149 4151 ddi_put32(cmd->frame_dma_obj.acc_handle, &cmd->frame->hdr.context,
4150 4152 cmd->index);
4151 4153
4152 4154 dcmd = &cmd->frame->dcmd;
4153 4155
4154 4156 (void) memset(dcmd->mbox.b, 0, DCMD_MBOX_SZ);
4155 4157
4156 4158 ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->cmd, MFI_CMD_OP_DCMD);
4157 4159 ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->cmd_status, 0x0);
4158 4160 ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->sge_count, 0);
4159 4161 ddi_put16(cmd->frame_dma_obj.acc_handle, &dcmd->flags,
4160 4162 MFI_FRAME_DIR_NONE);
4161 4163 ddi_put16(cmd->frame_dma_obj.acc_handle, &dcmd->timeout, 0);
4162 4164 ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->data_xfer_len, 0);
4163 4165 ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->opcode,
4164 4166 MR_DCMD_CTRL_CACHE_FLUSH);
4165 4167 ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->mbox.b[0],
4166 4168 MR_FLUSH_CTRL_CACHE | MR_FLUSH_DISK_CACHE);
4167 4169
4168 4170 cmd->frame_count = 1;
4169 4171
4170 4172 if (instance->tbolt) {
4171 4173 mr_sas_tbolt_build_mfi_cmd(instance, cmd);
4172 4174 }
4173 4175
4174 4176 if (instance->func_ptr->issue_cmd_in_poll_mode(instance, cmd)) {
4175 4177 con_log(CL_ANN1, (CE_WARN,
4176 4178 "flush_cache: failed to issue MFI_DCMD_CTRL_CACHE_FLUSH"));
4177 4179 }
4178 4180 con_log(CL_ANN1, (CE_CONT, "flush_cache done"));
4179 4181 if (instance->tbolt) {
4180 4182 return_raid_msg_mfi_pkt(instance, cmd);
4181 4183 } else {
4182 4184 mrsas_return_mfi_pkt(instance, cmd);
4183 4185 }
4184 4186
4185 4187 }
4186 4188
/*
 * service_mfi_aen - Completes an AEN command
 * @instance: Adapter soft state
 * @cmd: Command to be completed
 *
 * Called when the outstanding AEN registration command completes.
 * Logs a sysevent, reacts to config-change events by scheduling
 * (un)configuration of the affected target via mrsas_service_evt(),
 * then re-arms the AEN with the next sequence number by re-issuing
 * the same command frame.
 */
void
service_mfi_aen(struct mrsas_instance *instance, struct mrsas_cmd *cmd)
{
	uint32_t seq_num;
	/* event payload lives in the pre-allocated AEN DMA buffer */
	struct mrsas_evt_detail *evt_detail =
	    (struct mrsas_evt_detail *)instance->mfi_evt_detail_obj.buffer;
	int rval = 0;
	int tgt = 0;
	/*
	 * NOTE(review): dtype is only consumed inside the PDSUPPORT
	 * block below; when PDSUPPORT is not defined it is unused.
	 */
	uint8_t dtype;
#ifdef PDSUPPORT
	mrsas_pd_address_t *pd_addr;
#endif
	ddi_acc_handle_t acc_handle;

	con_log(CL_ANN, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));

	acc_handle = cmd->frame_dma_obj.acc_handle;
	cmd->cmd_status = ddi_get8(acc_handle, &cmd->frame->io.cmd_status);
	/* ENODATA from the FW just means "no event yet"; not an error */
	if (cmd->cmd_status == ENODATA) {
		cmd->cmd_status = 0;
	}

	/*
	 * log the MFI AEN event to the sysevent queue so that
	 * application will get noticed
	 */
	if (ddi_log_sysevent(instance->dip, DDI_VENDOR_LSI, "LSIMEGA", "SAS",
	    NULL, NULL, DDI_NOSLEEP) != DDI_SUCCESS) {
		int instance_no = ddi_get_instance(instance->dip);
		con_log(CL_ANN, (CE_WARN,
		    "mr_sas%d: Failed to log AEN event", instance_no));
	}
	/*
	 * Check for any ld devices that has changed state. i.e. online
	 * or offline.
	 */
	con_log(CL_ANN1, (CE_CONT,
	    "AEN: code = %x class = %x locale = %x args = %x",
	    ddi_get32(acc_handle, &evt_detail->code),
	    evt_detail->cl.members.class,
	    ddi_get16(acc_handle, &evt_detail->cl.members.locale),
	    ddi_get8(acc_handle, &evt_detail->arg_type)));

	switch (ddi_get32(acc_handle, &evt_detail->code)) {
	case MR_EVT_CFG_CLEARED: {
		/* whole config wiped: unconfigure every attached LD */
		for (tgt = 0; tgt < MRDRV_MAX_LD; tgt++) {
			if (instance->mr_ld_list[tgt].dip != NULL) {
				mutex_enter(&instance->config_dev_mtx);
				instance->mr_ld_list[tgt].flag =
				    (uint8_t)~MRDRV_TGT_VALID;
				mutex_exit(&instance->config_dev_mtx);
				rval = mrsas_service_evt(instance, tgt, 0,
				    MRSAS_EVT_UNCONFIG_TGT, NULL);
				con_log(CL_ANN1, (CE_WARN,
				    "mr_sas: CFG CLEARED AEN rval = %d "
				    "tgt id = %d", rval, tgt));
			}
		}
		break;
	}

	case MR_EVT_LD_DELETED: {
		/* mark the LD invalid, then schedule its unconfig */
		tgt = ddi_get16(acc_handle, &evt_detail->args.ld.target_id);
		mutex_enter(&instance->config_dev_mtx);
		instance->mr_ld_list[tgt].flag = (uint8_t)~MRDRV_TGT_VALID;
		mutex_exit(&instance->config_dev_mtx);
		rval = mrsas_service_evt(instance,
		    ddi_get16(acc_handle, &evt_detail->args.ld.target_id), 0,
		    MRSAS_EVT_UNCONFIG_TGT, NULL);
		con_log(CL_ANN1, (CE_WARN, "mr_sas: LD DELETED AEN rval = %d "
		    "tgt id = %d index = %d", rval,
		    ddi_get16(acc_handle, &evt_detail->args.ld.target_id),
		    ddi_get8(acc_handle, &evt_detail->args.ld.ld_index)));
		break;
	} /* End of MR_EVT_LD_DELETED */

	case MR_EVT_LD_CREATED: {
		/* new LD: schedule target configuration */
		rval = mrsas_service_evt(instance,
		    ddi_get16(acc_handle, &evt_detail->args.ld.target_id), 0,
		    MRSAS_EVT_CONFIG_TGT, NULL);
		con_log(CL_ANN1, (CE_WARN, "mr_sas: LD CREATED AEN rval = %d "
		    "tgt id = %d index = %d", rval,
		    ddi_get16(acc_handle, &evt_detail->args.ld.target_id),
		    ddi_get8(acc_handle, &evt_detail->args.ld.ld_index)));
		break;
	} /* End of MR_EVT_LD_CREATED */

#ifdef PDSUPPORT
	/* physical-disk events only handled on tbolt/skinny controllers */
	case MR_EVT_PD_REMOVED_EXT: {
		if (instance->tbolt || instance->skinny) {
			pd_addr = &evt_detail->args.pd_addr;
			dtype = pd_addr->scsi_dev_type;
			con_log(CL_DLEVEL1, (CE_NOTE,
			    " MR_EVT_PD_REMOVED_EXT: dtype = %x,"
			    " arg_type = %d ", dtype, evt_detail->arg_type));
			tgt = ddi_get16(acc_handle,
			    &evt_detail->args.pd.device_id);
			mutex_enter(&instance->config_dev_mtx);
			instance->mr_tbolt_pd_list[tgt].flag =
			    (uint8_t)~MRDRV_TGT_VALID;
			mutex_exit(&instance->config_dev_mtx);
			rval = mrsas_service_evt(instance, ddi_get16(
			    acc_handle, &evt_detail->args.pd.device_id),
			    1, MRSAS_EVT_UNCONFIG_TGT, NULL);
			con_log(CL_ANN1, (CE_WARN, "mr_sas: PD_REMOVED:"
			    "rval = %d tgt id = %d ", rval,
			    ddi_get16(acc_handle,
			    &evt_detail->args.pd.device_id)));
		}
		break;
	} /* End of MR_EVT_PD_REMOVED_EXT */

	case MR_EVT_PD_INSERTED_EXT: {
		if (instance->tbolt || instance->skinny) {
			rval = mrsas_service_evt(instance,
			    ddi_get16(acc_handle,
			    &evt_detail->args.pd.device_id),
			    1, MRSAS_EVT_CONFIG_TGT, NULL);
			con_log(CL_ANN1, (CE_WARN, "mr_sas: PD_INSERTEDi_EXT:"
			    "rval = %d tgt id = %d ", rval,
			    ddi_get16(acc_handle,
			    &evt_detail->args.pd.device_id)));
		}
		break;
	} /* End of MR_EVT_PD_INSERTED_EXT */

	case MR_EVT_PD_STATE_CHANGE: {
		if (instance->tbolt || instance->skinny) {
			tgt = ddi_get16(acc_handle,
			    &evt_detail->args.pd.device_id);
			/* PD left SYSTEM state: treat as removal */
			if ((evt_detail->args.pd_state.prevState ==
			    PD_SYSTEM) &&
			    (evt_detail->args.pd_state.newState != PD_SYSTEM)) {
				mutex_enter(&instance->config_dev_mtx);
				instance->mr_tbolt_pd_list[tgt].flag =
				    (uint8_t)~MRDRV_TGT_VALID;
				mutex_exit(&instance->config_dev_mtx);
				rval = mrsas_service_evt(instance,
				    ddi_get16(acc_handle,
				    &evt_detail->args.pd.device_id),
				    1, MRSAS_EVT_UNCONFIG_TGT, NULL);
				con_log(CL_ANN1, (CE_WARN, "mr_sas: PD_REMOVED:"
				    "rval = %d tgt id = %d ", rval,
				    ddi_get16(acc_handle,
				    &evt_detail->args.pd.device_id)));
				break;
			}
			/* PD became a SYSTEM disk: treat as insertion */
			if ((evt_detail->args.pd_state.prevState
			    == UNCONFIGURED_GOOD) &&
			    (evt_detail->args.pd_state.newState == PD_SYSTEM)) {
				rval = mrsas_service_evt(instance,
				    ddi_get16(acc_handle,
				    &evt_detail->args.pd.device_id),
				    1, MRSAS_EVT_CONFIG_TGT, NULL);
				con_log(CL_ANN1, (CE_WARN,
				    "mr_sas: PD_INSERTED: rval = %d "
				    " tgt id = %d ", rval,
				    ddi_get16(acc_handle,
				    &evt_detail->args.pd.device_id)));
				break;
			}
		}
		break;
	}
#endif

	} /* End of Main Switch */

	/* get copy of seq_num and class/locale for re-registration */
	seq_num = ddi_get32(acc_handle, &evt_detail->seq_num);
	seq_num++;
	(void) memset(instance->mfi_evt_detail_obj.buffer, 0,
	    sizeof (struct mrsas_evt_detail));

	ddi_put8(acc_handle, &cmd->frame->dcmd.cmd_status, 0x0);
	ddi_put32(acc_handle, &cmd->frame->dcmd.mbox.w[0], seq_num);

	instance->aen_seq_num = seq_num;

	cmd->frame_count = 1;

	cmd->retry_count_for_ocr = 0;
	cmd->drv_pkt_time = 0;

	/* Issue the aen registration frame */
	instance->func_ptr->issue_cmd(cmd, instance);
}
4380 4382
4381 4383 /*
4382 4384 * complete_cmd_in_sync_mode - Completes an internal command
4383 4385 * @instance: Adapter soft state
4384 4386 * @cmd: Command to be completed
4385 4387 *
4386 4388 * The issue_cmd_in_sync_mode() function waits for a command to complete
4387 4389 * after it issues a command. This function wakes up that waiting routine by
4388 4390 * calling wake_up() on the wait queue.
4389 4391 */
4390 4392 static void
4391 4393 complete_cmd_in_sync_mode(struct mrsas_instance *instance,
4392 4394 struct mrsas_cmd *cmd)
4393 4395 {
4394 4396 cmd->cmd_status = ddi_get8(cmd->frame_dma_obj.acc_handle,
4395 4397 &cmd->frame->io.cmd_status);
4396 4398
4397 4399 cmd->sync_cmd = MRSAS_FALSE;
4398 4400
4399 4401 con_log(CL_ANN1, (CE_NOTE, "complete_cmd_in_sync_mode called %p \n",
4400 4402 (void *)cmd));
4401 4403
4402 4404 mutex_enter(&instance->int_cmd_mtx);
4403 4405 if (cmd->cmd_status == ENODATA) {
4404 4406 cmd->cmd_status = 0;
4405 4407 }
4406 4408 cv_broadcast(&instance->int_cmd_cv);
4407 4409 mutex_exit(&instance->int_cmd_mtx);
4408 4410
4409 4411 }
4410 4412
4411 4413 /*
4412 4414 * Call this function inside mrsas_softintr.
4413 4415 * mrsas_initiate_ocr_if_fw_is_faulty - Initiates OCR if FW status is faulty
4414 4416 * @instance: Adapter soft state
4415 4417 */
4416 4418
4417 4419 static uint32_t
4418 4420 mrsas_initiate_ocr_if_fw_is_faulty(struct mrsas_instance *instance)
4419 4421 {
4420 4422 uint32_t cur_abs_reg_val;
4421 4423 uint32_t fw_state;
4422 4424
4423 4425 cur_abs_reg_val = instance->func_ptr->read_fw_status_reg(instance);
4424 4426 fw_state = cur_abs_reg_val & MFI_STATE_MASK;
4425 4427 if (fw_state == MFI_STATE_FAULT) {
4426 4428 if (instance->disable_online_ctrl_reset == 1) {
4427 4429 cmn_err(CE_WARN,
4428 4430 "mrsas_initiate_ocr_if_fw_is_faulty: "
4429 4431 "FW in Fault state, detected in ISR: "
4430 4432 "FW doesn't support ocr ");
4431 4433
4432 4434 return (ADAPTER_RESET_NOT_REQUIRED);
4433 4435 } else {
4434 4436 con_log(CL_ANN, (CE_NOTE,
4435 4437 "mrsas_initiate_ocr_if_fw_is_faulty: FW in Fault "
4436 4438 "state, detected in ISR: FW supports ocr "));
4437 4439
4438 4440 return (ADAPTER_RESET_REQUIRED);
4439 4441 }
4440 4442 }
4441 4443
4442 4444 return (ADAPTER_RESET_NOT_REQUIRED);
4443 4445 }
4444 4446
4445 4447 /*
4446 4448 * mrsas_softintr - The Software ISR
4447 4449 * @param arg : HBA soft state
4448 4450 *
4449 4451 * called from high-level interrupt if hi-level interrupt are not there,
4450 4452 * otherwise triggered as a soft interrupt
4451 4453 */
4452 4454 static uint_t
4453 4455 mrsas_softintr(struct mrsas_instance *instance)
4454 4456 {
4455 4457 struct scsi_pkt *pkt;
4456 4458 struct scsa_cmd *acmd;
4457 4459 struct mrsas_cmd *cmd;
4458 4460 struct mlist_head *pos, *next;
4459 4461 mlist_t process_list;
4460 4462 struct mrsas_header *hdr;
4461 4463 struct scsi_arq_status *arqstat;
4462 4464
4463 4465 con_log(CL_ANN1, (CE_NOTE, "mrsas_softintr() called."));
4464 4466
4465 4467 ASSERT(instance);
4466 4468
4467 4469 mutex_enter(&instance->completed_pool_mtx);
4468 4470
4469 4471 if (mlist_empty(&instance->completed_pool_list)) {
4470 4472 mutex_exit(&instance->completed_pool_mtx);
4471 4473 return (DDI_INTR_CLAIMED);
4472 4474 }
4473 4475
4474 4476 instance->softint_running = 1;
4475 4477
4476 4478 INIT_LIST_HEAD(&process_list);
4477 4479 mlist_splice(&instance->completed_pool_list, &process_list);
4478 4480 INIT_LIST_HEAD(&instance->completed_pool_list);
4479 4481
4480 4482 mutex_exit(&instance->completed_pool_mtx);
4481 4483
4482 4484 /* perform all callbacks first, before releasing the SCBs */
4483 4485 mlist_for_each_safe(pos, next, &process_list) {
4484 4486 cmd = mlist_entry(pos, struct mrsas_cmd, list);
4485 4487
4486 4488 /* syncronize the Cmd frame for the controller */
4487 4489 (void) ddi_dma_sync(cmd->frame_dma_obj.dma_handle,
4488 4490 0, 0, DDI_DMA_SYNC_FORCPU);
4489 4491
4490 4492 if (mrsas_check_dma_handle(cmd->frame_dma_obj.dma_handle) !=
4491 4493 DDI_SUCCESS) {
4492 4494 mrsas_fm_ereport(instance, DDI_FM_DEVICE_NO_RESPONSE);
4493 4495 ddi_fm_service_impact(instance->dip, DDI_SERVICE_LOST);
4494 4496 con_log(CL_ANN1, (CE_WARN,
4495 4497 "mrsas_softintr: "
4496 4498 "FMA check reports DMA handle failure"));
4497 4499 return (DDI_INTR_CLAIMED);
4498 4500 }
4499 4501
4500 4502 hdr = &cmd->frame->hdr;
4501 4503
4502 4504 /* remove the internal command from the process list */
4503 4505 mlist_del_init(&cmd->list);
4504 4506
4505 4507 switch (ddi_get8(cmd->frame_dma_obj.acc_handle, &hdr->cmd)) {
4506 4508 case MFI_CMD_OP_PD_SCSI:
4507 4509 case MFI_CMD_OP_LD_SCSI:
4508 4510 case MFI_CMD_OP_LD_READ:
4509 4511 case MFI_CMD_OP_LD_WRITE:
4510 4512 /*
4511 4513 * MFI_CMD_OP_PD_SCSI and MFI_CMD_OP_LD_SCSI
4512 4514 * could have been issued either through an
4513 4515 * IO path or an IOCTL path. If it was via IOCTL,
4514 4516 * we will send it to internal completion.
4515 4517 */
4516 4518 if (cmd->sync_cmd == MRSAS_TRUE) {
4517 4519 complete_cmd_in_sync_mode(instance, cmd);
4518 4520 break;
4519 4521 }
4520 4522
4521 4523 /* regular commands */
4522 4524 acmd = cmd->cmd;
4523 4525 pkt = CMD2PKT(acmd);
4524 4526
4525 4527 if (acmd->cmd_flags & CFLAG_DMAVALID) {
4526 4528 if (acmd->cmd_flags & CFLAG_CONSISTENT) {
4527 4529 (void) ddi_dma_sync(acmd->cmd_dmahandle,
4528 4530 acmd->cmd_dma_offset,
4529 4531 acmd->cmd_dma_len,
4530 4532 DDI_DMA_SYNC_FORCPU);
4531 4533 }
4532 4534 }
4533 4535
4534 4536 pkt->pkt_reason = CMD_CMPLT;
4535 4537 pkt->pkt_statistics = 0;
4536 4538 pkt->pkt_state = STATE_GOT_BUS
4537 4539 | STATE_GOT_TARGET | STATE_SENT_CMD
4538 4540 | STATE_XFERRED_DATA | STATE_GOT_STATUS;
4539 4541
4540 4542 con_log(CL_ANN, (CE_CONT,
4541 4543 "CDB[0] = %x completed for %s: size %lx context %x",
4542 4544 pkt->pkt_cdbp[0], ((acmd->islogical) ? "LD" : "PD"),
4543 4545 acmd->cmd_dmacount, hdr->context));
4544 4546 DTRACE_PROBE3(softintr_cdb, uint8_t, pkt->pkt_cdbp[0],
4545 4547 uint_t, acmd->cmd_cdblen, ulong_t,
4546 4548 acmd->cmd_dmacount);
4547 4549
4548 4550 if (pkt->pkt_cdbp[0] == SCMD_INQUIRY) {
4549 4551 struct scsi_inquiry *inq;
4550 4552
4551 4553 if (acmd->cmd_dmacount != 0) {
4552 4554 bp_mapin(acmd->cmd_buf);
4553 4555 inq = (struct scsi_inquiry *)
4554 4556 acmd->cmd_buf->b_un.b_addr;
4555 4557
4556 4558 #ifdef PDSUPPORT
4557 4559 if (hdr->cmd_status == MFI_STAT_OK) {
4558 4560 display_scsi_inquiry(
4559 4561 (caddr_t)inq);
4560 4562 }
4561 4563 #else
4562 4564 /* don't expose physical drives to OS */
4563 4565 if (acmd->islogical &&
4564 4566 (hdr->cmd_status == MFI_STAT_OK)) {
4565 4567 display_scsi_inquiry(
4566 4568 (caddr_t)inq);
4567 4569 } else if ((hdr->cmd_status ==
4568 4570 MFI_STAT_OK) && inq->inq_dtype ==
4569 4571 DTYPE_DIRECT) {
4570 4572
4571 4573 display_scsi_inquiry(
4572 4574 (caddr_t)inq);
4573 4575
4574 4576 /* for physical disk */
4575 4577 hdr->cmd_status =
4576 4578 MFI_STAT_DEVICE_NOT_FOUND;
4577 4579 }
4578 4580 #endif /* PDSUPPORT */
4579 4581 }
4580 4582 }
4581 4583
4582 4584 DTRACE_PROBE2(softintr_done, uint8_t, hdr->cmd,
4583 4585 uint8_t, hdr->cmd_status);
4584 4586
4585 4587 switch (hdr->cmd_status) {
4586 4588 case MFI_STAT_OK:
4587 4589 pkt->pkt_scbp[0] = STATUS_GOOD;
4588 4590 break;
4589 4591 case MFI_STAT_LD_CC_IN_PROGRESS:
4590 4592 case MFI_STAT_LD_RECON_IN_PROGRESS:
4591 4593 pkt->pkt_scbp[0] = STATUS_GOOD;
4592 4594 break;
4593 4595 case MFI_STAT_LD_INIT_IN_PROGRESS:
4594 4596 con_log(CL_ANN,
4595 4597 (CE_WARN, "Initialization in Progress"));
4596 4598 pkt->pkt_reason = CMD_TRAN_ERR;
4597 4599
4598 4600 break;
4599 4601 case MFI_STAT_SCSI_DONE_WITH_ERROR:
4600 4602 con_log(CL_ANN, (CE_CONT, "scsi_done error"));
4601 4603
4602 4604 pkt->pkt_reason = CMD_CMPLT;
4603 4605 ((struct scsi_status *)
4604 4606 pkt->pkt_scbp)->sts_chk = 1;
4605 4607
4606 4608 if (pkt->pkt_cdbp[0] == SCMD_TEST_UNIT_READY) {
4607 4609 con_log(CL_ANN,
4608 4610 (CE_WARN, "TEST_UNIT_READY fail"));
4609 4611 } else {
4610 4612 pkt->pkt_state |= STATE_ARQ_DONE;
4611 4613 arqstat = (void *)(pkt->pkt_scbp);
4612 4614 arqstat->sts_rqpkt_reason = CMD_CMPLT;
4613 4615 arqstat->sts_rqpkt_resid = 0;
4614 4616 arqstat->sts_rqpkt_state |=
4615 4617 STATE_GOT_BUS | STATE_GOT_TARGET
4616 4618 | STATE_SENT_CMD
4617 4619 | STATE_XFERRED_DATA;
4618 4620 *(uint8_t *)&arqstat->sts_rqpkt_status =
4619 4621 STATUS_GOOD;
4620 4622 ddi_rep_get8(
4621 4623 cmd->frame_dma_obj.acc_handle,
4622 4624 (uint8_t *)
4623 4625 &(arqstat->sts_sensedata),
4624 4626 cmd->sense,
4625 4627 sizeof (struct scsi_extended_sense),
4626 4628 DDI_DEV_AUTOINCR);
4627 4629 }
4628 4630 break;
4629 4631 case MFI_STAT_LD_OFFLINE:
4630 4632 case MFI_STAT_DEVICE_NOT_FOUND:
4631 4633 con_log(CL_ANN, (CE_CONT,
4632 4634 "mrsas_softintr:device not found error"));
4633 4635 pkt->pkt_reason = CMD_DEV_GONE;
4634 4636 pkt->pkt_statistics = STAT_DISCON;
4635 4637 break;
4636 4638 case MFI_STAT_LD_LBA_OUT_OF_RANGE:
4637 4639 pkt->pkt_state |= STATE_ARQ_DONE;
4638 4640 pkt->pkt_reason = CMD_CMPLT;
4639 4641 ((struct scsi_status *)
4640 4642 pkt->pkt_scbp)->sts_chk = 1;
4641 4643
4642 4644 arqstat = (void *)(pkt->pkt_scbp);
4643 4645 arqstat->sts_rqpkt_reason = CMD_CMPLT;
4644 4646 arqstat->sts_rqpkt_resid = 0;
4645 4647 arqstat->sts_rqpkt_state |= STATE_GOT_BUS
4646 4648 | STATE_GOT_TARGET | STATE_SENT_CMD
4647 4649 | STATE_XFERRED_DATA;
4648 4650 *(uint8_t *)&arqstat->sts_rqpkt_status =
4649 4651 STATUS_GOOD;
4650 4652
4651 4653 arqstat->sts_sensedata.es_valid = 1;
4652 4654 arqstat->sts_sensedata.es_key =
4653 4655 KEY_ILLEGAL_REQUEST;
4654 4656 arqstat->sts_sensedata.es_class =
4655 4657 CLASS_EXTENDED_SENSE;
4656 4658
4657 4659 /*
4658 4660 * LOGICAL BLOCK ADDRESS OUT OF RANGE:
4659 4661 * ASC: 0x21h; ASCQ: 0x00h;
4660 4662 */
4661 4663 arqstat->sts_sensedata.es_add_code = 0x21;
4662 4664 arqstat->sts_sensedata.es_qual_code = 0x00;
4663 4665
4664 4666 break;
4665 4667
4666 4668 default:
4667 4669 con_log(CL_ANN, (CE_CONT, "Unknown status!"));
4668 4670 pkt->pkt_reason = CMD_TRAN_ERR;
4669 4671
4670 4672 break;
4671 4673 }
4672 4674
4673 4675 atomic_add_16(&instance->fw_outstanding, (-1));
4674 4676
4675 4677 (void) mrsas_common_check(instance, cmd);
4676 4678
4677 4679 if (acmd->cmd_dmahandle) {
4678 4680 if (mrsas_check_dma_handle(
4679 4681 acmd->cmd_dmahandle) != DDI_SUCCESS) {
4680 4682 ddi_fm_service_impact(instance->dip,
4681 4683 DDI_SERVICE_UNAFFECTED);
4682 4684 pkt->pkt_reason = CMD_TRAN_ERR;
4683 4685 pkt->pkt_statistics = 0;
4684 4686 }
4685 4687 }
4686 4688
4687 4689 mrsas_return_mfi_pkt(instance, cmd);
4688 4690
4689 4691 /* Call the callback routine */
4690 4692 if (((pkt->pkt_flags & FLAG_NOINTR) == 0) &&
4691 4693 pkt->pkt_comp) {
4692 4694 (*pkt->pkt_comp)(pkt);
4693 4695 }
4694 4696
4695 4697 break;
4696 4698
4697 4699 case MFI_CMD_OP_SMP:
4698 4700 case MFI_CMD_OP_STP:
4699 4701 complete_cmd_in_sync_mode(instance, cmd);
4700 4702 break;
4701 4703
4702 4704 case MFI_CMD_OP_DCMD:
4703 4705 /* see if got an event notification */
4704 4706 if (ddi_get32(cmd->frame_dma_obj.acc_handle,
4705 4707 &cmd->frame->dcmd.opcode) ==
4706 4708 MR_DCMD_CTRL_EVENT_WAIT) {
4707 4709 if ((instance->aen_cmd == cmd) &&
4708 4710 (instance->aen_cmd->abort_aen)) {
4709 4711 con_log(CL_ANN, (CE_WARN,
4710 4712 "mrsas_softintr: "
4711 4713 "aborted_aen returned"));
4712 4714 } else {
4713 4715 atomic_add_16(&instance->fw_outstanding,
4714 4716 (-1));
4715 4717 service_mfi_aen(instance, cmd);
4716 4718 }
4717 4719 } else {
4718 4720 complete_cmd_in_sync_mode(instance, cmd);
4719 4721 }
4720 4722
4721 4723 break;
4722 4724
4723 4725 case MFI_CMD_OP_ABORT:
4724 4726 con_log(CL_ANN, (CE_NOTE, "MFI_CMD_OP_ABORT complete"));
4725 4727 /*
4726 4728 * MFI_CMD_OP_ABORT successfully completed
4727 4729 * in the synchronous mode
4728 4730 */
4729 4731 complete_cmd_in_sync_mode(instance, cmd);
4730 4732 break;
4731 4733
4732 4734 default:
4733 4735 mrsas_fm_ereport(instance, DDI_FM_DEVICE_NO_RESPONSE);
4734 4736 ddi_fm_service_impact(instance->dip, DDI_SERVICE_LOST);
4735 4737
4736 4738 if (cmd->pkt != NULL) {
4737 4739 pkt = cmd->pkt;
4738 4740 if (((pkt->pkt_flags & FLAG_NOINTR) == 0) &&
4739 4741 pkt->pkt_comp) {
4740 4742
4741 4743 con_log(CL_ANN1, (CE_CONT, "posting to "
4742 4744 "scsa cmd %p index %x pkt %p"
4743 4745 "time %llx, default ", (void *)cmd,
4744 4746 cmd->index, (void *)pkt,
4745 4747 gethrtime()));
4746 4748
4747 4749 (*pkt->pkt_comp)(pkt);
4748 4750
4749 4751 }
4750 4752 }
4751 4753 con_log(CL_ANN, (CE_WARN, "Cmd type unknown !"));
4752 4754 break;
4753 4755 }
4754 4756 }
4755 4757
4756 4758 instance->softint_running = 0;
4757 4759
4758 4760 return (DDI_INTR_CLAIMED);
4759 4761 }
4760 4762
4761 4763 /*
4762 4764 * mrsas_alloc_dma_obj
4763 4765 *
4764 4766 * Allocate the memory and other resources for an dma object.
4765 4767 */
4766 4768 int
4767 4769 mrsas_alloc_dma_obj(struct mrsas_instance *instance, dma_obj_t *obj,
4768 4770 uchar_t endian_flags)
4769 4771 {
4770 4772 int i;
4771 4773 size_t alen = 0;
4772 4774 uint_t cookie_cnt;
4773 4775 struct ddi_device_acc_attr tmp_endian_attr;
4774 4776
4775 4777 tmp_endian_attr = endian_attr;
4776 4778 tmp_endian_attr.devacc_attr_endian_flags = endian_flags;
4777 4779 tmp_endian_attr.devacc_attr_access = DDI_DEFAULT_ACC;
4778 4780
4779 4781 i = ddi_dma_alloc_handle(instance->dip, &obj->dma_attr,
4780 4782 DDI_DMA_SLEEP, NULL, &obj->dma_handle);
4781 4783 if (i != DDI_SUCCESS) {
4782 4784
4783 4785 switch (i) {
4784 4786 case DDI_DMA_BADATTR :
4785 4787 con_log(CL_ANN, (CE_WARN,
4786 4788 "Failed ddi_dma_alloc_handle- Bad attribute"));
4787 4789 break;
4788 4790 case DDI_DMA_NORESOURCES :
4789 4791 con_log(CL_ANN, (CE_WARN,
4790 4792 "Failed ddi_dma_alloc_handle- No Resources"));
4791 4793 break;
4792 4794 default :
4793 4795 con_log(CL_ANN, (CE_WARN,
4794 4796 "Failed ddi_dma_alloc_handle: "
4795 4797 "unknown status %d", i));
4796 4798 break;
4797 4799 }
4798 4800
4799 4801 return (-1);
4800 4802 }
4801 4803
4802 4804 if ((ddi_dma_mem_alloc(obj->dma_handle, obj->size, &tmp_endian_attr,
4803 4805 DDI_DMA_RDWR | DDI_DMA_STREAMING, DDI_DMA_SLEEP, NULL,
4804 4806 &obj->buffer, &alen, &obj->acc_handle) != DDI_SUCCESS) ||
4805 4807 alen < obj->size) {
4806 4808
4807 4809 ddi_dma_free_handle(&obj->dma_handle);
4808 4810
4809 4811 con_log(CL_ANN, (CE_WARN, "Failed : ddi_dma_mem_alloc"));
4810 4812
4811 4813 return (-1);
4812 4814 }
4813 4815
4814 4816 if (ddi_dma_addr_bind_handle(obj->dma_handle, NULL, obj->buffer,
4815 4817 obj->size, DDI_DMA_RDWR | DDI_DMA_STREAMING, DDI_DMA_SLEEP,
4816 4818 NULL, &obj->dma_cookie[0], &cookie_cnt) != DDI_SUCCESS) {
4817 4819
4818 4820 ddi_dma_mem_free(&obj->acc_handle);
4819 4821 ddi_dma_free_handle(&obj->dma_handle);
4820 4822
4821 4823 con_log(CL_ANN, (CE_WARN, "Failed : ddi_dma_addr_bind_handle"));
4822 4824
4823 4825 return (-1);
4824 4826 }
4825 4827
4826 4828 if (mrsas_check_dma_handle(obj->dma_handle) != DDI_SUCCESS) {
4827 4829 ddi_fm_service_impact(instance->dip, DDI_SERVICE_LOST);
4828 4830 return (-1);
4829 4831 }
4830 4832
4831 4833 if (mrsas_check_acc_handle(obj->acc_handle) != DDI_SUCCESS) {
4832 4834 ddi_fm_service_impact(instance->dip, DDI_SERVICE_LOST);
4833 4835 return (-1);
4834 4836 }
4835 4837
4836 4838 return (cookie_cnt);
4837 4839 }
4838 4840
4839 4841 /*
4840 4842 * mrsas_free_dma_obj(struct mrsas_instance *, dma_obj_t)
4841 4843 *
4842 4844 * De-allocate the memory and other resources for an dma object, which must
4843 4845 * have been alloated by a previous call to mrsas_alloc_dma_obj()
4844 4846 */
4845 4847 int
4846 4848 mrsas_free_dma_obj(struct mrsas_instance *instance, dma_obj_t obj)
4847 4849 {
4848 4850
4849 4851 if ((obj.dma_handle == NULL) || (obj.acc_handle == NULL)) {
4850 4852 return (DDI_SUCCESS);
4851 4853 }
4852 4854
4853 4855 /*
4854 4856 * NOTE: These check-handle functions fail if *_handle == NULL, but
4855 4857 * this function succeeds because of the previous check.
4856 4858 */
4857 4859 if (mrsas_check_dma_handle(obj.dma_handle) != DDI_SUCCESS) {
4858 4860 ddi_fm_service_impact(instance->dip, DDI_SERVICE_UNAFFECTED);
4859 4861 return (DDI_FAILURE);
4860 4862 }
4861 4863
4862 4864 if (mrsas_check_acc_handle(obj.acc_handle) != DDI_SUCCESS) {
4863 4865 ddi_fm_service_impact(instance->dip, DDI_SERVICE_UNAFFECTED);
4864 4866 return (DDI_FAILURE);
4865 4867 }
4866 4868
4867 4869 (void) ddi_dma_unbind_handle(obj.dma_handle);
4868 4870 ddi_dma_mem_free(&obj.acc_handle);
4869 4871 ddi_dma_free_handle(&obj.dma_handle);
4870 4872 obj.acc_handle = NULL;
4871 4873 return (DDI_SUCCESS);
4872 4874 }
4873 4875
/*
 * mrsas_dma_alloc(instance_t *, struct scsi_pkt *, struct buf *,
 * int, int (*)())
 *
 * Allocate dma resources for a new scsi command: allocate a DMA handle
 * sized to this adapter's SGL limits, bind the buf, and record the first
 * window's cookies in the scsa_cmd.  Returns DDI_SUCCESS with
 * CFLAG_DMAVALID set and pkt_resid updated, or DDI_FAILURE with bioerror()
 * set on bp and the handle released.
 */
int
mrsas_dma_alloc(struct mrsas_instance *instance, struct scsi_pkt *pkt,
    struct buf *bp, int flags, int (*callback)())
{
	int	dma_flags;
	int	(*cb)(caddr_t);
	int	i;

	ddi_dma_attr_t	tmp_dma_attr = mrsas_generic_dma_attr;
	struct scsa_cmd	*acmd = PKT2CMD(pkt);

	acmd->cmd_buf = bp;

	/* Direction comes from the buf: B_READ means device-to-host. */
	if (bp->b_flags & B_READ) {
		acmd->cmd_flags &= ~CFLAG_DMASEND;
		dma_flags = DDI_DMA_READ;
	} else {
		acmd->cmd_flags |= CFLAG_DMASEND;
		dma_flags = DDI_DMA_WRITE;
	}

	if (flags & PKT_CONSISTENT) {
		acmd->cmd_flags |= CFLAG_CONSISTENT;
		dma_flags |= DDI_DMA_CONSISTENT;
	}

	if (flags & PKT_DMA_PARTIAL) {
		dma_flags |= DDI_DMA_PARTIAL;
	}

	dma_flags |= DDI_DMA_REDZONE;

	/* NULL_FUNC means the caller cannot block waiting for resources. */
	cb = (callback == NULL_FUNC) ? DDI_DMA_DONTWAIT : DDI_DMA_SLEEP;

	/* Tailor the generic attributes to this adapter's SGL capacity. */
	tmp_dma_attr.dma_attr_sgllen = instance->max_num_sge;
	tmp_dma_attr.dma_attr_addr_hi = 0xffffffffffffffffull;
	if (instance->tbolt) {
		/* OCR-RESET FIX */
		tmp_dma_attr.dma_attr_count_max =
		    (U64)mrsas_tbolt_max_cap_maxxfer;	/* limit to 256K */
		tmp_dma_attr.dma_attr_maxxfer =
		    (U64)mrsas_tbolt_max_cap_maxxfer;	/* limit to 256K */
	}

	if ((i = ddi_dma_alloc_handle(instance->dip, &tmp_dma_attr,
	    cb, 0, &acmd->cmd_dmahandle)) != DDI_SUCCESS) {
		switch (i) {
		case DDI_DMA_BADATTR:
			bioerror(bp, EFAULT);
			return (DDI_FAILURE);

		case DDI_DMA_NORESOURCES:
			/* errno 0: resource shortage, retryable by caller */
			bioerror(bp, 0);
			return (DDI_FAILURE);

		default:
			con_log(CL_ANN, (CE_PANIC, "ddi_dma_alloc_handle: "
			    "impossible result (0x%x)", i));
			bioerror(bp, EFAULT);
			return (DDI_FAILURE);
		}
	}

	i = ddi_dma_buf_bind_handle(acmd->cmd_dmahandle, bp, dma_flags,
	    cb, 0, &acmd->cmd_dmacookies[0], &acmd->cmd_ncookies);

	switch (i) {
	case DDI_DMA_PARTIAL_MAP:
		/* Multiple windows: only valid if the caller asked for it. */
		if ((dma_flags & DDI_DMA_PARTIAL) == 0) {
			con_log(CL_ANN, (CE_PANIC, "ddi_dma_buf_bind_handle: "
			    "DDI_DMA_PARTIAL_MAP impossible"));
			goto no_dma_cookies;
		}

		if (ddi_dma_numwin(acmd->cmd_dmahandle, &acmd->cmd_nwin) ==
		    DDI_FAILURE) {
			con_log(CL_ANN, (CE_PANIC, "ddi_dma_numwin failed"));
			goto no_dma_cookies;
		}

		/* Activate the current (first) window and get its cookies. */
		if (ddi_dma_getwin(acmd->cmd_dmahandle, acmd->cmd_curwin,
		    &acmd->cmd_dma_offset, &acmd->cmd_dma_len,
		    &acmd->cmd_dmacookies[0], &acmd->cmd_ncookies) ==
		    DDI_FAILURE) {

			con_log(CL_ANN, (CE_PANIC, "ddi_dma_getwin failed"));
			goto no_dma_cookies;
		}

		goto get_dma_cookies;
	case DDI_DMA_MAPPED:
		/* Whole transfer fits in one window. */
		acmd->cmd_nwin = 1;
		acmd->cmd_dma_len = 0;
		acmd->cmd_dma_offset = 0;

get_dma_cookies:
		/*
		 * Walk the cookies of this window, up to the adapter's SGE
		 * limit, accumulating the mapped byte count in cmd_dmacount.
		 * cmd_dmacookies[0] was filled by bind/getwin above.
		 */
		i = 0;
		acmd->cmd_dmacount = 0;
		for (;;) {
			acmd->cmd_dmacount +=
			    acmd->cmd_dmacookies[i++].dmac_size;

			if (i == instance->max_num_sge ||
			    i == acmd->cmd_ncookies)
				break;

			ddi_dma_nextcookie(acmd->cmd_dmahandle,
			    &acmd->cmd_dmacookies[i]);
		}

		acmd->cmd_cookie = i;
		acmd->cmd_cookiecnt = i;

		acmd->cmd_flags |= CFLAG_DMAVALID;

		/* pkt_resid is what this window does NOT cover. */
		if (bp->b_bcount >= acmd->cmd_dmacount) {
			pkt->pkt_resid = bp->b_bcount - acmd->cmd_dmacount;
		} else {
			pkt->pkt_resid = 0;
		}

		return (DDI_SUCCESS);
	case DDI_DMA_NORESOURCES:
		bioerror(bp, 0);
		break;
	case DDI_DMA_NOMAPPING:
		bioerror(bp, EFAULT);
		break;
	case DDI_DMA_TOOBIG:
		bioerror(bp, EINVAL);
		break;
	case DDI_DMA_INUSE:
		con_log(CL_ANN, (CE_PANIC, "ddi_dma_buf_bind_handle:"
		    " DDI_DMA_INUSE impossible"));
		break;
	default:
		con_log(CL_ANN, (CE_PANIC, "ddi_dma_buf_bind_handle: "
		    "impossible result (0x%x)", i));
		break;
	}

no_dma_cookies:
	/* Failure: release the handle and mark the command unmapped. */
	ddi_dma_free_handle(&acmd->cmd_dmahandle);
	acmd->cmd_dmahandle = NULL;
	acmd->cmd_flags &= ~CFLAG_DMAVALID;
	return (DDI_FAILURE);
}
5027 5029
/*
 * mrsas_dma_move(struct mrsas_instance *, struct scsi_pkt *, struct buf *)
 *
 * move dma resources to next dma window
 *
 * Advances the partial-DMA mapping: either fetches the next batch of
 * cookies within the current window, or activates the next window when
 * the current one is exhausted.  cmd_dmacount accumulates across calls,
 * so pkt_resid reflects the total bytes mapped so far.
 */
int
mrsas_dma_move(struct mrsas_instance *instance, struct scsi_pkt *pkt,
    struct buf *bp)
{
	int i = 0;

	struct scsa_cmd	*acmd = PKT2CMD(pkt);

	/*
	 * If there are no more cookies remaining in this window,
	 * must move to the next window first.
	 */
	if (acmd->cmd_cookie == acmd->cmd_ncookies) {
		/* Single fully-consumed window: nothing left to move. */
		if (acmd->cmd_curwin == acmd->cmd_nwin && acmd->cmd_nwin == 1) {
			return (DDI_SUCCESS);
		}

		/* at last window, cannot move */
		if (++acmd->cmd_curwin >= acmd->cmd_nwin) {
			return (DDI_FAILURE);
		}

		/* Activate the new window; its first cookie lands in [0]. */
		if (ddi_dma_getwin(acmd->cmd_dmahandle, acmd->cmd_curwin,
		    &acmd->cmd_dma_offset, &acmd->cmd_dma_len,
		    &acmd->cmd_dmacookies[0], &acmd->cmd_ncookies) ==
		    DDI_FAILURE) {
			return (DDI_FAILURE);
		}

		acmd->cmd_cookie = 0;
	} else {
		/* still more cookies in this window - get the next one */
		ddi_dma_nextcookie(acmd->cmd_dmahandle,
		    &acmd->cmd_dmacookies[0]);
	}

	/* get remaining cookies in this window, up to our maximum */
	for (;;) {
		acmd->cmd_dmacount += acmd->cmd_dmacookies[i++].dmac_size;
		acmd->cmd_cookie++;

		if (i == instance->max_num_sge ||
		    acmd->cmd_cookie == acmd->cmd_ncookies) {
			break;
		}

		ddi_dma_nextcookie(acmd->cmd_dmahandle,
		    &acmd->cmd_dmacookies[i]);
	}

	acmd->cmd_cookiecnt = i;

	/* Residual is what remains unmapped after everything so far. */
	if (bp->b_bcount >= acmd->cmd_dmacount) {
		pkt->pkt_resid = bp->b_bcount - acmd->cmd_dmacount;
	} else {
		pkt->pkt_resid = 0;
	}

	return (DDI_SUCCESS);
}
5094 5096
/*
 * build_cmd
 *
 * Translate a SCSA packet into an MFI command frame.  Read/write commands
 * aimed at a logical drive become LD_READ/LD_WRITE I/O frames; everything
 * else (and all physical-disk commands) becomes a pass-through (DCDB)
 * frame.  MODE SENSE for pages 0x3/0x4 is synthesized locally without
 * going to the firmware (*cmd_done is set to 1 and NULL is returned).
 *
 * Returns the prepared mrsas_cmd, or NULL if no packet was available or
 * the command was completed internally.
 */
static struct mrsas_cmd *
build_cmd(struct mrsas_instance *instance, struct scsi_address *ap,
    struct scsi_pkt *pkt, uchar_t *cmd_done)
{
	uint16_t	flags = 0;
	uint32_t	i;
	uint32_t	context;
	uint32_t	sge_bytes;
	uint32_t	tmp_data_xfer_len;
	ddi_acc_handle_t acc_handle;
	struct mrsas_cmd		*cmd;
	struct mrsas_sge64		*mfi_sgl;
	struct mrsas_sge_ieee		*mfi_sgl_ieee;
	struct scsa_cmd			*acmd = PKT2CMD(pkt);
	struct mrsas_pthru_frame	*pthru;
	struct mrsas_io_frame		*ldio;

	/* find out if this is logical or physical drive command.  */
	acmd->islogical = MRDRV_IS_LOGICAL(ap);
	acmd->device_id = MAP_DEVICE_ID(instance, ap);
	*cmd_done = 0;

	/* get the command packet */
	if (!(cmd = mrsas_get_mfi_pkt(instance))) {
		DTRACE_PROBE2(build_cmd_mfi_err, uint16_t,
		    instance->fw_outstanding, uint16_t, instance->max_fw_cmds);
		return (NULL);
	}

	acc_handle = cmd->frame_dma_obj.acc_handle;

	/* Clear the frame buffer and assign back the context id */
	(void) memset((char *)&cmd->frame[0], 0, sizeof (union mrsas_frame));
	ddi_put32(acc_handle, &cmd->frame->hdr.context, cmd->index);

	cmd->pkt = pkt;
	cmd->cmd = acmd;
	DTRACE_PROBE3(build_cmds, uint8_t, pkt->pkt_cdbp[0],
	    ulong_t, acmd->cmd_dmacount, ulong_t, acmd->cmd_dma_len);

	/* lets get the command directions */
	if (acmd->cmd_flags & CFLAG_DMASEND) {
		flags = MFI_FRAME_DIR_WRITE;

		if (acmd->cmd_flags & CFLAG_CONSISTENT) {
			(void) ddi_dma_sync(acmd->cmd_dmahandle,
			    acmd->cmd_dma_offset, acmd->cmd_dma_len,
			    DDI_DMA_SYNC_FORDEV);
		}
	} else if (acmd->cmd_flags & ~CFLAG_DMASEND) {
		/*
		 * NOTE(review): this condition tests whether ANY flag other
		 * than CFLAG_DMASEND is set, not the inverse of the branch
		 * above.  In practice some other CFLAG_* bit is usually set
		 * so this selects the READ direction, but commands with no
		 * flags at all fall through to DIR_NONE.  Confirm this is
		 * the intended behavior.
		 */
		flags = MFI_FRAME_DIR_READ;

		if (acmd->cmd_flags & CFLAG_CONSISTENT) {
			(void) ddi_dma_sync(acmd->cmd_dmahandle,
			    acmd->cmd_dma_offset, acmd->cmd_dma_len,
			    DDI_DMA_SYNC_FORCPU);
		}
	} else {
		flags = MFI_FRAME_DIR_NONE;
	}

	/* IEEE SGL format for newer (e.g. Thunderbolt-class) adapters. */
	if (instance->flag_ieee) {
		flags |= MFI_FRAME_IEEE;
	}
	flags |= MFI_FRAME_SGL64;

	switch (pkt->pkt_cdbp[0]) {

	/*
	 * case SCMD_SYNCHRONIZE_CACHE:
	 * 	flush_cache(instance);
	 * 	mrsas_return_mfi_pkt(instance, cmd);
	 * 	*cmd_done = 1;
	 *
	 * 	return (NULL);
	 */

	case SCMD_READ:
	case SCMD_WRITE:
	case SCMD_READ_G1:
	case SCMD_WRITE_G1:
	case SCMD_READ_G4:
	case SCMD_WRITE_G4:
	case SCMD_READ_G5:
	case SCMD_WRITE_G5:
		if (acmd->islogical) {
			ldio = (struct mrsas_io_frame *)cmd->frame;

			/*
			 * prepare the Logical IO frame:
			 * 2nd bit is zero for all read cmds
			 */
			ddi_put8(acc_handle, &ldio->cmd,
			    (pkt->pkt_cdbp[0] & 0x02) ? MFI_CMD_OP_LD_WRITE
			    : MFI_CMD_OP_LD_READ);
			ddi_put8(acc_handle, &ldio->cmd_status, 0x0);
			ddi_put8(acc_handle, &ldio->scsi_status, 0x0);
			ddi_put8(acc_handle, &ldio->target_id, acmd->device_id);
			ddi_put16(acc_handle, &ldio->timeout, 0);
			ddi_put8(acc_handle, &ldio->reserved_0, 0);
			ddi_put16(acc_handle, &ldio->pad_0, 0);
			ddi_put16(acc_handle, &ldio->flags, flags);

			/* Initialize sense Information */
			bzero(cmd->sense, SENSE_LENGTH);
			ddi_put8(acc_handle, &ldio->sense_len, SENSE_LENGTH);
			ddi_put32(acc_handle, &ldio->sense_buf_phys_addr_hi, 0);
			ddi_put32(acc_handle, &ldio->sense_buf_phys_addr_lo,
			    cmd->sense_phys_addr);
			ddi_put32(acc_handle, &ldio->start_lba_hi, 0);
			/* CDB byte 1 carries flag bits except in 6-byte CDBs */
			ddi_put8(acc_handle, &ldio->access_byte,
			    (acmd->cmd_cdblen != 6) ? pkt->pkt_cdbp[1] : 0);
			ddi_put8(acc_handle, &ldio->sge_count,
			    acmd->cmd_cookiecnt);
			if (instance->flag_ieee) {
				mfi_sgl_ieee =
				    (struct mrsas_sge_ieee *)&ldio->sgl;
			} else {
				mfi_sgl = (struct mrsas_sge64 *)&ldio->sgl;
			}

			context = ddi_get32(acc_handle, &ldio->context);

			/* Decode LBA/length from the CDB by CDB group. */
			if (acmd->cmd_cdblen == CDB_GROUP0) {
				/* 6-byte cdb */
				ddi_put32(acc_handle, &ldio->lba_count, (
				    (uint16_t)(pkt->pkt_cdbp[4])));

				ddi_put32(acc_handle, &ldio->start_lba_lo, (
				    ((uint32_t)(pkt->pkt_cdbp[3])) |
				    ((uint32_t)(pkt->pkt_cdbp[2]) << 8) |
				    ((uint32_t)((pkt->pkt_cdbp[1]) & 0x1F)
				    << 16)));
			} else if (acmd->cmd_cdblen == CDB_GROUP1) {
				/* 10-byte cdb */
				ddi_put32(acc_handle, &ldio->lba_count, (
				    ((uint16_t)(pkt->pkt_cdbp[8])) |
				    ((uint16_t)(pkt->pkt_cdbp[7]) << 8)));

				ddi_put32(acc_handle, &ldio->start_lba_lo, (
				    ((uint32_t)(pkt->pkt_cdbp[5])) |
				    ((uint32_t)(pkt->pkt_cdbp[4]) << 8) |
				    ((uint32_t)(pkt->pkt_cdbp[3]) << 16) |
				    ((uint32_t)(pkt->pkt_cdbp[2]) << 24)));
			} else if (acmd->cmd_cdblen == CDB_GROUP5) {
				/* 12-byte cdb */
				ddi_put32(acc_handle, &ldio->lba_count, (
				    ((uint32_t)(pkt->pkt_cdbp[9])) |
				    ((uint32_t)(pkt->pkt_cdbp[8]) << 8) |
				    ((uint32_t)(pkt->pkt_cdbp[7]) << 16) |
				    ((uint32_t)(pkt->pkt_cdbp[6]) << 24)));

				ddi_put32(acc_handle, &ldio->start_lba_lo, (
				    ((uint32_t)(pkt->pkt_cdbp[5])) |
				    ((uint32_t)(pkt->pkt_cdbp[4]) << 8) |
				    ((uint32_t)(pkt->pkt_cdbp[3]) << 16) |
				    ((uint32_t)(pkt->pkt_cdbp[2]) << 24)));
			} else if (acmd->cmd_cdblen == CDB_GROUP4) {
				/* 16-byte cdb */
				ddi_put32(acc_handle, &ldio->lba_count, (
				    ((uint32_t)(pkt->pkt_cdbp[13])) |
				    ((uint32_t)(pkt->pkt_cdbp[12]) << 8) |
				    ((uint32_t)(pkt->pkt_cdbp[11]) << 16) |
				    ((uint32_t)(pkt->pkt_cdbp[10]) << 24)));

				ddi_put32(acc_handle, &ldio->start_lba_lo, (
				    ((uint32_t)(pkt->pkt_cdbp[9])) |
				    ((uint32_t)(pkt->pkt_cdbp[8]) << 8) |
				    ((uint32_t)(pkt->pkt_cdbp[7]) << 16) |
				    ((uint32_t)(pkt->pkt_cdbp[6]) << 24)));

				ddi_put32(acc_handle, &ldio->start_lba_hi, (
				    ((uint32_t)(pkt->pkt_cdbp[5])) |
				    ((uint32_t)(pkt->pkt_cdbp[4]) << 8) |
				    ((uint32_t)(pkt->pkt_cdbp[3]) << 16) |
				    ((uint32_t)(pkt->pkt_cdbp[2]) << 24)));
			}

			break;
		}
		/* fall through For all non-rd/wr and physical disk cmds */
	default:

		switch (pkt->pkt_cdbp[0]) {
		case SCMD_MODE_SENSE:
		case SCMD_MODE_SENSE_G1: {
			union scsi_cdb	*cdbp;
			uint16_t	page_code;

			cdbp = (void *)pkt->pkt_cdbp;
			page_code = (uint16_t)cdbp->cdb_un.sg.scsi[0];
			switch (page_code) {
			case 0x3:
			case 0x4:
				/* Pages 3/4 are answered by the driver. */
				(void) mrsas_mode_sense_build(pkt);
				mrsas_return_mfi_pkt(instance, cmd);
				*cmd_done = 1;
				return (NULL);
			}
			break;
		}
		default:
			break;
		}

		/* Everything else goes to firmware as a pass-through. */
		pthru	= (struct mrsas_pthru_frame *)cmd->frame;

		/* prepare the DCDB frame */
		ddi_put8(acc_handle, &pthru->cmd, (acmd->islogical) ?
		    MFI_CMD_OP_LD_SCSI : MFI_CMD_OP_PD_SCSI);
		ddi_put8(acc_handle, &pthru->cmd_status, 0x0);
		ddi_put8(acc_handle, &pthru->scsi_status, 0x0);
		ddi_put8(acc_handle, &pthru->target_id, acmd->device_id);
		ddi_put8(acc_handle, &pthru->lun, 0);
		ddi_put8(acc_handle, &pthru->cdb_len, acmd->cmd_cdblen);
		ddi_put16(acc_handle, &pthru->timeout, 0);
		ddi_put16(acc_handle, &pthru->flags, flags);
		/* Total transfer length is the sum of all cookie sizes. */
		tmp_data_xfer_len = 0;
		for (i = 0; i < acmd->cmd_cookiecnt; i++) {
			tmp_data_xfer_len += acmd->cmd_dmacookies[i].dmac_size;
		}
		ddi_put32(acc_handle, &pthru->data_xfer_len,
		    tmp_data_xfer_len);
		ddi_put8(acc_handle, &pthru->sge_count, acmd->cmd_cookiecnt);
		if (instance->flag_ieee) {
			mfi_sgl_ieee = (struct mrsas_sge_ieee *)&pthru->sgl;
		} else {
			mfi_sgl	= (struct mrsas_sge64 *)&pthru->sgl;
		}

		bzero(cmd->sense, SENSE_LENGTH);
		ddi_put8(acc_handle, &pthru->sense_len, SENSE_LENGTH);
		ddi_put32(acc_handle, &pthru->sense_buf_phys_addr_hi, 0);
		ddi_put32(acc_handle, &pthru->sense_buf_phys_addr_lo,
		    cmd->sense_phys_addr);

		context = ddi_get32(acc_handle, &pthru->context);
		ddi_rep_put8(acc_handle, (uint8_t *)pkt->pkt_cdbp,
		    (uint8_t *)pthru->cdb, acmd->cmd_cdblen, DDI_DEV_AUTOINCR);

		break;
	}
#ifdef lint
	context = context;
#endif
	/* prepare the scatter-gather list for the firmware */
	if (instance->flag_ieee) {
		for (i = 0; i < acmd->cmd_cookiecnt; i++, mfi_sgl_ieee++) {
			ddi_put64(acc_handle, &mfi_sgl_ieee->phys_addr,
			    acmd->cmd_dmacookies[i].dmac_laddress);
			ddi_put32(acc_handle, &mfi_sgl_ieee->length,
			    acmd->cmd_dmacookies[i].dmac_size);
		}
		sge_bytes = sizeof (struct mrsas_sge_ieee)*acmd->cmd_cookiecnt;
	} else {
		for (i = 0; i < acmd->cmd_cookiecnt; i++, mfi_sgl++) {
			ddi_put64(acc_handle, &mfi_sgl->phys_addr,
			    acmd->cmd_dmacookies[i].dmac_laddress);
			ddi_put32(acc_handle, &mfi_sgl->length,
			    acmd->cmd_dmacookies[i].dmac_size);
		}
		sge_bytes = sizeof (struct mrsas_sge64)*acmd->cmd_cookiecnt;
	}

	/*
	 * Frame count: header frame plus however many additional frames the
	 * SGL spills into, capped at 8.
	 */
	cmd->frame_count = (sge_bytes / MRMFI_FRAME_SIZE) +
	    ((sge_bytes % MRMFI_FRAME_SIZE) ? 1 : 0) + 1;

	if (cmd->frame_count >= 8) {
		cmd->frame_count = 8;
	}

	return (cmd);
}
5371 5373
5372 5374 /*
5373 5375 * wait_for_outstanding - Wait for all outstanding cmds
5374 5376 * @instance: Adapter soft state
5375 5377 *
5376 5378 * This function waits for upto MRDRV_RESET_WAIT_TIME seconds for FW to
5377 5379 * complete all its outstanding commands. Returns error if one or more IOs
5378 5380 * are pending after this time period.
5379 5381 */
5380 5382 static int
5381 5383 wait_for_outstanding(struct mrsas_instance *instance)
5382 5384 {
5383 5385 int i;
5384 5386 uint32_t wait_time = 90;
5385 5387
5386 5388 for (i = 0; i < wait_time; i++) {
5387 5389 if (!instance->fw_outstanding) {
5388 5390 break;
5389 5391 }
5390 5392
5391 5393 drv_usecwait(MILLISEC); /* wait for 1000 usecs */;
5392 5394 }
5393 5395
5394 5396 if (instance->fw_outstanding) {
5395 5397 return (1);
5396 5398 }
5397 5399
5398 5400 return (0);
5399 5401 }
5400 5402
5401 5403 /*
5402 5404 * issue_mfi_pthru
5403 5405 */
5404 5406 static int
5405 5407 issue_mfi_pthru(struct mrsas_instance *instance, struct mrsas_ioctl *ioctl,
5406 5408 struct mrsas_cmd *cmd, int mode)
5407 5409 {
5408 5410 void *ubuf;
5409 5411 uint32_t kphys_addr = 0;
5410 5412 uint32_t xferlen = 0;
5411 5413 uint32_t new_xfer_length = 0;
5412 5414 uint_t model;
5413 5415 ddi_acc_handle_t acc_handle = cmd->frame_dma_obj.acc_handle;
5414 5416 dma_obj_t pthru_dma_obj;
5415 5417 struct mrsas_pthru_frame *kpthru;
5416 5418 struct mrsas_pthru_frame *pthru;
5417 5419 int i;
5418 5420 pthru = &cmd->frame->pthru;
5419 5421 kpthru = (struct mrsas_pthru_frame *)&ioctl->frame[0];
5420 5422
5421 5423 if (instance->adapterresetinprogress) {
5422 5424 con_log(CL_ANN1, (CE_WARN, "issue_mfi_pthru: Reset flag set, "
5423 5425 "returning mfi_pkt and setting TRAN_BUSY\n"));
5424 5426 return (DDI_FAILURE);
5425 5427 }
5426 5428 model = ddi_model_convert_from(mode & FMODELS);
5427 5429 if (model == DDI_MODEL_ILP32) {
5428 5430 con_log(CL_ANN1, (CE_CONT, "issue_mfi_pthru: DDI_MODEL_LP32"));
5429 5431
5430 5432 xferlen = kpthru->sgl.sge32[0].length;
5431 5433
5432 5434 ubuf = (void *)(ulong_t)kpthru->sgl.sge32[0].phys_addr;
5433 5435 } else {
5434 5436 #ifdef _ILP32
5435 5437 con_log(CL_ANN1, (CE_CONT, "issue_mfi_pthru: DDI_MODEL_LP32"));
5436 5438 xferlen = kpthru->sgl.sge32[0].length;
5437 5439 ubuf = (void *)(ulong_t)kpthru->sgl.sge32[0].phys_addr;
5438 5440 #else
5439 5441 con_log(CL_ANN1, (CE_CONT, "issue_mfi_pthru: DDI_MODEL_LP64"));
5440 5442 xferlen = kpthru->sgl.sge64[0].length;
5441 5443 ubuf = (void *)(ulong_t)kpthru->sgl.sge64[0].phys_addr;
5442 5444 #endif
5443 5445 }
5444 5446
5445 5447 if (xferlen) {
5446 5448 /* means IOCTL requires DMA */
5447 5449 /* allocate the data transfer buffer */
5448 5450 /* pthru_dma_obj.size = xferlen; */
5449 5451 MRSAS_GET_BOUNDARY_ALIGNED_LEN(xferlen, new_xfer_length,
5450 5452 PAGESIZE);
5451 5453 pthru_dma_obj.size = new_xfer_length;
5452 5454 pthru_dma_obj.dma_attr = mrsas_generic_dma_attr;
5453 5455 pthru_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
5454 5456 pthru_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
5455 5457 pthru_dma_obj.dma_attr.dma_attr_sgllen = 1;
5456 5458 pthru_dma_obj.dma_attr.dma_attr_align = 1;
5457 5459
5458 5460 /* allocate kernel buffer for DMA */
5459 5461 if (mrsas_alloc_dma_obj(instance, &pthru_dma_obj,
5460 5462 (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
5461 5463 con_log(CL_ANN, (CE_WARN, "issue_mfi_pthru: "
5462 5464 "could not allocate data transfer buffer."));
5463 5465 return (DDI_FAILURE);
5464 5466 }
5465 5467 (void) memset(pthru_dma_obj.buffer, 0, xferlen);
5466 5468
5467 5469 /* If IOCTL requires DMA WRITE, do ddi_copyin IOCTL data copy */
5468 5470 if (kpthru->flags & MFI_FRAME_DIR_WRITE) {
5469 5471 for (i = 0; i < xferlen; i++) {
5470 5472 if (ddi_copyin((uint8_t *)ubuf+i,
5471 5473 (uint8_t *)pthru_dma_obj.buffer+i,
5472 5474 1, mode)) {
5473 5475 con_log(CL_ANN, (CE_WARN,
5474 5476 "issue_mfi_pthru : "
5475 5477 "copy from user space failed"));
5476 5478 return (DDI_FAILURE);
5477 5479 }
5478 5480 }
5479 5481 }
5480 5482
5481 5483 kphys_addr = pthru_dma_obj.dma_cookie[0].dmac_address;
5482 5484 }
5483 5485
5484 5486 ddi_put8(acc_handle, &pthru->cmd, kpthru->cmd);
5485 5487 ddi_put8(acc_handle, &pthru->sense_len, SENSE_LENGTH);
5486 5488 ddi_put8(acc_handle, &pthru->cmd_status, 0);
5487 5489 ddi_put8(acc_handle, &pthru->scsi_status, 0);
5488 5490 ddi_put8(acc_handle, &pthru->target_id, kpthru->target_id);
5489 5491 ddi_put8(acc_handle, &pthru->lun, kpthru->lun);
5490 5492 ddi_put8(acc_handle, &pthru->cdb_len, kpthru->cdb_len);
5491 5493 ddi_put8(acc_handle, &pthru->sge_count, kpthru->sge_count);
5492 5494 ddi_put16(acc_handle, &pthru->timeout, kpthru->timeout);
5493 5495 ddi_put32(acc_handle, &pthru->data_xfer_len, kpthru->data_xfer_len);
5494 5496
5495 5497 ddi_put32(acc_handle, &pthru->sense_buf_phys_addr_hi, 0);
5496 5498 pthru->sense_buf_phys_addr_lo = cmd->sense_phys_addr;
5497 5499 /* ddi_put32(acc_handle, &pthru->sense_buf_phys_addr_lo, 0); */
5498 5500
5499 5501 ddi_rep_put8(acc_handle, (uint8_t *)kpthru->cdb, (uint8_t *)pthru->cdb,
5500 5502 pthru->cdb_len, DDI_DEV_AUTOINCR);
5501 5503
5502 5504 ddi_put16(acc_handle, &pthru->flags, kpthru->flags & ~MFI_FRAME_SGL64);
5503 5505 ddi_put32(acc_handle, &pthru->sgl.sge32[0].length, xferlen);
5504 5506 ddi_put32(acc_handle, &pthru->sgl.sge32[0].phys_addr, kphys_addr);
5505 5507
5506 5508 cmd->sync_cmd = MRSAS_TRUE;
5507 5509 cmd->frame_count = 1;
5508 5510
5509 5511 if (instance->tbolt) {
5510 5512 mr_sas_tbolt_build_mfi_cmd(instance, cmd);
5511 5513 }
5512 5514
5513 5515 if (instance->func_ptr->issue_cmd_in_sync_mode(instance, cmd)) {
5514 5516 con_log(CL_ANN, (CE_WARN,
5515 5517 "issue_mfi_pthru: fw_ioctl failed"));
5516 5518 } else {
5517 5519 if (xferlen && kpthru->flags & MFI_FRAME_DIR_READ) {
5518 5520 for (i = 0; i < xferlen; i++) {
5519 5521 if (ddi_copyout(
5520 5522 (uint8_t *)pthru_dma_obj.buffer+i,
5521 5523 (uint8_t *)ubuf+i, 1, mode)) {
5522 5524 con_log(CL_ANN, (CE_WARN,
5523 5525 "issue_mfi_pthru : "
5524 5526 "copy to user space failed"));
5525 5527 return (DDI_FAILURE);
5526 5528 }
5527 5529 }
5528 5530 }
5529 5531 }
5530 5532
5531 5533 kpthru->cmd_status = ddi_get8(acc_handle, &pthru->cmd_status);
5532 5534 kpthru->scsi_status = ddi_get8(acc_handle, &pthru->scsi_status);
5533 5535
5534 5536 con_log(CL_ANN, (CE_CONT, "issue_mfi_pthru: cmd_status %x, "
5535 5537 "scsi_status %x", kpthru->cmd_status, kpthru->scsi_status));
5536 5538 DTRACE_PROBE3(issue_pthru, uint8_t, kpthru->cmd, uint8_t,
5537 5539 kpthru->cmd_status, uint8_t, kpthru->scsi_status);
5538 5540
5539 5541 if (kpthru->sense_len) {
5540 5542 uint_t sense_len = SENSE_LENGTH;
5541 5543 void *sense_ubuf =
5542 5544 (void *)(ulong_t)kpthru->sense_buf_phys_addr_lo;
5543 5545 if (kpthru->sense_len <= SENSE_LENGTH) {
5544 5546 sense_len = kpthru->sense_len;
5545 5547 }
5546 5548
5547 5549 for (i = 0; i < sense_len; i++) {
5548 5550 if (ddi_copyout(
5549 5551 (uint8_t *)cmd->sense+i,
5550 5552 (uint8_t *)sense_ubuf+i, 1, mode)) {
5551 5553 con_log(CL_ANN, (CE_WARN,
5552 5554 "issue_mfi_pthru : "
5553 5555 "copy to user space failed"));
5554 5556 }
5555 5557 con_log(CL_DLEVEL1, (CE_WARN,
5556 5558 "Copying Sense info sense_buff[%d] = 0x%X",
5557 5559 i, *((uint8_t *)cmd->sense + i)));
5558 5560 }
5559 5561 }
5560 5562 (void) ddi_dma_sync(cmd->frame_dma_obj.dma_handle, 0, 0,
5561 5563 DDI_DMA_SYNC_FORDEV);
5562 5564
5563 5565 if (xferlen) {
5564 5566 /* free kernel buffer */
5565 5567 if (mrsas_free_dma_obj(instance, pthru_dma_obj) != DDI_SUCCESS)
5566 5568 return (DDI_FAILURE);
5567 5569 }
5568 5570
5569 5571 return (DDI_SUCCESS);
5570 5572 }
5571 5573
5572 5574 /*
5573 5575 * issue_mfi_dcmd
5574 5576 */
5575 5577 static int
5576 5578 issue_mfi_dcmd(struct mrsas_instance *instance, struct mrsas_ioctl *ioctl,
5577 5579 struct mrsas_cmd *cmd, int mode)
5578 5580 {
5579 5581 void *ubuf;
5580 5582 uint32_t kphys_addr = 0;
5581 5583 uint32_t xferlen = 0;
5582 5584 uint32_t new_xfer_length = 0;
5583 5585 uint32_t model;
5584 5586 dma_obj_t dcmd_dma_obj;
5585 5587 struct mrsas_dcmd_frame *kdcmd;
5586 5588 struct mrsas_dcmd_frame *dcmd;
5587 5589 ddi_acc_handle_t acc_handle = cmd->frame_dma_obj.acc_handle;
5588 5590 int i;
5589 5591 dcmd = &cmd->frame->dcmd;
5590 5592 kdcmd = (struct mrsas_dcmd_frame *)&ioctl->frame[0];
5591 5593
5592 5594 if (instance->adapterresetinprogress) {
5593 5595 con_log(CL_ANN1, (CE_NOTE, "Reset flag set, "
5594 5596 "returning mfi_pkt and setting TRAN_BUSY"));
5595 5597 return (DDI_FAILURE);
5596 5598 }
5597 5599 model = ddi_model_convert_from(mode & FMODELS);
5598 5600 if (model == DDI_MODEL_ILP32) {
5599 5601 con_log(CL_ANN1, (CE_CONT, "issue_mfi_dcmd: DDI_MODEL_ILP32"));
5600 5602
5601 5603 xferlen = kdcmd->sgl.sge32[0].length;
5602 5604
5603 5605 ubuf = (void *)(ulong_t)kdcmd->sgl.sge32[0].phys_addr;
5604 5606 } else {
5605 5607 #ifdef _ILP32
5606 5608 con_log(CL_ANN1, (CE_CONT, "issue_mfi_dcmd: DDI_MODEL_ILP32"));
5607 5609 xferlen = kdcmd->sgl.sge32[0].length;
5608 5610 ubuf = (void *)(ulong_t)kdcmd->sgl.sge32[0].phys_addr;
5609 5611 #else
5610 5612 con_log(CL_ANN1, (CE_CONT, "issue_mfi_dcmd: DDI_MODEL_LP64"));
5611 5613 xferlen = kdcmd->sgl.sge64[0].length;
5612 5614 ubuf = (void *)(ulong_t)kdcmd->sgl.sge64[0].phys_addr;
5613 5615 #endif
5614 5616 }
5615 5617 if (xferlen) {
5616 5618 /* means IOCTL requires DMA */
5617 5619 /* allocate the data transfer buffer */
5618 5620 /* dcmd_dma_obj.size = xferlen; */
5619 5621 MRSAS_GET_BOUNDARY_ALIGNED_LEN(xferlen, new_xfer_length,
5620 5622 PAGESIZE);
5621 5623 dcmd_dma_obj.size = new_xfer_length;
5622 5624 dcmd_dma_obj.dma_attr = mrsas_generic_dma_attr;
5623 5625 dcmd_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
5624 5626 dcmd_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
5625 5627 dcmd_dma_obj.dma_attr.dma_attr_sgllen = 1;
5626 5628 dcmd_dma_obj.dma_attr.dma_attr_align = 1;
5627 5629
5628 5630 /* allocate kernel buffer for DMA */
5629 5631 if (mrsas_alloc_dma_obj(instance, &dcmd_dma_obj,
5630 5632 (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
5631 5633 con_log(CL_ANN,
5632 5634 (CE_WARN, "issue_mfi_dcmd: could not "
5633 5635 "allocate data transfer buffer."));
5634 5636 return (DDI_FAILURE);
5635 5637 }
5636 5638 (void) memset(dcmd_dma_obj.buffer, 0, xferlen);
5637 5639
5638 5640 /* If IOCTL requires DMA WRITE, do ddi_copyin IOCTL data copy */
5639 5641 if (kdcmd->flags & MFI_FRAME_DIR_WRITE) {
5640 5642 for (i = 0; i < xferlen; i++) {
5641 5643 if (ddi_copyin((uint8_t *)ubuf + i,
5642 5644 (uint8_t *)dcmd_dma_obj.buffer + i,
5643 5645 1, mode)) {
5644 5646 con_log(CL_ANN, (CE_WARN,
5645 5647 "issue_mfi_dcmd : "
5646 5648 "copy from user space failed"));
5647 5649 return (DDI_FAILURE);
5648 5650 }
5649 5651 }
5650 5652 }
5651 5653
5652 5654 kphys_addr = dcmd_dma_obj.dma_cookie[0].dmac_address;
5653 5655 }
5654 5656
5655 5657 ddi_put8(acc_handle, &dcmd->cmd, kdcmd->cmd);
5656 5658 ddi_put8(acc_handle, &dcmd->cmd_status, 0);
5657 5659 ddi_put8(acc_handle, &dcmd->sge_count, kdcmd->sge_count);
5658 5660 ddi_put16(acc_handle, &dcmd->timeout, kdcmd->timeout);
5659 5661 ddi_put32(acc_handle, &dcmd->data_xfer_len, kdcmd->data_xfer_len);
5660 5662 ddi_put32(acc_handle, &dcmd->opcode, kdcmd->opcode);
5661 5663
5662 5664 ddi_rep_put8(acc_handle, (uint8_t *)kdcmd->mbox.b,
5663 5665 (uint8_t *)dcmd->mbox.b, DCMD_MBOX_SZ, DDI_DEV_AUTOINCR);
5664 5666
5665 5667 ddi_put16(acc_handle, &dcmd->flags, kdcmd->flags & ~MFI_FRAME_SGL64);
5666 5668 ddi_put32(acc_handle, &dcmd->sgl.sge32[0].length, xferlen);
5667 5669 ddi_put32(acc_handle, &dcmd->sgl.sge32[0].phys_addr, kphys_addr);
5668 5670
5669 5671 cmd->sync_cmd = MRSAS_TRUE;
5670 5672 cmd->frame_count = 1;
5671 5673
5672 5674 if (instance->tbolt) {
5673 5675 mr_sas_tbolt_build_mfi_cmd(instance, cmd);
5674 5676 }
5675 5677
5676 5678 if (instance->func_ptr->issue_cmd_in_sync_mode(instance, cmd)) {
5677 5679 con_log(CL_ANN, (CE_WARN, "issue_mfi_dcmd: fw_ioctl failed"));
5678 5680 } else {
5679 5681 if (xferlen && (kdcmd->flags & MFI_FRAME_DIR_READ)) {
5680 5682 for (i = 0; i < xferlen; i++) {
5681 5683 if (ddi_copyout(
5682 5684 (uint8_t *)dcmd_dma_obj.buffer + i,
5683 5685 (uint8_t *)ubuf + i,
5684 5686 1, mode)) {
5685 5687 con_log(CL_ANN, (CE_WARN,
5686 5688 "issue_mfi_dcmd : "
5687 5689 "copy to user space failed"));
5688 5690 return (DDI_FAILURE);
5689 5691 }
5690 5692 }
5691 5693 }
5692 5694 }
5693 5695
5694 5696 kdcmd->cmd_status = ddi_get8(acc_handle, &dcmd->cmd_status);
5695 5697 con_log(CL_ANN,
5696 5698 (CE_CONT, "issue_mfi_dcmd: cmd_status %x", kdcmd->cmd_status));
5697 5699 DTRACE_PROBE3(issue_dcmd, uint32_t, kdcmd->opcode, uint8_t,
5698 5700 kdcmd->cmd, uint8_t, kdcmd->cmd_status);
5699 5701
5700 5702 if (xferlen) {
5701 5703 /* free kernel buffer */
5702 5704 if (mrsas_free_dma_obj(instance, dcmd_dma_obj) != DDI_SUCCESS)
5703 5705 return (DDI_FAILURE);
5704 5706 }
5705 5707
5706 5708 return (DDI_SUCCESS);
5707 5709 }
5708 5710
5709 5711 /*
5710 5712 * issue_mfi_smp
5711 5713 */
5712 5714 static int
5713 5715 issue_mfi_smp(struct mrsas_instance *instance, struct mrsas_ioctl *ioctl,
5714 5716 struct mrsas_cmd *cmd, int mode)
5715 5717 {
5716 5718 void *request_ubuf;
5717 5719 void *response_ubuf;
5718 5720 uint32_t request_xferlen = 0;
5719 5721 uint32_t response_xferlen = 0;
5720 5722 uint32_t new_xfer_length1 = 0;
5721 5723 uint32_t new_xfer_length2 = 0;
5722 5724 uint_t model;
5723 5725 dma_obj_t request_dma_obj;
5724 5726 dma_obj_t response_dma_obj;
5725 5727 ddi_acc_handle_t acc_handle = cmd->frame_dma_obj.acc_handle;
5726 5728 struct mrsas_smp_frame *ksmp;
5727 5729 struct mrsas_smp_frame *smp;
5728 5730 struct mrsas_sge32 *sge32;
5729 5731 #ifndef _ILP32
5730 5732 struct mrsas_sge64 *sge64;
5731 5733 #endif
5732 5734 int i;
5733 5735 uint64_t tmp_sas_addr;
5734 5736
5735 5737 smp = &cmd->frame->smp;
5736 5738 ksmp = (struct mrsas_smp_frame *)&ioctl->frame[0];
5737 5739
5738 5740 if (instance->adapterresetinprogress) {
5739 5741 con_log(CL_ANN1, (CE_WARN, "Reset flag set, "
5740 5742 "returning mfi_pkt and setting TRAN_BUSY\n"));
5741 5743 return (DDI_FAILURE);
5742 5744 }
5743 5745 model = ddi_model_convert_from(mode & FMODELS);
5744 5746 if (model == DDI_MODEL_ILP32) {
5745 5747 con_log(CL_ANN1, (CE_CONT, "issue_mfi_smp: DDI_MODEL_ILP32"));
5746 5748
5747 5749 sge32 = &ksmp->sgl[0].sge32[0];
5748 5750 response_xferlen = sge32[0].length;
5749 5751 request_xferlen = sge32[1].length;
5750 5752 con_log(CL_ANN, (CE_CONT, "issue_mfi_smp: "
5751 5753 "response_xferlen = %x, request_xferlen = %x",
5752 5754 response_xferlen, request_xferlen));
5753 5755
5754 5756 response_ubuf = (void *)(ulong_t)sge32[0].phys_addr;
5755 5757 request_ubuf = (void *)(ulong_t)sge32[1].phys_addr;
5756 5758 con_log(CL_ANN1, (CE_CONT, "issue_mfi_smp: "
5757 5759 "response_ubuf = %p, request_ubuf = %p",
5758 5760 response_ubuf, request_ubuf));
5759 5761 } else {
5760 5762 #ifdef _ILP32
5761 5763 con_log(CL_ANN1, (CE_CONT, "issue_mfi_smp: DDI_MODEL_ILP32"));
5762 5764
5763 5765 sge32 = &ksmp->sgl[0].sge32[0];
5764 5766 response_xferlen = sge32[0].length;
5765 5767 request_xferlen = sge32[1].length;
5766 5768 con_log(CL_ANN, (CE_CONT, "issue_mfi_smp: "
5767 5769 "response_xferlen = %x, request_xferlen = %x",
5768 5770 response_xferlen, request_xferlen));
5769 5771
5770 5772 response_ubuf = (void *)(ulong_t)sge32[0].phys_addr;
5771 5773 request_ubuf = (void *)(ulong_t)sge32[1].phys_addr;
5772 5774 con_log(CL_ANN1, (CE_CONT, "issue_mfi_smp: "
5773 5775 "response_ubuf = %p, request_ubuf = %p",
5774 5776 response_ubuf, request_ubuf));
5775 5777 #else
5776 5778 con_log(CL_ANN1, (CE_CONT, "issue_mfi_smp: DDI_MODEL_LP64"));
5777 5779
5778 5780 sge64 = &ksmp->sgl[0].sge64[0];
5779 5781 response_xferlen = sge64[0].length;
5780 5782 request_xferlen = sge64[1].length;
5781 5783
5782 5784 response_ubuf = (void *)(ulong_t)sge64[0].phys_addr;
5783 5785 request_ubuf = (void *)(ulong_t)sge64[1].phys_addr;
5784 5786 #endif
5785 5787 }
5786 5788 if (request_xferlen) {
5787 5789 /* means IOCTL requires DMA */
5788 5790 /* allocate the data transfer buffer */
5789 5791 /* request_dma_obj.size = request_xferlen; */
5790 5792 MRSAS_GET_BOUNDARY_ALIGNED_LEN(request_xferlen,
5791 5793 new_xfer_length1, PAGESIZE);
5792 5794 request_dma_obj.size = new_xfer_length1;
5793 5795 request_dma_obj.dma_attr = mrsas_generic_dma_attr;
5794 5796 request_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
5795 5797 request_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
5796 5798 request_dma_obj.dma_attr.dma_attr_sgllen = 1;
5797 5799 request_dma_obj.dma_attr.dma_attr_align = 1;
5798 5800
5799 5801 /* allocate kernel buffer for DMA */
5800 5802 if (mrsas_alloc_dma_obj(instance, &request_dma_obj,
5801 5803 (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
5802 5804 con_log(CL_ANN, (CE_WARN, "issue_mfi_smp: "
5803 5805 "could not allocate data transfer buffer."));
5804 5806 return (DDI_FAILURE);
5805 5807 }
5806 5808 (void) memset(request_dma_obj.buffer, 0, request_xferlen);
5807 5809
5808 5810 /* If IOCTL requires DMA WRITE, do ddi_copyin IOCTL data copy */
5809 5811 for (i = 0; i < request_xferlen; i++) {
5810 5812 if (ddi_copyin((uint8_t *)request_ubuf + i,
5811 5813 (uint8_t *)request_dma_obj.buffer + i,
5812 5814 1, mode)) {
5813 5815 con_log(CL_ANN, (CE_WARN, "issue_mfi_smp: "
5814 5816 "copy from user space failed"));
5815 5817 return (DDI_FAILURE);
5816 5818 }
5817 5819 }
5818 5820 }
5819 5821
5820 5822 if (response_xferlen) {
5821 5823 /* means IOCTL requires DMA */
5822 5824 /* allocate the data transfer buffer */
5823 5825 /* response_dma_obj.size = response_xferlen; */
5824 5826 MRSAS_GET_BOUNDARY_ALIGNED_LEN(response_xferlen,
5825 5827 new_xfer_length2, PAGESIZE);
5826 5828 response_dma_obj.size = new_xfer_length2;
5827 5829 response_dma_obj.dma_attr = mrsas_generic_dma_attr;
5828 5830 response_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
5829 5831 response_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
5830 5832 response_dma_obj.dma_attr.dma_attr_sgllen = 1;
5831 5833 response_dma_obj.dma_attr.dma_attr_align = 1;
5832 5834
5833 5835 /* allocate kernel buffer for DMA */
5834 5836 if (mrsas_alloc_dma_obj(instance, &response_dma_obj,
5835 5837 (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
5836 5838 con_log(CL_ANN, (CE_WARN, "issue_mfi_smp: "
5837 5839 "could not allocate data transfer buffer."));
5838 5840 return (DDI_FAILURE);
5839 5841 }
5840 5842 (void) memset(response_dma_obj.buffer, 0, response_xferlen);
5841 5843
5842 5844 /* If IOCTL requires DMA WRITE, do ddi_copyin IOCTL data copy */
5843 5845 for (i = 0; i < response_xferlen; i++) {
5844 5846 if (ddi_copyin((uint8_t *)response_ubuf + i,
5845 5847 (uint8_t *)response_dma_obj.buffer + i,
5846 5848 1, mode)) {
5847 5849 con_log(CL_ANN, (CE_WARN, "issue_mfi_smp: "
5848 5850 "copy from user space failed"));
5849 5851 return (DDI_FAILURE);
5850 5852 }
5851 5853 }
5852 5854 }
5853 5855
5854 5856 ddi_put8(acc_handle, &smp->cmd, ksmp->cmd);
5855 5857 ddi_put8(acc_handle, &smp->cmd_status, 0);
5856 5858 ddi_put8(acc_handle, &smp->connection_status, 0);
5857 5859 ddi_put8(acc_handle, &smp->sge_count, ksmp->sge_count);
5858 5860 /* smp->context = ksmp->context; */
5859 5861 ddi_put16(acc_handle, &smp->timeout, ksmp->timeout);
5860 5862 ddi_put32(acc_handle, &smp->data_xfer_len, ksmp->data_xfer_len);
5861 5863
5862 5864 bcopy((void *)&ksmp->sas_addr, (void *)&tmp_sas_addr,
5863 5865 sizeof (uint64_t));
5864 5866 ddi_put64(acc_handle, &smp->sas_addr, tmp_sas_addr);
5865 5867
5866 5868 ddi_put16(acc_handle, &smp->flags, ksmp->flags & ~MFI_FRAME_SGL64);
5867 5869
5868 5870 model = ddi_model_convert_from(mode & FMODELS);
5869 5871 if (model == DDI_MODEL_ILP32) {
5870 5872 con_log(CL_ANN1, (CE_CONT,
5871 5873 "issue_mfi_smp: DDI_MODEL_ILP32"));
5872 5874
5873 5875 sge32 = &smp->sgl[0].sge32[0];
5874 5876 ddi_put32(acc_handle, &sge32[0].length, response_xferlen);
5875 5877 ddi_put32(acc_handle, &sge32[0].phys_addr,
5876 5878 response_dma_obj.dma_cookie[0].dmac_address);
5877 5879 ddi_put32(acc_handle, &sge32[1].length, request_xferlen);
5878 5880 ddi_put32(acc_handle, &sge32[1].phys_addr,
5879 5881 request_dma_obj.dma_cookie[0].dmac_address);
5880 5882 } else {
5881 5883 #ifdef _ILP32
5882 5884 con_log(CL_ANN1, (CE_CONT,
5883 5885 "issue_mfi_smp: DDI_MODEL_ILP32"));
5884 5886 sge32 = &smp->sgl[0].sge32[0];
5885 5887 ddi_put32(acc_handle, &sge32[0].length, response_xferlen);
5886 5888 ddi_put32(acc_handle, &sge32[0].phys_addr,
5887 5889 response_dma_obj.dma_cookie[0].dmac_address);
5888 5890 ddi_put32(acc_handle, &sge32[1].length, request_xferlen);
5889 5891 ddi_put32(acc_handle, &sge32[1].phys_addr,
5890 5892 request_dma_obj.dma_cookie[0].dmac_address);
5891 5893 #else
5892 5894 con_log(CL_ANN1, (CE_CONT,
5893 5895 "issue_mfi_smp: DDI_MODEL_LP64"));
5894 5896 sge64 = &smp->sgl[0].sge64[0];
5895 5897 ddi_put32(acc_handle, &sge64[0].length, response_xferlen);
5896 5898 ddi_put64(acc_handle, &sge64[0].phys_addr,
5897 5899 response_dma_obj.dma_cookie[0].dmac_address);
5898 5900 ddi_put32(acc_handle, &sge64[1].length, request_xferlen);
5899 5901 ddi_put64(acc_handle, &sge64[1].phys_addr,
5900 5902 request_dma_obj.dma_cookie[0].dmac_address);
5901 5903 #endif
5902 5904 }
5903 5905 con_log(CL_ANN1, (CE_CONT, "issue_mfi_smp : "
5904 5906 "smp->response_xferlen = %d, smp->request_xferlen = %d "
5905 5907 "smp->data_xfer_len = %d", ddi_get32(acc_handle, &sge32[0].length),
5906 5908 ddi_get32(acc_handle, &sge32[1].length),
5907 5909 ddi_get32(acc_handle, &smp->data_xfer_len)));
5908 5910
5909 5911 cmd->sync_cmd = MRSAS_TRUE;
5910 5912 cmd->frame_count = 1;
5911 5913
5912 5914 if (instance->tbolt) {
5913 5915 mr_sas_tbolt_build_mfi_cmd(instance, cmd);
5914 5916 }
5915 5917
5916 5918 if (instance->func_ptr->issue_cmd_in_sync_mode(instance, cmd)) {
5917 5919 con_log(CL_ANN, (CE_WARN,
5918 5920 "issue_mfi_smp: fw_ioctl failed"));
5919 5921 } else {
5920 5922 con_log(CL_ANN1, (CE_CONT,
5921 5923 "issue_mfi_smp: copy to user space"));
5922 5924
5923 5925 if (request_xferlen) {
5924 5926 for (i = 0; i < request_xferlen; i++) {
5925 5927 if (ddi_copyout(
5926 5928 (uint8_t *)request_dma_obj.buffer +
5927 5929 i, (uint8_t *)request_ubuf + i,
5928 5930 1, mode)) {
5929 5931 con_log(CL_ANN, (CE_WARN,
5930 5932 "issue_mfi_smp : copy to user space"
5931 5933 " failed"));
5932 5934 return (DDI_FAILURE);
5933 5935 }
5934 5936 }
5935 5937 }
5936 5938
5937 5939 if (response_xferlen) {
5938 5940 for (i = 0; i < response_xferlen; i++) {
5939 5941 if (ddi_copyout(
5940 5942 (uint8_t *)response_dma_obj.buffer
5941 5943 + i, (uint8_t *)response_ubuf
5942 5944 + i, 1, mode)) {
5943 5945 con_log(CL_ANN, (CE_WARN,
5944 5946 "issue_mfi_smp : copy to "
5945 5947 "user space failed"));
5946 5948 return (DDI_FAILURE);
5947 5949 }
5948 5950 }
5949 5951 }
5950 5952 }
5951 5953
5952 5954 ksmp->cmd_status = ddi_get8(acc_handle, &smp->cmd_status);
5953 5955 con_log(CL_ANN1, (CE_NOTE, "issue_mfi_smp: smp->cmd_status = %d",
5954 5956 ksmp->cmd_status));
5955 5957 DTRACE_PROBE2(issue_smp, uint8_t, ksmp->cmd, uint8_t, ksmp->cmd_status);
5956 5958
5957 5959 if (request_xferlen) {
5958 5960 /* free kernel buffer */
5959 5961 if (mrsas_free_dma_obj(instance, request_dma_obj) !=
5960 5962 DDI_SUCCESS)
5961 5963 return (DDI_FAILURE);
5962 5964 }
5963 5965
5964 5966 if (response_xferlen) {
5965 5967 /* free kernel buffer */
5966 5968 if (mrsas_free_dma_obj(instance, response_dma_obj) !=
5967 5969 DDI_SUCCESS)
5968 5970 return (DDI_FAILURE);
5969 5971 }
5970 5972
5971 5973 return (DDI_SUCCESS);
5972 5974 }
5973 5975
5974 5976 /*
5975 5977 * issue_mfi_stp
5976 5978 */
5977 5979 static int
5978 5980 issue_mfi_stp(struct mrsas_instance *instance, struct mrsas_ioctl *ioctl,
5979 5981 struct mrsas_cmd *cmd, int mode)
5980 5982 {
5981 5983 void *fis_ubuf;
5982 5984 void *data_ubuf;
5983 5985 uint32_t fis_xferlen = 0;
5984 5986 uint32_t new_xfer_length1 = 0;
5985 5987 uint32_t new_xfer_length2 = 0;
5986 5988 uint32_t data_xferlen = 0;
5987 5989 uint_t model;
5988 5990 dma_obj_t fis_dma_obj;
5989 5991 dma_obj_t data_dma_obj;
5990 5992 struct mrsas_stp_frame *kstp;
5991 5993 struct mrsas_stp_frame *stp;
5992 5994 ddi_acc_handle_t acc_handle = cmd->frame_dma_obj.acc_handle;
5993 5995 int i;
5994 5996
5995 5997 stp = &cmd->frame->stp;
5996 5998 kstp = (struct mrsas_stp_frame *)&ioctl->frame[0];
5997 5999
5998 6000 if (instance->adapterresetinprogress) {
5999 6001 con_log(CL_ANN1, (CE_WARN, "Reset flag set, "
6000 6002 "returning mfi_pkt and setting TRAN_BUSY\n"));
6001 6003 return (DDI_FAILURE);
6002 6004 }
6003 6005 model = ddi_model_convert_from(mode & FMODELS);
6004 6006 if (model == DDI_MODEL_ILP32) {
6005 6007 con_log(CL_ANN1, (CE_CONT, "issue_mfi_stp: DDI_MODEL_ILP32"));
6006 6008
6007 6009 fis_xferlen = kstp->sgl.sge32[0].length;
6008 6010 data_xferlen = kstp->sgl.sge32[1].length;
6009 6011
6010 6012 fis_ubuf = (void *)(ulong_t)kstp->sgl.sge32[0].phys_addr;
6011 6013 data_ubuf = (void *)(ulong_t)kstp->sgl.sge32[1].phys_addr;
6012 6014 } else {
6013 6015 #ifdef _ILP32
6014 6016 con_log(CL_ANN1, (CE_CONT, "issue_mfi_stp: DDI_MODEL_ILP32"));
6015 6017
6016 6018 fis_xferlen = kstp->sgl.sge32[0].length;
6017 6019 data_xferlen = kstp->sgl.sge32[1].length;
6018 6020
6019 6021 fis_ubuf = (void *)(ulong_t)kstp->sgl.sge32[0].phys_addr;
6020 6022 data_ubuf = (void *)(ulong_t)kstp->sgl.sge32[1].phys_addr;
6021 6023 #else
6022 6024 con_log(CL_ANN1, (CE_CONT, "issue_mfi_stp: DDI_MODEL_LP64"));
6023 6025
6024 6026 fis_xferlen = kstp->sgl.sge64[0].length;
6025 6027 data_xferlen = kstp->sgl.sge64[1].length;
6026 6028
6027 6029 fis_ubuf = (void *)(ulong_t)kstp->sgl.sge64[0].phys_addr;
6028 6030 data_ubuf = (void *)(ulong_t)kstp->sgl.sge64[1].phys_addr;
6029 6031 #endif
6030 6032 }
6031 6033
6032 6034
6033 6035 if (fis_xferlen) {
6034 6036 con_log(CL_ANN, (CE_CONT, "issue_mfi_stp: "
6035 6037 "fis_ubuf = %p fis_xferlen = %x", fis_ubuf, fis_xferlen));
6036 6038
6037 6039 /* means IOCTL requires DMA */
6038 6040 /* allocate the data transfer buffer */
6039 6041 /* fis_dma_obj.size = fis_xferlen; */
6040 6042 MRSAS_GET_BOUNDARY_ALIGNED_LEN(fis_xferlen,
6041 6043 new_xfer_length1, PAGESIZE);
6042 6044 fis_dma_obj.size = new_xfer_length1;
6043 6045 fis_dma_obj.dma_attr = mrsas_generic_dma_attr;
6044 6046 fis_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
6045 6047 fis_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
6046 6048 fis_dma_obj.dma_attr.dma_attr_sgllen = 1;
6047 6049 fis_dma_obj.dma_attr.dma_attr_align = 1;
6048 6050
6049 6051 /* allocate kernel buffer for DMA */
6050 6052 if (mrsas_alloc_dma_obj(instance, &fis_dma_obj,
6051 6053 (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
6052 6054 con_log(CL_ANN, (CE_WARN, "issue_mfi_stp : "
6053 6055 "could not allocate data transfer buffer."));
6054 6056 return (DDI_FAILURE);
6055 6057 }
6056 6058 (void) memset(fis_dma_obj.buffer, 0, fis_xferlen);
6057 6059
6058 6060 /* If IOCTL requires DMA WRITE, do ddi_copyin IOCTL data copy */
6059 6061 for (i = 0; i < fis_xferlen; i++) {
6060 6062 if (ddi_copyin((uint8_t *)fis_ubuf + i,
6061 6063 (uint8_t *)fis_dma_obj.buffer + i, 1, mode)) {
6062 6064 con_log(CL_ANN, (CE_WARN, "issue_mfi_stp: "
6063 6065 "copy from user space failed"));
6064 6066 return (DDI_FAILURE);
6065 6067 }
6066 6068 }
6067 6069 }
6068 6070
6069 6071 if (data_xferlen) {
6070 6072 con_log(CL_ANN, (CE_CONT, "issue_mfi_stp: data_ubuf = %p "
6071 6073 "data_xferlen = %x", data_ubuf, data_xferlen));
6072 6074
6073 6075 /* means IOCTL requires DMA */
6074 6076 /* allocate the data transfer buffer */
6075 6077 /* data_dma_obj.size = data_xferlen; */
6076 6078 MRSAS_GET_BOUNDARY_ALIGNED_LEN(data_xferlen, new_xfer_length2,
6077 6079 PAGESIZE);
6078 6080 data_dma_obj.size = new_xfer_length2;
6079 6081 data_dma_obj.dma_attr = mrsas_generic_dma_attr;
6080 6082 data_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
6081 6083 data_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
6082 6084 data_dma_obj.dma_attr.dma_attr_sgllen = 1;
6083 6085 data_dma_obj.dma_attr.dma_attr_align = 1;
6084 6086
6085 6087 /* allocate kernel buffer for DMA */
6086 6088 if (mrsas_alloc_dma_obj(instance, &data_dma_obj,
6087 6089 (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
6088 6090 con_log(CL_ANN, (CE_WARN, "issue_mfi_stp: "
6089 6091 "could not allocate data transfer buffer."));
6090 6092 return (DDI_FAILURE);
6091 6093 }
6092 6094 (void) memset(data_dma_obj.buffer, 0, data_xferlen);
6093 6095
6094 6096 /* If IOCTL requires DMA WRITE, do ddi_copyin IOCTL data copy */
6095 6097 for (i = 0; i < data_xferlen; i++) {
6096 6098 if (ddi_copyin((uint8_t *)data_ubuf + i,
6097 6099 (uint8_t *)data_dma_obj.buffer + i, 1, mode)) {
6098 6100 con_log(CL_ANN, (CE_WARN, "issue_mfi_stp: "
6099 6101 "copy from user space failed"));
6100 6102 return (DDI_FAILURE);
6101 6103 }
6102 6104 }
6103 6105 }
6104 6106
6105 6107 ddi_put8(acc_handle, &stp->cmd, kstp->cmd);
6106 6108 ddi_put8(acc_handle, &stp->cmd_status, 0);
6107 6109 ddi_put8(acc_handle, &stp->connection_status, 0);
6108 6110 ddi_put8(acc_handle, &stp->target_id, kstp->target_id);
6109 6111 ddi_put8(acc_handle, &stp->sge_count, kstp->sge_count);
6110 6112
6111 6113 ddi_put16(acc_handle, &stp->timeout, kstp->timeout);
6112 6114 ddi_put32(acc_handle, &stp->data_xfer_len, kstp->data_xfer_len);
6113 6115
6114 6116 ddi_rep_put8(acc_handle, (uint8_t *)kstp->fis, (uint8_t *)stp->fis, 10,
6115 6117 DDI_DEV_AUTOINCR);
6116 6118
6117 6119 ddi_put16(acc_handle, &stp->flags, kstp->flags & ~MFI_FRAME_SGL64);
6118 6120 ddi_put32(acc_handle, &stp->stp_flags, kstp->stp_flags);
6119 6121 ddi_put32(acc_handle, &stp->sgl.sge32[0].length, fis_xferlen);
6120 6122 ddi_put32(acc_handle, &stp->sgl.sge32[0].phys_addr,
6121 6123 fis_dma_obj.dma_cookie[0].dmac_address);
6122 6124 ddi_put32(acc_handle, &stp->sgl.sge32[1].length, data_xferlen);
6123 6125 ddi_put32(acc_handle, &stp->sgl.sge32[1].phys_addr,
6124 6126 data_dma_obj.dma_cookie[0].dmac_address);
6125 6127
6126 6128 cmd->sync_cmd = MRSAS_TRUE;
6127 6129 cmd->frame_count = 1;
6128 6130
6129 6131 if (instance->tbolt) {
6130 6132 mr_sas_tbolt_build_mfi_cmd(instance, cmd);
6131 6133 }
6132 6134
6133 6135 if (instance->func_ptr->issue_cmd_in_sync_mode(instance, cmd)) {
6134 6136 con_log(CL_ANN, (CE_WARN, "issue_mfi_stp: fw_ioctl failed"));
6135 6137 } else {
6136 6138
6137 6139 if (fis_xferlen) {
6138 6140 for (i = 0; i < fis_xferlen; i++) {
6139 6141 if (ddi_copyout(
6140 6142 (uint8_t *)fis_dma_obj.buffer + i,
6141 6143 (uint8_t *)fis_ubuf + i, 1, mode)) {
6142 6144 con_log(CL_ANN, (CE_WARN,
6143 6145 "issue_mfi_stp : copy to "
6144 6146 "user space failed"));
6145 6147 return (DDI_FAILURE);
6146 6148 }
6147 6149 }
6148 6150 }
6149 6151 }
6150 6152 if (data_xferlen) {
6151 6153 for (i = 0; i < data_xferlen; i++) {
6152 6154 if (ddi_copyout(
6153 6155 (uint8_t *)data_dma_obj.buffer + i,
6154 6156 (uint8_t *)data_ubuf + i, 1, mode)) {
6155 6157 con_log(CL_ANN, (CE_WARN,
6156 6158 "issue_mfi_stp : copy to"
6157 6159 " user space failed"));
6158 6160 return (DDI_FAILURE);
6159 6161 }
6160 6162 }
6161 6163 }
6162 6164
6163 6165 kstp->cmd_status = ddi_get8(acc_handle, &stp->cmd_status);
6164 6166 con_log(CL_ANN1, (CE_NOTE, "issue_mfi_stp: stp->cmd_status = %d",
6165 6167 kstp->cmd_status));
6166 6168 DTRACE_PROBE2(issue_stp, uint8_t, kstp->cmd, uint8_t, kstp->cmd_status);
6167 6169
6168 6170 if (fis_xferlen) {
6169 6171 /* free kernel buffer */
6170 6172 if (mrsas_free_dma_obj(instance, fis_dma_obj) != DDI_SUCCESS)
6171 6173 return (DDI_FAILURE);
6172 6174 }
6173 6175
6174 6176 if (data_xferlen) {
6175 6177 /* free kernel buffer */
6176 6178 if (mrsas_free_dma_obj(instance, data_dma_obj) != DDI_SUCCESS)
6177 6179 return (DDI_FAILURE);
6178 6180 }
6179 6181
6180 6182 return (DDI_SUCCESS);
6181 6183 }
6182 6184
6183 6185 /*
6184 6186 * fill_up_drv_ver
6185 6187 */
6186 6188 void
6187 6189 fill_up_drv_ver(struct mrsas_drv_ver *dv)
6188 6190 {
6189 6191 (void) memset(dv, 0, sizeof (struct mrsas_drv_ver));
6190 6192
6191 6193 (void) memcpy(dv->signature, "$LSI LOGIC$", strlen("$LSI LOGIC$"));
6192 6194 (void) memcpy(dv->os_name, "Solaris", strlen("Solaris"));
6193 6195 (void) memcpy(dv->drv_name, "mr_sas", strlen("mr_sas"));
6194 6196 (void) memcpy(dv->drv_ver, MRSAS_VERSION, strlen(MRSAS_VERSION));
6195 6197 (void) memcpy(dv->drv_rel_date, MRSAS_RELDATE,
6196 6198 strlen(MRSAS_RELDATE));
6197 6199
6198 6200 }
6199 6201
6200 6202 /*
6201 6203 * handle_drv_ioctl
6202 6204 */
6203 6205 static int
6204 6206 handle_drv_ioctl(struct mrsas_instance *instance, struct mrsas_ioctl *ioctl,
6205 6207 int mode)
6206 6208 {
6207 6209 int i;
6208 6210 int rval = DDI_SUCCESS;
6209 6211 int *props = NULL;
6210 6212 void *ubuf;
6211 6213
6212 6214 uint8_t *pci_conf_buf;
6213 6215 uint32_t xferlen;
6214 6216 uint32_t num_props;
6215 6217 uint_t model;
6216 6218 struct mrsas_dcmd_frame *kdcmd;
6217 6219 struct mrsas_drv_ver dv;
6218 6220 struct mrsas_pci_information pi;
6219 6221
6220 6222 kdcmd = (struct mrsas_dcmd_frame *)&ioctl->frame[0];
6221 6223
6222 6224 model = ddi_model_convert_from(mode & FMODELS);
6223 6225 if (model == DDI_MODEL_ILP32) {
6224 6226 con_log(CL_ANN1, (CE_CONT,
6225 6227 "handle_drv_ioctl: DDI_MODEL_ILP32"));
6226 6228
6227 6229 xferlen = kdcmd->sgl.sge32[0].length;
6228 6230
6229 6231 ubuf = (void *)(ulong_t)kdcmd->sgl.sge32[0].phys_addr;
6230 6232 } else {
6231 6233 #ifdef _ILP32
6232 6234 con_log(CL_ANN1, (CE_CONT,
6233 6235 "handle_drv_ioctl: DDI_MODEL_ILP32"));
6234 6236 xferlen = kdcmd->sgl.sge32[0].length;
6235 6237 ubuf = (void *)(ulong_t)kdcmd->sgl.sge32[0].phys_addr;
6236 6238 #else
6237 6239 con_log(CL_ANN1, (CE_CONT,
6238 6240 "handle_drv_ioctl: DDI_MODEL_LP64"));
6239 6241 xferlen = kdcmd->sgl.sge64[0].length;
6240 6242 ubuf = (void *)(ulong_t)kdcmd->sgl.sge64[0].phys_addr;
6241 6243 #endif
6242 6244 }
6243 6245 con_log(CL_ANN1, (CE_CONT, "handle_drv_ioctl: "
6244 6246 "dataBuf=%p size=%d bytes", ubuf, xferlen));
6245 6247
6246 6248 switch (kdcmd->opcode) {
6247 6249 case MRSAS_DRIVER_IOCTL_DRIVER_VERSION:
6248 6250 con_log(CL_ANN1, (CE_CONT, "handle_drv_ioctl: "
6249 6251 "MRSAS_DRIVER_IOCTL_DRIVER_VERSION"));
6250 6252
6251 6253 fill_up_drv_ver(&dv);
6252 6254
6253 6255 if (ddi_copyout(&dv, ubuf, xferlen, mode)) {
6254 6256 con_log(CL_ANN, (CE_WARN, "handle_drv_ioctl: "
6255 6257 "MRSAS_DRIVER_IOCTL_DRIVER_VERSION : "
6256 6258 "copy to user space failed"));
6257 6259 kdcmd->cmd_status = 1;
6258 6260 rval = 1;
6259 6261 } else {
6260 6262 kdcmd->cmd_status = 0;
6261 6263 }
6262 6264 break;
6263 6265 case MRSAS_DRIVER_IOCTL_PCI_INFORMATION:
6264 6266 con_log(CL_ANN1, (CE_NOTE, "handle_drv_ioctl: "
6265 6267 "MRSAS_DRIVER_IOCTL_PCI_INFORMAITON"));
6266 6268
6267 6269 if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, instance->dip,
6268 6270 0, "reg", &props, &num_props)) {
6269 6271 con_log(CL_ANN, (CE_WARN, "handle_drv_ioctl: "
6270 6272 "MRSAS_DRIVER_IOCTL_PCI_INFORMATION : "
6271 6273 "ddi_prop_look_int_array failed"));
6272 6274 rval = DDI_FAILURE;
6273 6275 } else {
6274 6276
6275 6277 pi.busNumber = (props[0] >> 16) & 0xFF;
6276 6278 pi.deviceNumber = (props[0] >> 11) & 0x1f;
6277 6279 pi.functionNumber = (props[0] >> 8) & 0x7;
6278 6280 ddi_prop_free((void *)props);
6279 6281 }
6280 6282
6281 6283 pci_conf_buf = (uint8_t *)&pi.pciHeaderInfo;
6282 6284
6283 6285 for (i = 0; i < (sizeof (struct mrsas_pci_information) -
6284 6286 offsetof(struct mrsas_pci_information, pciHeaderInfo));
6285 6287 i++) {
6286 6288 pci_conf_buf[i] =
6287 6289 pci_config_get8(instance->pci_handle, i);
6288 6290 }
6289 6291
6290 6292 if (ddi_copyout(&pi, ubuf, xferlen, mode)) {
6291 6293 con_log(CL_ANN, (CE_WARN, "handle_drv_ioctl: "
6292 6294 "MRSAS_DRIVER_IOCTL_PCI_INFORMATION : "
6293 6295 "copy to user space failed"));
6294 6296 kdcmd->cmd_status = 1;
6295 6297 rval = 1;
6296 6298 } else {
6297 6299 kdcmd->cmd_status = 0;
6298 6300 }
6299 6301 break;
6300 6302 default:
6301 6303 con_log(CL_ANN, (CE_WARN, "handle_drv_ioctl: "
6302 6304 "invalid driver specific IOCTL opcode = 0x%x",
6303 6305 kdcmd->opcode));
6304 6306 kdcmd->cmd_status = 1;
6305 6307 rval = DDI_FAILURE;
6306 6308 break;
6307 6309 }
6308 6310
6309 6311 return (rval);
6310 6312 }
6311 6313
/*
 * handle_mfi_ioctl
 *
 * Dispatch a user-supplied MFI frame (DCMD, SMP, STP or LD/PD SCSI
 * passthru) to the firmware using a driver command packet drawn from
 * the appropriate pool, then return the packet to that pool.
 *
 * Returns DDI_SUCCESS/DDI_FAILURE (or the issue_mfi_* result); any
 * FMA-detected handle error forces DDI_FAILURE.
 */
static int
handle_mfi_ioctl(struct mrsas_instance *instance, struct mrsas_ioctl *ioctl,
    int mode)
{
	int rval = DDI_SUCCESS;

	struct mrsas_header *hdr;
	struct mrsas_cmd *cmd;

	/*
	 * Thunderbolt-class adapters take MFI passthru commands from the
	 * RAID message pool; older controllers use the regular MFI pool.
	 */
	if (instance->tbolt) {
		cmd = get_raid_msg_mfi_pkt(instance);
	} else {
		cmd = mrsas_get_mfi_pkt(instance);
	}
	if (!cmd) {
		con_log(CL_ANN, (CE_WARN, "mr_sas: "
		    "failed to get a cmd packet"));
		DTRACE_PROBE2(mfi_ioctl_err, uint16_t,
		    instance->fw_outstanding, uint16_t, instance->max_fw_cmds);
		return (DDI_FAILURE);
	}

	/* Clear the frame buffer and assign back the context id */
	(void) memset((char *)&cmd->frame[0], 0, sizeof (union mrsas_frame));
	ddi_put32(cmd->frame_dma_obj.acc_handle, &cmd->frame->hdr.context,
	    cmd->index);

	hdr = (struct mrsas_header *)&ioctl->frame[0];

	/*
	 * Dispatch on the MFI opcode embedded in the caller's frame.
	 * NOTE(review): hdr points into the kernel copy of the ioctl
	 * payload, not DMA memory, yet is read through the frame DMA
	 * access handle — presumably equivalent on this platform; confirm.
	 */
	switch (ddi_get8(cmd->frame_dma_obj.acc_handle, &hdr->cmd)) {
	case MFI_CMD_OP_DCMD:
		rval = issue_mfi_dcmd(instance, ioctl, cmd, mode);
		break;
	case MFI_CMD_OP_SMP:
		rval = issue_mfi_smp(instance, ioctl, cmd, mode);
		break;
	case MFI_CMD_OP_STP:
		rval = issue_mfi_stp(instance, ioctl, cmd, mode);
		break;
	case MFI_CMD_OP_LD_SCSI:
	case MFI_CMD_OP_PD_SCSI:
		rval = issue_mfi_pthru(instance, ioctl, cmd, mode);
		break;
	default:
		con_log(CL_ANN, (CE_WARN, "handle_mfi_ioctl: "
		    "invalid mfi ioctl hdr->cmd = %d", hdr->cmd));
		rval = DDI_FAILURE;
		break;
	}

	/* Fold any FMA-detected handle error into the return value. */
	if (mrsas_common_check(instance, cmd) != DDI_SUCCESS)
		rval = DDI_FAILURE;

	/* Return the packet to the pool it was drawn from. */
	if (instance->tbolt) {
		return_raid_msg_mfi_pkt(instance, cmd);
	} else {
		mrsas_return_mfi_pkt(instance, cmd);
	}

	return (rval);
}
6376 6378
6377 6379 /*
6378 6380 * AEN
6379 6381 */
6380 6382 static int
6381 6383 handle_mfi_aen(struct mrsas_instance *instance, struct mrsas_aen *aen)
6382 6384 {
6383 6385 int rval = 0;
6384 6386
6385 6387 rval = register_mfi_aen(instance, instance->aen_seq_num,
6386 6388 aen->class_locale_word);
6387 6389
6388 6390 aen->cmd_status = (uint8_t)rval;
6389 6391
6390 6392 return (rval);
6391 6393 }
6392 6394
/*
 * register_mfi_aen
 *
 * Register (or widen an existing registration) for asynchronous event
 * notifications with the firmware via MR_DCMD_CTRL_EVENT_WAIT.  If a
 * pending AEN command already covers the requested class/locale the
 * call is a no-op; otherwise the old command is aborted and a merged
 * registration is issued.  Returns 0 on success, ENOMEM if no command
 * packet is available, or the abort failure code.
 */
static int
register_mfi_aen(struct mrsas_instance *instance, uint32_t seq_num,
    uint32_t class_locale_word)
{
	int ret_val;

	struct mrsas_cmd *cmd, *aen_cmd;
	struct mrsas_dcmd_frame *dcmd;
	union mrsas_evt_class_locale curr_aen;
	union mrsas_evt_class_locale prev_aen;

	con_log(CL_ANN, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
	/*
	 * If there an AEN pending already (aen_cmd), check if the
	 * class_locale of that pending AEN is inclusive of the new
	 * AEN request we currently have. If it is, then we don't have
	 * to do anything. In other words, whichever events the current
	 * AEN request is subscribing to, have already been subscribed
	 * to.
	 *
	 * If the old_cmd is _not_ inclusive, then we have to abort
	 * that command, form a class_locale that is superset of both
	 * old and current and re-issue to the FW
	 */

	curr_aen.word = LE_32(class_locale_word);
	curr_aen.members.locale = LE_16(curr_aen.members.locale);
	aen_cmd = instance->aen_cmd;
	if (aen_cmd) {
		/* Fetch the class/locale the pending AEN registered with. */
		prev_aen.word = ddi_get32(aen_cmd->frame_dma_obj.acc_handle,
		    &aen_cmd->frame->dcmd.mbox.w[1]);
		prev_aen.word = LE_32(prev_aen.word);
		prev_aen.members.locale = LE_16(prev_aen.members.locale);
		/*
		 * A class whose enum value is smaller is inclusive of all
		 * higher values. If a PROGRESS (= -1) was previously
		 * registered, then a new registration requests for higher
		 * classes need not be sent to FW. They are automatically
		 * included.
		 *
		 * Locale numbers don't have such hierarchy. They are bitmap
		 * values
		 */
		if ((prev_aen.members.class <= curr_aen.members.class) &&
		    !((prev_aen.members.locale & curr_aen.members.locale) ^
		    curr_aen.members.locale)) {
			/*
			 * Previously issued event registration includes
			 * current request. Nothing to do.
			 */

			return (0);
		} else {
			/* Merge: union of locales, widest (lowest) class. */
			curr_aen.members.locale |= prev_aen.members.locale;

			if (prev_aen.members.class < curr_aen.members.class)
				curr_aen.members.class = prev_aen.members.class;

			ret_val = abort_aen_cmd(instance, aen_cmd);

			if (ret_val) {
				con_log(CL_ANN, (CE_WARN, "register_mfi_aen: "
				    "failed to abort prevous AEN command"));

				return (ret_val);
			}
		}
	} else {
		curr_aen.word = LE_32(class_locale_word);
		curr_aen.members.locale = LE_16(curr_aen.members.locale);
	}

	/* Acquire a command packet appropriate to the adapter generation. */
	if (instance->tbolt) {
		cmd = get_raid_msg_mfi_pkt(instance);
	} else {
		cmd = mrsas_get_mfi_pkt(instance);
	}

	if (!cmd) {
		DTRACE_PROBE2(mfi_aen_err, uint16_t, instance->fw_outstanding,
		    uint16_t, instance->max_fw_cmds);
		return (ENOMEM);
	}

	/* Clear the frame buffer and assign back the context id */
	(void) memset((char *)&cmd->frame[0], 0, sizeof (union mrsas_frame));
	ddi_put32(cmd->frame_dma_obj.acc_handle, &cmd->frame->hdr.context,
	    cmd->index);

	dcmd = &cmd->frame->dcmd;

	/* for(i = 0; i < DCMD_MBOX_SZ; i++) dcmd->mbox.b[i] = 0; */
	(void) memset(dcmd->mbox.b, 0, DCMD_MBOX_SZ);

	(void) memset(instance->mfi_evt_detail_obj.buffer, 0,
	    sizeof (struct mrsas_evt_detail));

	/* Prepare DCMD for aen registration */
	ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->cmd, MFI_CMD_OP_DCMD);
	ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->cmd_status, 0x0);
	ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->sge_count, 1);
	ddi_put16(cmd->frame_dma_obj.acc_handle, &dcmd->flags,
	    MFI_FRAME_DIR_READ);
	ddi_put16(cmd->frame_dma_obj.acc_handle, &dcmd->timeout, 0);
	ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->data_xfer_len,
	    sizeof (struct mrsas_evt_detail));
	ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->opcode,
	    MR_DCMD_CTRL_EVENT_WAIT);
	ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->mbox.w[0], seq_num);
	/*
	 * NOTE(review): locale was already byte-swapped when curr_aen was
	 * built; this second LE_16/LE_32 pair mirrors the LSI reference
	 * driver — confirm the intended wire format on big-endian hosts.
	 */
	curr_aen.members.locale = LE_16(curr_aen.members.locale);
	curr_aen.word = LE_32(curr_aen.word);
	ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->mbox.w[1],
	    curr_aen.word);
	ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->sgl.sge32[0].phys_addr,
	    instance->mfi_evt_detail_obj.dma_cookie[0].dmac_address);
	ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->sgl.sge32[0].length,
	    sizeof (struct mrsas_evt_detail));

	instance->aen_seq_num = seq_num;


	/*
	 * Store reference to the cmd used to register for AEN. When an
	 * application wants us to register for AEN, we have to abort this
	 * cmd and re-register with a new EVENT LOCALE supplied by that app
	 */
	instance->aen_cmd = cmd;

	cmd->frame_count = 1;

	/* Issue the aen registration frame */
	/* atomic_add_16 (&instance->fw_outstanding, 1); */
	if (instance->tbolt) {
		mr_sas_tbolt_build_mfi_cmd(instance, cmd);
	}
	instance->func_ptr->issue_cmd(cmd, instance);

	return (0);
}
6532 6534
6533 6535 void
6534 6536 display_scsi_inquiry(caddr_t scsi_inq)
6535 6537 {
6536 6538 #define MAX_SCSI_DEVICE_CODE 14
6537 6539 int i;
6538 6540 char inquiry_buf[256] = {0};
6539 6541 int len;
6540 6542 const char *const scsi_device_types[] = {
6541 6543 "Direct-Access ",
6542 6544 "Sequential-Access",
6543 6545 "Printer ",
6544 6546 "Processor ",
6545 6547 "WORM ",
6546 6548 "CD-ROM ",
6547 6549 "Scanner ",
6548 6550 "Optical Device ",
6549 6551 "Medium Changer ",
6550 6552 "Communications ",
6551 6553 "Unknown ",
6552 6554 "Unknown ",
6553 6555 "Unknown ",
6554 6556 "Enclosure ",
6555 6557 };
6556 6558
6557 6559 len = 0;
6558 6560
6559 6561 len += snprintf(inquiry_buf + len, 265 - len, " Vendor: ");
6560 6562 for (i = 8; i < 16; i++) {
6561 6563 len += snprintf(inquiry_buf + len, 265 - len, "%c",
6562 6564 scsi_inq[i]);
6563 6565 }
6564 6566
6565 6567 len += snprintf(inquiry_buf + len, 265 - len, " Model: ");
6566 6568
6567 6569 for (i = 16; i < 32; i++) {
6568 6570 len += snprintf(inquiry_buf + len, 265 - len, "%c",
6569 6571 scsi_inq[i]);
6570 6572 }
6571 6573
6572 6574 len += snprintf(inquiry_buf + len, 265 - len, " Rev: ");
6573 6575
6574 6576 for (i = 32; i < 36; i++) {
6575 6577 len += snprintf(inquiry_buf + len, 265 - len, "%c",
6576 6578 scsi_inq[i]);
6577 6579 }
6578 6580
6579 6581 len += snprintf(inquiry_buf + len, 265 - len, "\n");
6580 6582
6581 6583
6582 6584 i = scsi_inq[0] & 0x1f;
6583 6585
6584 6586
6585 6587 len += snprintf(inquiry_buf + len, 265 - len, " Type: %s ",
6586 6588 i < MAX_SCSI_DEVICE_CODE ? scsi_device_types[i] :
6587 6589 "Unknown ");
6588 6590
6589 6591
6590 6592 len += snprintf(inquiry_buf + len, 265 - len,
6591 6593 " ANSI SCSI revision: %02x", scsi_inq[2] & 0x07);
6592 6594
6593 6595 if ((scsi_inq[2] & 0x07) == 1 && (scsi_inq[3] & 0x0f) == 1) {
6594 6596 len += snprintf(inquiry_buf + len, 265 - len, " CCS\n");
6595 6597 } else {
6596 6598 len += snprintf(inquiry_buf + len, 265 - len, "\n");
6597 6599 }
6598 6600
6599 6601 con_log(CL_DLEVEL2, (CE_CONT, inquiry_buf));
6600 6602 }
6601 6603
/*
 * io_timeout_checker
 *
 * One-second watchdog (re-armed via timeout(9F)).  Ages every pending
 * command's drv_pkt_time; on the first expired command it triggers an
 * online controller reset (OCR), or kills the adapter when OCR is
 * disabled or the command has already survived IO_RETRY_COUNT resets.
 * Also initiates OCR directly if the firmware reports a fault.
 */
static void
io_timeout_checker(void *arg)
{
	struct scsi_pkt *pkt;
	struct mrsas_instance *instance = arg;
	struct mrsas_cmd *cmd = NULL;
	struct mrsas_header *hdr;
	int time = 0;
	int counter = 0;
	struct mlist_head *pos, *next;
	mlist_t process_list;

	/* Skip the scan while an OCR is running; just re-arm the timer. */
	if (instance->adapterresetinprogress == 1) {
		con_log(CL_ANN, (CE_NOTE, "io_timeout_checker:"
		    " reset in progress"));

		instance->timeout_id = timeout(io_timeout_checker,
		    (void *) instance, drv_usectohz(MRSAS_1_SECOND));
		return;
	}

	/* See if this check needs to be in the beginning or last in ISR */
	if (mrsas_initiate_ocr_if_fw_is_faulty(instance) == 1) {
		cmn_err(CE_WARN, "io_timeout_checker: "
		    "FW Fault, calling reset adapter");
		cmn_err(CE_CONT, "io_timeout_checker: "
		    "fw_outstanding 0x%X max_fw_cmds 0x%X",
		    instance->fw_outstanding, instance->max_fw_cmds);
		if (instance->adapterresetinprogress == 0) {
			instance->adapterresetinprogress = 1;
			if (instance->tbolt)
				(void) mrsas_tbolt_reset_ppc(instance);
			else
				(void) mrsas_reset_ppc(instance);
			instance->adapterresetinprogress = 0;
		}
		instance->timeout_id = timeout(io_timeout_checker,
		    (void *) instance, drv_usectohz(MRSAS_1_SECOND));
		return;
	}

	/* NOTE(review): process_list is initialized but never used here. */
	INIT_LIST_HEAD(&process_list);

	/*
	 * Age each pending command by one tick; stop at the first one
	 * whose timer has expired.
	 */
	mutex_enter(&instance->cmd_pend_mtx);
	mlist_for_each_safe(pos, next, &instance->cmd_pend_list) {
		cmd = mlist_entry(pos, struct mrsas_cmd, list);

		if (cmd == NULL) {
			continue;
		}

		if (cmd->sync_cmd == MRSAS_TRUE) {
			hdr = (struct mrsas_header *)&cmd->frame->hdr;
			/* NOTE(review): address-of can't be NULL; dead check */
			if (hdr == NULL) {
				continue;
			}
			time = --cmd->drv_pkt_time;
		} else {
			pkt = cmd->pkt;
			if (pkt == NULL) {
				continue;
			}
			time = --cmd->drv_pkt_time;
		}
		if (time <= 0) {
			cmn_err(CE_WARN, "%llx: "
			    "io_timeout_checker: TIMING OUT: pkt: %p, "
			    "cmd %p fw_outstanding 0x%X max_fw_cmds 0x%X\n",
			    gethrtime(), (void *)pkt, (void *)cmd,
			    instance->fw_outstanding, instance->max_fw_cmds);

			counter++;
			break;
		}
	}
	mutex_exit(&instance->cmd_pend_mtx);

	/* If counter != 0, 'cmd' is the timed-out command found above. */
	if (counter) {
		if (instance->disable_online_ctrl_reset == 1) {
			cmn_err(CE_WARN, "mr_sas %d: %s(): OCR is NOT "
			    "supported by Firmware, KILL adapter!!!",
			    instance->instance, __func__);

			if (instance->tbolt)
				mrsas_tbolt_kill_adapter(instance);
			else
				(void) mrsas_kill_adapter(instance);

			return;
		} else {
			if (cmd->retry_count_for_ocr <= IO_RETRY_COUNT) {
				if (instance->adapterresetinprogress == 0) {
					if (instance->tbolt) {
						(void) mrsas_tbolt_reset_ppc(
						    instance);
					} else {
						(void) mrsas_reset_ppc(
						    instance);
					}
				}
			} else {
				cmn_err(CE_WARN,
				    "io_timeout_checker: "
				    "cmd %p cmd->index %d "
				    "timed out even after 3 resets: "
				    "so KILL adapter", (void *)cmd, cmd->index);

				mrsas_print_cmd_details(instance, cmd, 0xDD);

				if (instance->tbolt)
					mrsas_tbolt_kill_adapter(instance);
				else
					(void) mrsas_kill_adapter(instance);
				return;
			}
		}
	}
	con_log(CL_ANN, (CE_NOTE, "mrsas: "
	    "schedule next timeout check: "
	    "do timeout \n"));
	instance->timeout_id =
	    timeout(io_timeout_checker, (void *)instance,
	    drv_usectohz(MRSAS_1_SECOND));
}
6726 6728
/*
 * read_fw_status_reg_ppc - fetch the raw firmware status word from the
 * adapter's outbound scratch pad 0 register.
 */
static uint32_t
read_fw_status_reg_ppc(struct mrsas_instance *instance)
{
	uint32_t fw_status;

	fw_status = (uint32_t)RD_OB_SCRATCH_PAD_0(instance);

	return (fw_status);
}
6732 6734
/*
 * issue_cmd_ppc
 *
 * Asynchronously hand one MFI frame to the firmware via the inbound
 * queue port; completion is reported later through the ISR.  Commands
 * carrying a scsi_pkt are tracked on the pending list so that
 * io_timeout_checker() can age them (unless an OCR is in progress, in
 * which case only the packet timer is refreshed).
 */
static void
issue_cmd_ppc(struct mrsas_cmd *cmd, struct mrsas_instance *instance)
{
	struct scsi_pkt *pkt;
	atomic_inc_16(&instance->fw_outstanding);

	pkt = cmd->pkt;
	if (pkt) {
		con_log(CL_DLEVEL1, (CE_NOTE, "%llx : issue_cmd_ppc:"
		    "ISSUED CMD TO FW : called : cmd:"
		    ": %p instance : %p pkt : %p pkt_time : %x\n",
		    gethrtime(), (void *)cmd, (void *)instance,
		    (void *)pkt, cmd->drv_pkt_time));
		if (instance->adapterresetinprogress) {
			/* During OCR just refresh the timer; don't re-queue */
			cmd->drv_pkt_time = (uint16_t)debug_timeout_g;
			con_log(CL_ANN1, (CE_NOTE, "Reset the scsi_pkt timer"));
		} else {
			/* Track the cmd so the watchdog can age it. */
			push_pending_mfi_pkt(instance, cmd);
		}

	} else {
		con_log(CL_DLEVEL1, (CE_NOTE, "%llx : issue_cmd_ppc:"
		    "ISSUED CMD TO FW : called : cmd : %p, instance: %p"
		    "(NO PKT)\n", gethrtime(), (void *)cmd, (void *)instance));
	}

	/* Serialize the doorbell write; low bits encode the frame count. */
	mutex_enter(&instance->reg_write_mtx);
	/* Issue the command to the FW */
	WR_IB_PICK_QPORT((cmd->frame_phys_addr) |
	    (((cmd->frame_count - 1) << 1) | 1), instance);
	mutex_exit(&instance->reg_write_mtx);

}
6766 6768
/*
 * issue_cmd_in_sync_mode
 *
 * Submit an MFI frame and block until the completion path updates
 * cmd->cmd_status (signalled via int_cmd_cv).  In the OCR case the
 * frame is fired without waiting and DDI_SUCCESS is returned.
 */
static int
issue_cmd_in_sync_mode_ppc(struct mrsas_instance *instance,
    struct mrsas_cmd *cmd)
{
	int i;
	uint32_t msecs = MFI_POLL_TIMEOUT_SECS * MILLISEC;
	struct mrsas_header *hdr = &cmd->frame->hdr;

	con_log(CL_ANN1, (CE_NOTE, "issue_cmd_in_sync_mode_ppc: called"));

	if (instance->adapterresetinprogress) {
		/* Stretch the packet timer so the watchdog won't fire. */
		cmd->drv_pkt_time = ddi_get16(
		    cmd->frame_dma_obj.acc_handle, &hdr->timeout);
		if (cmd->drv_pkt_time < debug_timeout_g)
			cmd->drv_pkt_time = (uint16_t)debug_timeout_g;

		con_log(CL_ANN1, (CE_NOTE, "sync_mode_ppc: "
		    "issue and return in reset case\n"));
		WR_IB_PICK_QPORT((cmd->frame_phys_addr) |
		    (((cmd->frame_count - 1) << 1) | 1), instance);

		return (DDI_SUCCESS);
	} else {
		con_log(CL_ANN1, (CE_NOTE, "sync_mode_ppc: pushing the pkt\n"));
		push_pending_mfi_pkt(instance, cmd);
	}

	/* ENODATA is the in-flight sentinel the ISR overwrites. */
	cmd->cmd_status = ENODATA;

	mutex_enter(&instance->reg_write_mtx);
	/* Issue the command to the FW */
	WR_IB_PICK_QPORT((cmd->frame_phys_addr) |
	    (((cmd->frame_count - 1) << 1) | 1), instance);
	mutex_exit(&instance->reg_write_mtx);

	mutex_enter(&instance->int_cmd_mtx);
	/*
	 * NOTE(review): each cv_wait() blocks until signalled, so 'i'
	 * counts wakeups, not milliseconds; the 'msecs' bound is not a
	 * real-time timeout — confirm this is the intended behavior.
	 */
	for (i = 0; i < msecs && (cmd->cmd_status == ENODATA); i++) {
		cv_wait(&instance->int_cmd_cv, &instance->int_cmd_mtx);
	}
	mutex_exit(&instance->int_cmd_mtx);

	con_log(CL_ANN1, (CE_NOTE, "issue_cmd_in_sync_mode_ppc: done"));

	if (i < (msecs -1)) {
		return (DDI_SUCCESS);
	} else {
		return (DDI_FAILURE);
	}
}
6819 6821
/*
 * issue_cmd_in_poll_mode
 *
 * Submit an MFI frame and busy-wait (1 ms steps, up to
 * MFI_POLL_TIMEOUT_SECS) for the firmware to overwrite the poll-mode
 * sentinel in cmd_status.  Returns DDI_SUCCESS when the status byte
 * changes, DDI_FAILURE on timeout.
 */
static int
issue_cmd_in_poll_mode_ppc(struct mrsas_instance *instance,
    struct mrsas_cmd *cmd)
{
	int i;
	uint16_t flags;
	uint32_t msecs = MFI_POLL_TIMEOUT_SECS * MILLISEC;
	struct mrsas_header *frame_hdr;

	con_log(CL_ANN1, (CE_NOTE, "issue_cmd_in_poll_mode_ppc: called"));

	/*
	 * Seed cmd_status with the poll sentinel and ask the FW not to
	 * post the completion to the reply queue; we spin on the status
	 * byte instead.
	 */
	frame_hdr = (struct mrsas_header *)cmd->frame;
	ddi_put8(cmd->frame_dma_obj.acc_handle, &frame_hdr->cmd_status,
	    MFI_CMD_STATUS_POLL_MODE);
	flags = ddi_get16(cmd->frame_dma_obj.acc_handle, &frame_hdr->flags);
	flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;

	ddi_put16(cmd->frame_dma_obj.acc_handle, &frame_hdr->flags, flags);

	/* issue the frame using inbound queue port */
	WR_IB_PICK_QPORT((cmd->frame_phys_addr) |
	    (((cmd->frame_count - 1) << 1) | 1), instance);

	/* wait for cmd_status to change from 0xFF */
	for (i = 0; i < msecs && (
	    ddi_get8(cmd->frame_dma_obj.acc_handle, &frame_hdr->cmd_status)
	    == MFI_CMD_STATUS_POLL_MODE); i++) {
		drv_usecwait(MILLISEC); /* wait for 1000 usecs */
	}

	if (ddi_get8(cmd->frame_dma_obj.acc_handle, &frame_hdr->cmd_status)
	    == MFI_CMD_STATUS_POLL_MODE) {
		con_log(CL_ANN, (CE_NOTE, "issue_cmd_in_poll_mode: "
		    "cmd polling timed out"));
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}
6862 6864
/*
 * enable_intr_ppc
 *
 * Unmask the adapter's outbound (reply) interrupt.  The mask value is
 * controller-generation specific: SKINNY uses an inverted single-bit
 * mask, 2108-class parts clear the doorbell first and then unmask the
 * reply-message interrupt.
 */
static void
enable_intr_ppc(struct mrsas_instance *instance)
{
	uint32_t mask;

	con_log(CL_ANN1, (CE_NOTE, "enable_intr_ppc: called"));

	if (instance->skinny) {
		/* For SKINNY, write ~0x1, from BSD's mfi driver. */
		WR_OB_INTR_MASK(0xfffffffe, instance);
	} else {
		/* WR_OB_DOORBELL_CLEAR(0xFFFFFFFF, instance); */
		WR_OB_DOORBELL_CLEAR(OB_DOORBELL_CLEAR_MASK, instance);

		/* WR_OB_INTR_MASK(~0x80000000, instance); */
		WR_OB_INTR_MASK(~(MFI_REPLY_2108_MESSAGE_INTR_MASK), instance);
	}

	/* dummy read to force PCI flush */
	mask = RD_OB_INTR_MASK(instance);

	con_log(CL_ANN1, (CE_NOTE, "enable_intr_ppc: "
	    "outbound_intr_mask = 0x%x", mask));
}
6887 6889
/*
 * disable_intr_ppc
 *
 * Mask all adapter interrupts by writing OB_INTR_MASK to the outbound
 * interrupt mask register, then read it back to flush the PCI write.
 */
static void
disable_intr_ppc(struct mrsas_instance *instance)
{
	uint32_t mask;

	con_log(CL_ANN1, (CE_NOTE, "disable_intr_ppc: called"));

	con_log(CL_ANN1, (CE_NOTE, "disable_intr_ppc: before : "
	    "outbound_intr_mask = 0x%x", RD_OB_INTR_MASK(instance)));

	/* For now, assume there are no extras needed for Skinny support. */

	WR_OB_INTR_MASK(OB_INTR_MASK, instance);

	con_log(CL_ANN1, (CE_NOTE, "disable_intr_ppc: after : "
	    "outbound_intr_mask = 0x%x", RD_OB_INTR_MASK(instance)));

	/* dummy read to force PCI flush */
	mask = RD_OB_INTR_MASK(instance);
#ifdef lint
	mask = mask;
#endif
}
6911 6913
/*
 * intr_ack_ppc
 *
 * Decide whether the pending interrupt belongs to this adapter and,
 * if so, acknowledge it by writing the status back (SKINNY) or to the
 * doorbell-clear register (2108).  Returns DDI_INTR_CLAIMED or
 * DDI_INTR_UNCLAIMED.
 */
static int
intr_ack_ppc(struct mrsas_instance *instance)
{
	uint32_t status;
	int ret = DDI_INTR_CLAIMED;

	con_log(CL_ANN1, (CE_NOTE, "intr_ack_ppc: called"));

	/* check if it is our interrupt */
	status = RD_OB_INTR_STATUS(instance);

	con_log(CL_ANN1, (CE_NOTE, "intr_ack_ppc: status = 0x%x", status));

	/*
	 * NOTE: Some drivers call out SKINNY here, but the return is the same
	 * for SKINNY and 2108.
	 */
	if (!(status & MFI_REPLY_2108_MESSAGE_INTR)) {
		ret = DDI_INTR_UNCLAIMED;
	}

	/* A bad register access handle also means we can't claim this. */
	if (mrsas_check_acc_handle(instance->regmap_handle) != DDI_SUCCESS) {
		ddi_fm_service_impact(instance->dip, DDI_SERVICE_LOST);
		ret = DDI_INTR_UNCLAIMED;
	}

	if (ret == DDI_INTR_UNCLAIMED) {
		return (ret);
	}

	/*
	 * Clear the interrupt by writing back the same value.
	 * Another case where SKINNY is slightly different.
	 */
	if (instance->skinny) {
		WR_OB_INTR_STATUS(status, instance);
	} else {
		WR_OB_DOORBELL_CLEAR(status, instance);
	}

	/* dummy READ */
	status = RD_OB_INTR_STATUS(instance);

	con_log(CL_ANN1, (CE_NOTE, "intr_ack_ppc: interrupt cleared"));

	return (ret);
}
6959 6961
/*
 * Marks HBA as bad. This will be called either when an
 * IO packet times out even after 3 FW resets
 * or FW is found to be fault even after 3 continuous resets.
 */

static int
mrsas_kill_adapter(struct mrsas_instance *instance)
{
	/*
	 * NOTE(review): deadadapter is tested outside ocr_flags_mtx; a
	 * concurrent caller could pass this check before the flag is set.
	 * Presumably benign (a second MFI_STOP_ADP write) — confirm.
	 */
	if (instance->deadadapter == 1)
		return (DDI_FAILURE);

	con_log(CL_ANN1, (CE_NOTE, "mrsas_kill_adapter: "
	    "Writing to doorbell with MFI_STOP_ADP "));
	mutex_enter(&instance->ocr_flags_mtx);
	instance->deadadapter = 1;
	mutex_exit(&instance->ocr_flags_mtx);
	/* Quiesce: mask interrupts, stop the FW, fail all pending cmds. */
	instance->func_ptr->disable_intr(instance);
	WR_IB_DOORBELL(MFI_STOP_ADP, instance);
	(void) mrsas_complete_pending_cmds(instance);
	return (DDI_SUCCESS);
}
6982 6984
6983 6985
/*
 * mrsas_reset_ppc
 *
 * Online controller reset (OCR) for MFI-class adapters: unlock the
 * diagnostic register, pulse the reset bit, wait for the firmware to
 * come back to READY, re-initialize the MFI queues, then re-issue
 * pending commands and the AEN registration.  Kills the adapter after
 * MAX_FW_RESET_COUNT consecutive post-reset firmware faults.
 */
static int
mrsas_reset_ppc(struct mrsas_instance *instance)
{
	uint32_t status;
	uint32_t retry = 0;
	uint32_t cur_abs_reg_val;
	uint32_t fw_state;

	con_log(CL_ANN, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));

	if (instance->deadadapter == 1) {
		cmn_err(CE_WARN, "mrsas_reset_ppc: "
		    "no more resets as HBA has been marked dead ");
		return (DDI_FAILURE);
	}
	mutex_enter(&instance->ocr_flags_mtx);
	instance->adapterresetinprogress = 1;
	mutex_exit(&instance->ocr_flags_mtx);
	con_log(CL_ANN1, (CE_NOTE, "mrsas_reset_ppc: adpterresetinprogress "
	    "flag set, time %llx", gethrtime()));

	instance->func_ptr->disable_intr(instance);
retry_reset:
	/* Magic unlock sequence that enables diagnostic writes (DRWE). */
	WR_IB_WRITE_SEQ(0, instance);
	WR_IB_WRITE_SEQ(4, instance);
	WR_IB_WRITE_SEQ(0xb, instance);
	WR_IB_WRITE_SEQ(2, instance);
	WR_IB_WRITE_SEQ(7, instance);
	WR_IB_WRITE_SEQ(0xd, instance);
	con_log(CL_ANN1, (CE_NOTE, "mrsas_reset_ppc: magic number written "
	    "to write sequence register\n"));
	delay(100 * drv_usectohz(MILLISEC));
	status = RD_OB_DRWE(instance);

	/* Wait (100 ms steps) for the diag write-enable bit to latch. */
	while (!(status & DIAG_WRITE_ENABLE)) {
		delay(100 * drv_usectohz(MILLISEC));
		status = RD_OB_DRWE(instance);
		if (retry++ == 100) {
			cmn_err(CE_WARN, "mrsas_reset_ppc: DRWE bit "
			    "check retry count %d", retry);
			return (DDI_FAILURE);
		}
	}
	WR_IB_DRWE(status | DIAG_RESET_ADAPTER, instance);
	delay(100 * drv_usectohz(MILLISEC));
	status = RD_OB_DRWE(instance);
	/*
	 * NOTE(review): 'retry' is not reset between the two wait loops or
	 * across retry_reset iterations, so later waits get a smaller
	 * budget — confirm this is intended.
	 */
	while (status & DIAG_RESET_ADAPTER) {
		delay(100 * drv_usectohz(MILLISEC));
		status = RD_OB_DRWE(instance);
		if (retry++ == 100) {
			cmn_err(CE_WARN, "mrsas_reset_ppc: "
			    "RESET FAILED. KILL adapter called.");

			(void) mrsas_kill_adapter(instance);
			return (DDI_FAILURE);
		}
	}
	con_log(CL_ANN, (CE_NOTE, "mrsas_reset_ppc: Adapter reset complete"));
	con_log(CL_ANN1, (CE_NOTE, "mrsas_reset_ppc: "
	    "Calling mfi_state_transition_to_ready"));

	/* Mark HBA as bad, if FW is fault after 3 continuous resets */
	if (mfi_state_transition_to_ready(instance) ||
	    debug_fw_faults_after_ocr_g == 1) {
		cur_abs_reg_val =
		    instance->func_ptr->read_fw_status_reg(instance);
		fw_state = cur_abs_reg_val & MFI_STATE_MASK;

#ifdef OCRDEBUG
		con_log(CL_ANN1, (CE_NOTE,
		    "mrsas_reset_ppc :before fake: FW is not ready "
		    "FW state = 0x%x", fw_state));
		if (debug_fw_faults_after_ocr_g == 1)
			fw_state = MFI_STATE_FAULT;
#endif

		con_log(CL_ANN1, (CE_NOTE, "mrsas_reset_ppc : FW is not ready "
		    "FW state = 0x%x", fw_state));

		if (fw_state == MFI_STATE_FAULT) {
			/* increment the count */
			instance->fw_fault_count_after_ocr++;
			if (instance->fw_fault_count_after_ocr
			    < MAX_FW_RESET_COUNT) {
				cmn_err(CE_WARN, "mrsas_reset_ppc: "
				    "FW is in fault after OCR count %d "
				    "Retry Reset",
				    instance->fw_fault_count_after_ocr);
				goto retry_reset;

			} else {
				cmn_err(CE_WARN, "mrsas_reset_ppc: "
				    "Max Reset Count exceeded >%d"
				    "Mark HBA as bad, KILL adapter",
				    MAX_FW_RESET_COUNT);

				(void) mrsas_kill_adapter(instance);
				return (DDI_FAILURE);
			}
		}
	}
	/* reset the counter as FW is up after OCR */
	instance->fw_fault_count_after_ocr = 0;


	/* Re-arm the MFI reply queue indices for the freshly reset FW. */
	ddi_put32(instance->mfi_internal_dma_obj.acc_handle,
	    instance->producer, 0);

	ddi_put32(instance->mfi_internal_dma_obj.acc_handle,
	    instance->consumer, 0);

	con_log(CL_ANN1, (CE_NOTE, "mrsas_reset_ppc: "
	    " after resetting produconsumer chck indexs:"
	    "producer %x consumer %x", *instance->producer,
	    *instance->consumer));

	con_log(CL_ANN1, (CE_NOTE, "mrsas_reset_ppc: "
	    "Calling mrsas_issue_init_mfi"));
	(void) mrsas_issue_init_mfi(instance);
	con_log(CL_ANN1, (CE_NOTE, "mrsas_reset_ppc: "
	    "mrsas_issue_init_mfi Done"));

	con_log(CL_ANN1, (CE_NOTE, "mrsas_reset_ppc: "
	    "Calling mrsas_print_pending_cmd\n"));
	(void) mrsas_print_pending_cmds(instance);
	con_log(CL_ANN1, (CE_NOTE, "mrsas_reset_ppc: "
	    "mrsas_print_pending_cmd done\n"));

	instance->func_ptr->enable_intr(instance);
	instance->fw_outstanding = 0;

	con_log(CL_ANN1, (CE_NOTE, "mrsas_reset_ppc: "
	    "Calling mrsas_issue_pending_cmds"));
	(void) mrsas_issue_pending_cmds(instance);
	con_log(CL_ANN1, (CE_NOTE, "mrsas_reset_ppc: "
	    "issue_pending_cmds done.\n"));

	con_log(CL_ANN1, (CE_NOTE, "mrsas_reset_ppc: "
	    "Calling aen registration"));


	/*
	 * NOTE(review): aen_cmd is dereferenced without a NULL check; this
	 * assumes AEN registration always precedes any OCR — confirm.
	 */
	instance->aen_cmd->retry_count_for_ocr = 0;
	instance->aen_cmd->drv_pkt_time = 0;

	instance->func_ptr->issue_cmd(instance->aen_cmd, instance);
	con_log(CL_ANN1, (CE_NOTE, "Unsetting adpresetinprogress flag.\n"));

	mutex_enter(&instance->ocr_flags_mtx);
	instance->adapterresetinprogress = 0;
	mutex_exit(&instance->ocr_flags_mtx);
	con_log(CL_ANN1, (CE_NOTE, "mrsas_reset_ppc: "
	    "adpterresetinprogress flag unset"));

	con_log(CL_ANN1, (CE_NOTE, "mrsas_reset_ppc done\n"));
	return (DDI_SUCCESS);
}
7140 7142
7141 7143 /*
7142 7144 * FMA functions.
7143 7145 */
7144 7146 int
7145 7147 mrsas_common_check(struct mrsas_instance *instance, struct mrsas_cmd *cmd)
7146 7148 {
7147 7149 int ret = DDI_SUCCESS;
7148 7150
7149 7151 if (cmd != NULL &&
7150 7152 mrsas_check_dma_handle(cmd->frame_dma_obj.dma_handle) !=
7151 7153 DDI_SUCCESS) {
7152 7154 ddi_fm_service_impact(instance->dip, DDI_SERVICE_UNAFFECTED);
7153 7155 if (cmd->pkt != NULL) {
7154 7156 cmd->pkt->pkt_reason = CMD_TRAN_ERR;
7155 7157 cmd->pkt->pkt_statistics = 0;
7156 7158 }
7157 7159 ret = DDI_FAILURE;
7158 7160 }
7159 7161 if (mrsas_check_dma_handle(instance->mfi_internal_dma_obj.dma_handle)
7160 7162 != DDI_SUCCESS) {
7161 7163 ddi_fm_service_impact(instance->dip, DDI_SERVICE_UNAFFECTED);
7162 7164 if (cmd != NULL && cmd->pkt != NULL) {
7163 7165 cmd->pkt->pkt_reason = CMD_TRAN_ERR;
7164 7166 cmd->pkt->pkt_statistics = 0;
7165 7167 }
7166 7168 ret = DDI_FAILURE;
7167 7169 }
7168 7170 if (mrsas_check_dma_handle(instance->mfi_evt_detail_obj.dma_handle) !=
7169 7171 DDI_SUCCESS) {
7170 7172 ddi_fm_service_impact(instance->dip, DDI_SERVICE_UNAFFECTED);
7171 7173 if (cmd != NULL && cmd->pkt != NULL) {
7172 7174 cmd->pkt->pkt_reason = CMD_TRAN_ERR;
7173 7175 cmd->pkt->pkt_statistics = 0;
7174 7176 }
7175 7177 ret = DDI_FAILURE;
7176 7178 }
7177 7179 if (mrsas_check_acc_handle(instance->regmap_handle) != DDI_SUCCESS) {
7178 7180 ddi_fm_service_impact(instance->dip, DDI_SERVICE_UNAFFECTED);
7179 7181
7180 7182 ddi_fm_acc_err_clear(instance->regmap_handle, DDI_FME_VER0);
7181 7183
7182 7184 if (cmd != NULL && cmd->pkt != NULL) {
7183 7185 cmd->pkt->pkt_reason = CMD_TRAN_ERR;
7184 7186 cmd->pkt->pkt_statistics = 0;
7185 7187 }
7186 7188 ret = DDI_FAILURE;
7187 7189 }
7188 7190
7189 7191 return (ret);
7190 7192 }
7191 7193
7192 7194 /*ARGSUSED*/
7193 7195 static int
7194 7196 mrsas_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err, const void *impl_data)
7195 7197 {
7196 7198 /*
7197 7199 * as the driver can always deal with an error in any dma or
7198 7200 * access handle, we can just return the fme_status value.
7199 7201 */
7200 7202 pci_ereport_post(dip, err, NULL);
7201 7203 return (err->fme_status);
7202 7204 }
7203 7205
7204 7206 static void
7205 7207 mrsas_fm_init(struct mrsas_instance *instance)
7206 7208 {
7207 7209 /* Need to change iblock to priority for new MSI intr */
7208 7210 ddi_iblock_cookie_t fm_ibc;
7209 7211
7210 7212 /* Only register with IO Fault Services if we have some capability */
7211 7213 if (instance->fm_capabilities) {
7212 7214 /* Adjust access and dma attributes for FMA */
7213 7215 endian_attr.devacc_attr_access = DDI_FLAGERR_ACC;
7214 7216 mrsas_generic_dma_attr.dma_attr_flags = DDI_DMA_FLAGERR;
7215 7217
7216 7218 /*
7217 7219 * Register capabilities with IO Fault Services.
7218 7220 * fm_capabilities will be updated to indicate
7219 7221 * capabilities actually supported (not requested.)
7220 7222 */
7221 7223
7222 7224 ddi_fm_init(instance->dip, &instance->fm_capabilities, &fm_ibc);
7223 7225
7224 7226 /*
7225 7227 * Initialize pci ereport capabilities if ereport
7226 7228 * capable (should always be.)
7227 7229 */
7228 7230
7229 7231 if (DDI_FM_EREPORT_CAP(instance->fm_capabilities) ||
7230 7232 DDI_FM_ERRCB_CAP(instance->fm_capabilities)) {
7231 7233 pci_ereport_setup(instance->dip);
7232 7234 }
7233 7235
7234 7236 /*
7235 7237 * Register error callback if error callback capable.
7236 7238 */
7237 7239 if (DDI_FM_ERRCB_CAP(instance->fm_capabilities)) {
7238 7240 ddi_fm_handler_register(instance->dip,
7239 7241 mrsas_fm_error_cb, (void*) instance);
7240 7242 }
7241 7243 } else {
7242 7244 endian_attr.devacc_attr_access = DDI_DEFAULT_ACC;
7243 7245 mrsas_generic_dma_attr.dma_attr_flags = 0;
7244 7246 }
7245 7247 }
7246 7248
7247 7249 static void
7248 7250 mrsas_fm_fini(struct mrsas_instance *instance)
7249 7251 {
7250 7252 /* Only unregister FMA capabilities if registered */
7251 7253 if (instance->fm_capabilities) {
7252 7254 /*
7253 7255 * Un-register error callback if error callback capable.
7254 7256 */
7255 7257 if (DDI_FM_ERRCB_CAP(instance->fm_capabilities)) {
7256 7258 ddi_fm_handler_unregister(instance->dip);
7257 7259 }
7258 7260
7259 7261 /*
7260 7262 * Release any resources allocated by pci_ereport_setup()
7261 7263 */
7262 7264 if (DDI_FM_EREPORT_CAP(instance->fm_capabilities) ||
7263 7265 DDI_FM_ERRCB_CAP(instance->fm_capabilities)) {
7264 7266 pci_ereport_teardown(instance->dip);
7265 7267 }
7266 7268
7267 7269 /* Unregister from IO Fault Services */
7268 7270 ddi_fm_fini(instance->dip);
7269 7271
7270 7272 /* Adjust access and dma attributes for FMA */
7271 7273 endian_attr.devacc_attr_access = DDI_DEFAULT_ACC;
7272 7274 mrsas_generic_dma_attr.dma_attr_flags = 0;
7273 7275 }
7274 7276 }
7275 7277
7276 7278 int
7277 7279 mrsas_check_acc_handle(ddi_acc_handle_t handle)
7278 7280 {
7279 7281 ddi_fm_error_t de;
7280 7282
7281 7283 if (handle == NULL) {
7282 7284 return (DDI_FAILURE);
7283 7285 }
7284 7286
7285 7287 ddi_fm_acc_err_get(handle, &de, DDI_FME_VERSION);
7286 7288
7287 7289 return (de.fme_status);
7288 7290 }
7289 7291
7290 7292 int
7291 7293 mrsas_check_dma_handle(ddi_dma_handle_t handle)
7292 7294 {
7293 7295 ddi_fm_error_t de;
7294 7296
7295 7297 if (handle == NULL) {
7296 7298 return (DDI_FAILURE);
7297 7299 }
7298 7300
7299 7301 ddi_fm_dma_err_get(handle, &de, DDI_FME_VERSION);
7300 7302
7301 7303 return (de.fme_status);
7302 7304 }
7303 7305
7304 7306 void
7305 7307 mrsas_fm_ereport(struct mrsas_instance *instance, char *detail)
7306 7308 {
7307 7309 uint64_t ena;
7308 7310 char buf[FM_MAX_CLASS];
7309 7311
7310 7312 (void) snprintf(buf, FM_MAX_CLASS, "%s.%s", DDI_FM_DEVICE, detail);
7311 7313 ena = fm_ena_generate(0, FM_ENA_FMT1);
7312 7314 if (DDI_FM_EREPORT_CAP(instance->fm_capabilities)) {
7313 7315 ddi_fm_ereport_post(instance->dip, buf, ena, DDI_NOSLEEP,
7314 7316 FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERSION, NULL);
7315 7317 }
7316 7318 }
7317 7319
7318 7320 static int
7319 7321 mrsas_add_intrs(struct mrsas_instance *instance, int intr_type)
7320 7322 {
7321 7323
7322 7324 dev_info_t *dip = instance->dip;
7323 7325 int avail, actual, count;
7324 7326 int i, flag, ret;
7325 7327
7326 7328 con_log(CL_DLEVEL1, (CE_NOTE, "mrsas_add_intrs: intr_type = %x",
7327 7329 intr_type));
7328 7330
7329 7331 /* Get number of interrupts */
7330 7332 ret = ddi_intr_get_nintrs(dip, intr_type, &count);
7331 7333 if ((ret != DDI_SUCCESS) || (count == 0)) {
7332 7334 con_log(CL_ANN, (CE_WARN, "ddi_intr_get_nintrs() failed:"
7333 7335 "ret %d count %d", ret, count));
7334 7336
7335 7337 return (DDI_FAILURE);
7336 7338 }
7337 7339
7338 7340 con_log(CL_DLEVEL1, (CE_NOTE, "mrsas_add_intrs: count = %d ", count));
7339 7341
7340 7342 /* Get number of available interrupts */
7341 7343 ret = ddi_intr_get_navail(dip, intr_type, &avail);
7342 7344 if ((ret != DDI_SUCCESS) || (avail == 0)) {
7343 7345 con_log(CL_ANN, (CE_WARN, "ddi_intr_get_navail() failed:"
7344 7346 "ret %d avail %d", ret, avail));
7345 7347
7346 7348 return (DDI_FAILURE);
7347 7349 }
7348 7350 con_log(CL_DLEVEL1, (CE_NOTE, "mrsas_add_intrs: avail = %d ", avail));
7349 7351
7350 7352 /* Only one interrupt routine. So limit the count to 1 */
7351 7353 if (count > 1) {
7352 7354 count = 1;
7353 7355 }
7354 7356
7355 7357 /*
7356 7358 * Allocate an array of interrupt handlers. Currently we support
7357 7359 * only one interrupt. The framework can be extended later.
7358 7360 */
7359 7361 instance->intr_htable_size = count * sizeof (ddi_intr_handle_t);
7360 7362 instance->intr_htable = kmem_zalloc(instance->intr_htable_size,
7361 7363 KM_SLEEP);
7362 7364 ASSERT(instance->intr_htable);
7363 7365
7364 7366 flag = ((intr_type == DDI_INTR_TYPE_MSI) ||
7365 7367 (intr_type == DDI_INTR_TYPE_MSIX)) ?
7366 7368 DDI_INTR_ALLOC_STRICT : DDI_INTR_ALLOC_NORMAL;
7367 7369
7368 7370 /* Allocate interrupt */
7369 7371 ret = ddi_intr_alloc(dip, instance->intr_htable, intr_type, 0,
7370 7372 count, &actual, flag);
7371 7373
7372 7374 if ((ret != DDI_SUCCESS) || (actual == 0)) {
7373 7375 con_log(CL_ANN, (CE_WARN, "mrsas_add_intrs: "
7374 7376 "avail = %d", avail));
7375 7377 goto mrsas_free_htable;
7376 7378 }
7377 7379
7378 7380 if (actual < count) {
7379 7381 con_log(CL_ANN, (CE_WARN, "mrsas_add_intrs: "
7380 7382 "Requested = %d Received = %d", count, actual));
7381 7383 }
7382 7384 instance->intr_cnt = actual;
7383 7385
7384 7386 /*
7385 7387 * Get the priority of the interrupt allocated.
7386 7388 */
7387 7389 if ((ret = ddi_intr_get_pri(instance->intr_htable[0],
7388 7390 &instance->intr_pri)) != DDI_SUCCESS) {
7389 7391 con_log(CL_ANN, (CE_WARN, "mrsas_add_intrs: "
7390 7392 "get priority call failed"));
7391 7393 goto mrsas_free_handles;
7392 7394 }
7393 7395
7394 7396 /*
7395 7397 * Test for high level mutex. we don't support them.
7396 7398 */
7397 7399 if (instance->intr_pri >= ddi_intr_get_hilevel_pri()) {
7398 7400 con_log(CL_ANN, (CE_WARN, "mrsas_add_intrs: "
7399 7401 "High level interrupts not supported."));
7400 7402 goto mrsas_free_handles;
7401 7403 }
7402 7404
7403 7405 con_log(CL_DLEVEL1, (CE_NOTE, "mrsas_add_intrs: intr_pri = 0x%x ",
7404 7406 instance->intr_pri));
7405 7407
7406 7408 /* Call ddi_intr_add_handler() */
7407 7409 for (i = 0; i < actual; i++) {
7408 7410 ret = ddi_intr_add_handler(instance->intr_htable[i],
7409 7411 (ddi_intr_handler_t *)mrsas_isr, (caddr_t)instance,
7410 7412 (caddr_t)(uintptr_t)i);
7411 7413
7412 7414 if (ret != DDI_SUCCESS) {
7413 7415 con_log(CL_ANN, (CE_WARN, "mrsas_add_intrs:"
7414 7416 "failed %d", ret));
7415 7417 goto mrsas_free_handles;
7416 7418 }
7417 7419
7418 7420 }
7419 7421
7420 7422 con_log(CL_DLEVEL1, (CE_NOTE, " ddi_intr_add_handler done"));
7421 7423
7422 7424 if ((ret = ddi_intr_get_cap(instance->intr_htable[0],
7423 7425 &instance->intr_cap)) != DDI_SUCCESS) {
7424 7426 con_log(CL_ANN, (CE_WARN, "ddi_intr_get_cap() failed %d",
7425 7427 ret));
7426 7428 goto mrsas_free_handlers;
7427 7429 }
7428 7430
7429 7431 if (instance->intr_cap & DDI_INTR_FLAG_BLOCK) {
7430 7432 con_log(CL_ANN, (CE_WARN, "Calling ddi_intr_block _enable"));
7431 7433
7432 7434 (void) ddi_intr_block_enable(instance->intr_htable,
7433 7435 instance->intr_cnt);
7434 7436 } else {
7435 7437 con_log(CL_ANN, (CE_NOTE, " calling ddi_intr_enable"));
7436 7438
7437 7439 for (i = 0; i < instance->intr_cnt; i++) {
7438 7440 (void) ddi_intr_enable(instance->intr_htable[i]);
7439 7441 con_log(CL_ANN, (CE_NOTE, "ddi intr enable returns "
7440 7442 "%d", i));
7441 7443 }
7442 7444 }
7443 7445
7444 7446 return (DDI_SUCCESS);
7445 7447
7446 7448 mrsas_free_handlers:
7447 7449 for (i = 0; i < actual; i++)
7448 7450 (void) ddi_intr_remove_handler(instance->intr_htable[i]);
7449 7451
7450 7452 mrsas_free_handles:
7451 7453 for (i = 0; i < actual; i++)
7452 7454 (void) ddi_intr_free(instance->intr_htable[i]);
7453 7455
7454 7456 mrsas_free_htable:
7455 7457 if (instance->intr_htable != NULL)
7456 7458 kmem_free(instance->intr_htable, instance->intr_htable_size);
7457 7459
7458 7460 instance->intr_htable = NULL;
7459 7461 instance->intr_htable_size = 0;
7460 7462
7461 7463 return (DDI_FAILURE);
7462 7464
7463 7465 }
7464 7466
7465 7467
7466 7468 static void
7467 7469 mrsas_rem_intrs(struct mrsas_instance *instance)
7468 7470 {
7469 7471 int i;
7470 7472
7471 7473 con_log(CL_ANN, (CE_NOTE, "mrsas_rem_intrs called"));
7472 7474
7473 7475 /* Disable all interrupts first */
7474 7476 if (instance->intr_cap & DDI_INTR_FLAG_BLOCK) {
7475 7477 (void) ddi_intr_block_disable(instance->intr_htable,
7476 7478 instance->intr_cnt);
7477 7479 } else {
7478 7480 for (i = 0; i < instance->intr_cnt; i++) {
7479 7481 (void) ddi_intr_disable(instance->intr_htable[i]);
7480 7482 }
7481 7483 }
7482 7484
7483 7485 /* Remove all the handlers */
7484 7486
7485 7487 for (i = 0; i < instance->intr_cnt; i++) {
7486 7488 (void) ddi_intr_remove_handler(instance->intr_htable[i]);
7487 7489 (void) ddi_intr_free(instance->intr_htable[i]);
7488 7490 }
7489 7491
7490 7492 if (instance->intr_htable != NULL)
7491 7493 kmem_free(instance->intr_htable, instance->intr_htable_size);
7492 7494
7493 7495 instance->intr_htable = NULL;
7494 7496 instance->intr_htable_size = 0;
7495 7497
7496 7498 }
7497 7499
7498 7500 static int
7499 7501 mrsas_tran_bus_config(dev_info_t *parent, uint_t flags,
7500 7502 ddi_bus_config_op_t op, void *arg, dev_info_t **childp)
7501 7503 {
7502 7504 struct mrsas_instance *instance;
7503 7505 int config;
7504 7506 int rval = NDI_SUCCESS;
7505 7507
7506 7508 char *ptr = NULL;
7507 7509 int tgt, lun;
7508 7510
7509 7511 con_log(CL_ANN1, (CE_NOTE, "Bus config called for op = %x", op));
7510 7512
7511 7513 if ((instance = ddi_get_soft_state(mrsas_state,
7512 7514 ddi_get_instance(parent))) == NULL) {
7513 7515 return (NDI_FAILURE);
7514 7516 }
7515 7517
7516 7518 /* Hold nexus during bus_config */
7517 7519 ndi_devi_enter(parent, &config);
7518 7520 switch (op) {
7519 7521 case BUS_CONFIG_ONE: {
7520 7522
7521 7523 /* parse wwid/target name out of name given */
7522 7524 if ((ptr = strchr((char *)arg, '@')) == NULL) {
7523 7525 rval = NDI_FAILURE;
7524 7526 break;
7525 7527 }
7526 7528 ptr++;
7527 7529
7528 7530 if (mrsas_parse_devname(arg, &tgt, &lun) != 0) {
7529 7531 rval = NDI_FAILURE;
7530 7532 break;
7531 7533 }
7532 7534
7533 7535 if (lun == 0) {
7534 7536 rval = mrsas_config_ld(instance, tgt, lun, childp);
7535 7537 #ifdef PDSUPPORT
7536 7538 } else if ((instance->tbolt || instance->skinny) && lun != 0) {
7537 7539 rval = mrsas_tbolt_config_pd(instance,
7538 7540 tgt, lun, childp);
7539 7541 #endif
7540 7542 } else {
7541 7543 rval = NDI_FAILURE;
7542 7544 }
7543 7545
7544 7546 break;
7545 7547 }
7546 7548 case BUS_CONFIG_DRIVER:
7547 7549 case BUS_CONFIG_ALL: {
7548 7550
7549 7551 rval = mrsas_config_all_devices(instance);
7550 7552
7551 7553 rval = NDI_SUCCESS;
7552 7554 break;
7553 7555 }
7554 7556 }
7555 7557
7556 7558 if (rval == NDI_SUCCESS) {
7557 7559 rval = ndi_busop_bus_config(parent, flags, op, arg, childp, 0);
7558 7560
7559 7561 }
7560 7562 ndi_devi_exit(parent, config);
7561 7563
7562 7564 con_log(CL_ANN1, (CE_NOTE, "mrsas_tran_bus_config: rval = %x",
7563 7565 rval));
7564 7566 return (rval);
7565 7567 }
7566 7568
7567 7569 static int
7568 7570 mrsas_config_all_devices(struct mrsas_instance *instance)
7569 7571 {
7570 7572 int rval, tgt;
7571 7573
7572 7574 for (tgt = 0; tgt < MRDRV_MAX_LD; tgt++) {
7573 7575 (void) mrsas_config_ld(instance, tgt, 0, NULL);
7574 7576
7575 7577 }
7576 7578
7577 7579 #ifdef PDSUPPORT
7578 7580 /* Config PD devices connected to the card */
7579 7581 if (instance->tbolt || instance->skinny) {
7580 7582 for (tgt = 0; tgt < instance->mr_tbolt_pd_max; tgt++) {
7581 7583 (void) mrsas_tbolt_config_pd(instance, tgt, 1, NULL);
7582 7584 }
7583 7585 }
7584 7586 #endif
7585 7587
7586 7588 rval = NDI_SUCCESS;
7587 7589 return (rval);
7588 7590 }
7589 7591
7590 7592 static int
7591 7593 mrsas_parse_devname(char *devnm, int *tgt, int *lun)
7592 7594 {
7593 7595 char devbuf[SCSI_MAXNAMELEN];
7594 7596 char *addr;
7595 7597 char *p, *tp, *lp;
7596 7598 long num;
7597 7599
7598 7600 /* Parse dev name and address */
7599 7601 (void) strcpy(devbuf, devnm);
7600 7602 addr = "";
7601 7603 for (p = devbuf; *p != '\0'; p++) {
7602 7604 if (*p == '@') {
7603 7605 addr = p + 1;
7604 7606 *p = '\0';
7605 7607 } else if (*p == ':') {
7606 7608 *p = '\0';
7607 7609 break;
7608 7610 }
7609 7611 }
7610 7612
7611 7613 /* Parse target and lun */
7612 7614 for (p = tp = addr, lp = NULL; *p != '\0'; p++) {
7613 7615 if (*p == ',') {
7614 7616 lp = p + 1;
7615 7617 *p = '\0';
7616 7618 break;
7617 7619 }
7618 7620 }
7619 7621 if (tgt && tp) {
7620 7622 if (ddi_strtol(tp, NULL, 0x10, &num)) {
7621 7623 return (DDI_FAILURE); /* Can declare this as constant */
7622 7624 }
7623 7625 *tgt = (int)num;
7624 7626 }
7625 7627 if (lun && lp) {
7626 7628 if (ddi_strtol(lp, NULL, 0x10, &num)) {
7627 7629 return (DDI_FAILURE);
7628 7630 }
7629 7631 *lun = (int)num;
7630 7632 }
7631 7633 return (DDI_SUCCESS); /* Success case */
7632 7634 }
7633 7635
7634 7636 static int
7635 7637 mrsas_config_ld(struct mrsas_instance *instance, uint16_t tgt,
7636 7638 uint8_t lun, dev_info_t **ldip)
7637 7639 {
7638 7640 struct scsi_device *sd;
7639 7641 dev_info_t *child;
7640 7642 int rval;
7641 7643
7642 7644 con_log(CL_DLEVEL1, (CE_NOTE, "mrsas_config_ld: t = %d l = %d",
7643 7645 tgt, lun));
7644 7646
7645 7647 if ((child = mrsas_find_child(instance, tgt, lun)) != NULL) {
7646 7648 if (ldip) {
7647 7649 *ldip = child;
7648 7650 }
7649 7651 if (instance->mr_ld_list[tgt].flag != MRDRV_TGT_VALID) {
7650 7652 rval = mrsas_service_evt(instance, tgt, 0,
7651 7653 MRSAS_EVT_UNCONFIG_TGT, NULL);
7652 7654 con_log(CL_ANN1, (CE_WARN,
7653 7655 "mr_sas: DELETING STALE ENTRY rval = %d "
7654 7656 "tgt id = %d ", rval, tgt));
7655 7657 return (NDI_FAILURE);
7656 7658 }
7657 7659 return (NDI_SUCCESS);
7658 7660 }
7659 7661
7660 7662 sd = kmem_zalloc(sizeof (struct scsi_device), KM_SLEEP);
7661 7663 sd->sd_address.a_hba_tran = instance->tran;
7662 7664 sd->sd_address.a_target = (uint16_t)tgt;
7663 7665 sd->sd_address.a_lun = (uint8_t)lun;
7664 7666
7665 7667 if (scsi_hba_probe(sd, NULL) == SCSIPROBE_EXISTS)
7666 7668 rval = mrsas_config_scsi_device(instance, sd, ldip);
7667 7669 else
7668 7670 rval = NDI_FAILURE;
7669 7671
7670 7672 /* sd_unprobe is blank now. Free buffer manually */
7671 7673 if (sd->sd_inq) {
7672 7674 kmem_free(sd->sd_inq, SUN_INQSIZE);
7673 7675 sd->sd_inq = (struct scsi_inquiry *)NULL;
7674 7676 }
7675 7677
7676 7678 kmem_free(sd, sizeof (struct scsi_device));
7677 7679 con_log(CL_DLEVEL1, (CE_NOTE, "mrsas_config_ld: return rval = %d",
7678 7680 rval));
7679 7681 return (rval);
7680 7682 }
7681 7683
7682 7684 int
7683 7685 mrsas_config_scsi_device(struct mrsas_instance *instance,
7684 7686 struct scsi_device *sd, dev_info_t **dipp)
7685 7687 {
7686 7688 char *nodename = NULL;
7687 7689 char **compatible = NULL;
7688 7690 int ncompatible = 0;
7689 7691 char *childname;
7690 7692 dev_info_t *ldip = NULL;
7691 7693 int tgt = sd->sd_address.a_target;
7692 7694 int lun = sd->sd_address.a_lun;
7693 7695 int dtype = sd->sd_inq->inq_dtype & DTYPE_MASK;
7694 7696 int rval;
7695 7697
7696 7698 con_log(CL_DLEVEL1, (CE_NOTE, "mr_sas: scsi_device t%dL%d", tgt, lun));
7697 7699 scsi_hba_nodename_compatible_get(sd->sd_inq, NULL, dtype,
7698 7700 NULL, &nodename, &compatible, &ncompatible);
7699 7701
7700 7702 if (nodename == NULL) {
7701 7703 con_log(CL_ANN1, (CE_WARN, "mr_sas: Found no compatible driver "
7702 7704 "for t%dL%d", tgt, lun));
7703 7705 rval = NDI_FAILURE;
7704 7706 goto finish;
7705 7707 }
7706 7708
7707 7709 childname = (dtype == DTYPE_DIRECT) ? "sd" : nodename;
7708 7710 con_log(CL_DLEVEL1, (CE_NOTE,
7709 7711 "mr_sas: Childname = %2s nodename = %s", childname, nodename));
7710 7712
7711 7713 /* Create a dev node */
7712 7714 rval = ndi_devi_alloc(instance->dip, childname, DEVI_SID_NODEID, &ldip);
7713 7715 con_log(CL_DLEVEL1, (CE_NOTE,
7714 7716 "mr_sas_config_scsi_device: ndi_devi_alloc rval = %x", rval));
7715 7717 if (rval == NDI_SUCCESS) {
7716 7718 if (ndi_prop_update_int(DDI_DEV_T_NONE, ldip, "target", tgt) !=
7717 7719 DDI_PROP_SUCCESS) {
7718 7720 con_log(CL_ANN1, (CE_WARN, "mr_sas: unable to create "
7719 7721 "property for t%dl%d target", tgt, lun));
7720 7722 rval = NDI_FAILURE;
7721 7723 goto finish;
7722 7724 }
7723 7725 if (ndi_prop_update_int(DDI_DEV_T_NONE, ldip, "lun", lun) !=
7724 7726 DDI_PROP_SUCCESS) {
7725 7727 con_log(CL_ANN1, (CE_WARN, "mr_sas: unable to create "
7726 7728 "property for t%dl%d lun", tgt, lun));
7727 7729 rval = NDI_FAILURE;
7728 7730 goto finish;
7729 7731 }
7730 7732
7731 7733 if (ndi_prop_update_string_array(DDI_DEV_T_NONE, ldip,
7732 7734 "compatible", compatible, ncompatible) !=
7733 7735 DDI_PROP_SUCCESS) {
7734 7736 con_log(CL_ANN1, (CE_WARN, "mr_sas: unable to create "
7735 7737 "property for t%dl%d compatible", tgt, lun));
7736 7738 rval = NDI_FAILURE;
7737 7739 goto finish;
7738 7740 }
7739 7741
7740 7742 rval = ndi_devi_online(ldip, NDI_ONLINE_ATTACH);
7741 7743 if (rval != NDI_SUCCESS) {
7742 7744 con_log(CL_ANN1, (CE_WARN, "mr_sas: unable to online "
7743 7745 "t%dl%d", tgt, lun));
7744 7746 ndi_prop_remove_all(ldip);
7745 7747 (void) ndi_devi_free(ldip);
7746 7748 } else {
7747 7749 con_log(CL_ANN1, (CE_CONT, "mr_sas: online Done :"
7748 7750 "0 t%dl%d", tgt, lun));
7749 7751 }
7750 7752
7751 7753 }
7752 7754 finish:
7753 7755 if (dipp) {
7754 7756 *dipp = ldip;
7755 7757 }
7756 7758
7757 7759 con_log(CL_DLEVEL1, (CE_NOTE,
7758 7760 "mr_sas: config_scsi_device rval = %d t%dL%d",
7759 7761 rval, tgt, lun));
7760 7762 scsi_hba_nodename_compatible_free(nodename, compatible);
7761 7763 return (rval);
7762 7764 }
7763 7765
7764 7766 /*ARGSUSED*/
7765 7767 int
7766 7768 mrsas_service_evt(struct mrsas_instance *instance, int tgt, int lun, int event,
7767 7769 uint64_t wwn)
7768 7770 {
7769 7771 struct mrsas_eventinfo *mrevt = NULL;
7770 7772
7771 7773 con_log(CL_ANN1, (CE_NOTE,
7772 7774 "mrsas_service_evt called for t%dl%d event = %d",
7773 7775 tgt, lun, event));
7774 7776
7775 7777 if ((instance->taskq == NULL) || (mrevt =
7776 7778 kmem_zalloc(sizeof (struct mrsas_eventinfo), KM_NOSLEEP)) == NULL) {
7777 7779 return (ENOMEM);
7778 7780 }
7779 7781
7780 7782 mrevt->instance = instance;
7781 7783 mrevt->tgt = tgt;
7782 7784 mrevt->lun = lun;
7783 7785 mrevt->event = event;
7784 7786 mrevt->wwn = wwn;
7785 7787
7786 7788 if ((ddi_taskq_dispatch(instance->taskq,
7787 7789 (void (*)(void *))mrsas_issue_evt_taskq, mrevt, DDI_NOSLEEP)) !=
7788 7790 DDI_SUCCESS) {
7789 7791 con_log(CL_ANN1, (CE_NOTE,
7790 7792 "mr_sas: Event task failed for t%dl%d event = %d",
7791 7793 tgt, lun, event));
7792 7794 kmem_free(mrevt, sizeof (struct mrsas_eventinfo));
7793 7795 return (DDI_FAILURE);
7794 7796 }
7795 7797 DTRACE_PROBE3(service_evt, int, tgt, int, lun, int, event);
7796 7798 return (DDI_SUCCESS);
7797 7799 }
7798 7800
7799 7801 static void
7800 7802 mrsas_issue_evt_taskq(struct mrsas_eventinfo *mrevt)
7801 7803 {
7802 7804 struct mrsas_instance *instance = mrevt->instance;
7803 7805 dev_info_t *dip, *pdip;
7804 7806 int circ1 = 0;
7805 7807 char *devname;
7806 7808
7807 7809 con_log(CL_ANN1, (CE_NOTE, "mrsas_issue_evt_taskq: called for"
7808 7810 " tgt %d lun %d event %d",
7809 7811 mrevt->tgt, mrevt->lun, mrevt->event));
7810 7812
7811 7813 if (mrevt->tgt < MRDRV_MAX_LD && mrevt->lun == 0) {
7812 7814 mutex_enter(&instance->config_dev_mtx);
7813 7815 dip = instance->mr_ld_list[mrevt->tgt].dip;
7814 7816 mutex_exit(&instance->config_dev_mtx);
7815 7817 #ifdef PDSUPPORT
7816 7818 } else {
7817 7819 mutex_enter(&instance->config_dev_mtx);
7818 7820 dip = instance->mr_tbolt_pd_list[mrevt->tgt].dip;
7819 7821 mutex_exit(&instance->config_dev_mtx);
7820 7822 #endif
7821 7823 }
7822 7824
7823 7825
7824 7826 ndi_devi_enter(instance->dip, &circ1);
7825 7827 switch (mrevt->event) {
7826 7828 case MRSAS_EVT_CONFIG_TGT:
7827 7829 if (dip == NULL) {
7828 7830
7829 7831 if (mrevt->lun == 0) {
7830 7832 (void) mrsas_config_ld(instance, mrevt->tgt,
7831 7833 0, NULL);
7832 7834 #ifdef PDSUPPORT
7833 7835 } else if (instance->tbolt || instance->skinny) {
7834 7836 (void) mrsas_tbolt_config_pd(instance,
7835 7837 mrevt->tgt,
7836 7838 1, NULL);
7837 7839 #endif
7838 7840 }
7839 7841 con_log(CL_ANN1, (CE_NOTE,
7840 7842 "mr_sas: EVT_CONFIG_TGT called:"
7841 7843 " for tgt %d lun %d event %d",
7842 7844 mrevt->tgt, mrevt->lun, mrevt->event));
7843 7845
7844 7846 } else {
7845 7847 con_log(CL_ANN1, (CE_NOTE,
7846 7848 "mr_sas: EVT_CONFIG_TGT dip != NULL:"
7847 7849 " for tgt %d lun %d event %d",
7848 7850 mrevt->tgt, mrevt->lun, mrevt->event));
7849 7851 }
7850 7852 break;
7851 7853 case MRSAS_EVT_UNCONFIG_TGT:
7852 7854 if (dip) {
7853 7855 if (i_ddi_devi_attached(dip)) {
7854 7856
7855 7857 pdip = ddi_get_parent(dip);
7856 7858
7857 7859 devname = kmem_zalloc(MAXNAMELEN + 1, KM_SLEEP);
7858 7860 (void) ddi_deviname(dip, devname);
7859 7861
7860 7862 (void) devfs_clean(pdip, devname + 1,
7861 7863 DV_CLEAN_FORCE);
7862 7864 kmem_free(devname, MAXNAMELEN + 1);
7863 7865 }
7864 7866 (void) ndi_devi_offline(dip, NDI_DEVI_REMOVE);
7865 7867 con_log(CL_ANN1, (CE_NOTE,
7866 7868 "mr_sas: EVT_UNCONFIG_TGT called:"
7867 7869 " for tgt %d lun %d event %d",
7868 7870 mrevt->tgt, mrevt->lun, mrevt->event));
7869 7871 } else {
7870 7872 con_log(CL_ANN1, (CE_NOTE,
7871 7873 "mr_sas: EVT_UNCONFIG_TGT dip == NULL:"
7872 7874 " for tgt %d lun %d event %d",
7873 7875 mrevt->tgt, mrevt->lun, mrevt->event));
7874 7876 }
7875 7877 break;
7876 7878 }
7877 7879 kmem_free(mrevt, sizeof (struct mrsas_eventinfo));
7878 7880 ndi_devi_exit(instance->dip, circ1);
7879 7881 }
7880 7882
7881 7883
7882 7884 int
7883 7885 mrsas_mode_sense_build(struct scsi_pkt *pkt)
7884 7886 {
7885 7887 union scsi_cdb *cdbp;
7886 7888 uint16_t page_code;
7887 7889 struct scsa_cmd *acmd;
7888 7890 struct buf *bp;
7889 7891 struct mode_header *modehdrp;
7890 7892
7891 7893 cdbp = (void *)pkt->pkt_cdbp;
7892 7894 page_code = cdbp->cdb_un.sg.scsi[0];
7893 7895 acmd = PKT2CMD(pkt);
7894 7896 bp = acmd->cmd_buf;
7895 7897 if ((!bp) && bp->b_un.b_addr && bp->b_bcount && acmd->cmd_dmacount) {
7896 7898 con_log(CL_ANN1, (CE_WARN, "Failing MODESENSE Command"));
7897 7899 /* ADD pkt statistics as Command failed. */
7898 7900 return (NULL);
7899 7901 }
7900 7902
7901 7903 bp_mapin(bp);
7902 7904 bzero(bp->b_un.b_addr, bp->b_bcount);
7903 7905
7904 7906 switch (page_code) {
7905 7907 case 0x3: {
7906 7908 struct mode_format *page3p = NULL;
7907 7909 modehdrp = (struct mode_header *)(bp->b_un.b_addr);
7908 7910 modehdrp->bdesc_length = MODE_BLK_DESC_LENGTH;
7909 7911
7910 7912 page3p = (void *)((caddr_t)modehdrp +
7911 7913 MODE_HEADER_LENGTH + MODE_BLK_DESC_LENGTH);
7912 7914 page3p->mode_page.code = 0x3;
7913 7915 page3p->mode_page.length =
7914 7916 (uchar_t)(sizeof (struct mode_format));
7915 7917 page3p->data_bytes_sect = 512;
7916 7918 page3p->sect_track = 63;
7917 7919 break;
7918 7920 }
7919 7921 case 0x4: {
7920 7922 struct mode_geometry *page4p = NULL;
7921 7923 modehdrp = (struct mode_header *)(bp->b_un.b_addr);
7922 7924 modehdrp->bdesc_length = MODE_BLK_DESC_LENGTH;
7923 7925
7924 7926 page4p = (void *)((caddr_t)modehdrp +
7925 7927 MODE_HEADER_LENGTH + MODE_BLK_DESC_LENGTH);
7926 7928 page4p->mode_page.code = 0x4;
7927 7929 page4p->mode_page.length =
7928 7930 (uchar_t)(sizeof (struct mode_geometry));
7929 7931 page4p->heads = 255;
7930 7932 page4p->rpm = 10000;
7931 7933 break;
7932 7934 }
7933 7935 default:
7934 7936 break;
7935 7937 }
7936 7938 return (NULL);
7937 7939 }
↓ open down ↓ |
7352 lines elided |
↑ open up ↑ |
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX