7127 remove -Wno-missing-braces from Makefile.uts
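The one functional change in this webrev is the struct modlinkage initializer in
pmcs_attach.c (old line 157): struct modlinkage ends in an array of linkage
pointers, so once -Wno-missing-braces is dropped from Makefile.uts the flat
initializer draws a -Wmissing-braces warning and the array member needs its own
brace level. A minimal sketch of the pattern, assuming the member layout declared
in illumos's <sys/modctl.h>; the bare modldrv below is only a placeholder:

    #include <sys/modctl.h>

    static struct modldrv modldrv;  /* placeholder; the real contents are in the diff below */

    /*
     * struct modlinkage is essentially { int ml_rev; void *ml_linkage[N]; },
     * so the flat initializer
     *
     *     static struct modlinkage modlinkage = { MODREV_1, &modldrv, NULL };
     *
     * is valid C but warns under -Wmissing-braces. Bracing the array
     * member explicitly, as the change below does, silences the warning
     * without altering the initialized values:
     */
    static struct modlinkage modlinkage = {
            MODREV_1, { &modldrv, NULL }
    };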
--- old/usr/src/uts/common/io/scsi/adapters/pmcs/pmcs_attach.c
+++ new/usr/src/uts/common/io/scsi/adapters/pmcs/pmcs_attach.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21 /*
22 22 * Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved.
23 23 */
24 24 #include <sys/scsi/adapters/pmcs/pmcs.h>
25 25
26 26 #define PMCS_DRIVER_VERSION "pmcs HBA device driver"
27 27
28 28 static char *pmcs_driver_rev = PMCS_DRIVER_VERSION;
29 29
30 30 /*
31 31 * Non-DDI Compliant stuff
32 32 */
33 33 extern char hw_serial[];
34 34
35 35 /*
36 36 * Global driver data
37 37 */
38 38 void *pmcs_softc_state = NULL;
39 39 void *pmcs_iport_softstate = NULL;
40 40
41 41 /*
42 42 * Tracing and Logging info
43 43 */
44 44 pmcs_tbuf_t *pmcs_tbuf = NULL;
45 45 uint32_t pmcs_tbuf_num_elems = 0;
46 46 pmcs_tbuf_t *pmcs_tbuf_ptr;
47 47 uint32_t pmcs_tbuf_idx = 0;
48 48 boolean_t pmcs_tbuf_wrap = B_FALSE;
49 49 kmutex_t pmcs_trace_lock;
50 50
51 51 /*
52 52 * If pmcs_force_syslog value is non-zero, all messages put in the trace log
53 53 * will also be sent to system log.
54 54 */
55 55 int pmcs_force_syslog = 0;
56 56 int pmcs_console = 0;
57 57
58 58 /*
59 59 * External References
60 60 */
61 61 extern int ncpus_online;
62 62
63 63 /*
64 64 * Local static data
65 65 */
66 66 static int fwlog_level = 3;
67 67 static int physpeed = PHY_LINK_ALL;
68 68 static int phymode = PHY_LM_AUTO;
69 69 static int block_mask = 0;
70 70 static int phymap_stable_usec = 3 * MICROSEC;
71 71 static int iportmap_stable_usec = 2 * MICROSEC;
72 72 static int iportmap_csync_usec = 20 * MICROSEC;
73 73
74 74 #ifdef DEBUG
75 75 static int debug_mask = 1;
76 76 #else
77 77 static int debug_mask = 0;
78 78 #endif
79 79
80 80 #ifdef DISABLE_MSIX
81 81 static int disable_msix = 1;
82 82 #else
83 83 static int disable_msix = 0;
84 84 #endif
85 85
86 86 #ifdef DISABLE_MSI
87 87 static int disable_msi = 1;
88 88 #else
89 89 static int disable_msi = 0;
90 90 #endif
91 91
92 92 /*
93 93 * DEBUG: testing: allow detach with an active port:
94 94 *
95 95 * # echo 'detach_driver_unconfig/W 10' | mdb -kw
96 96 * # echo 'scsi_hba_bus_unconfig_remove/W 1' | mdb -kw
97 97 * # echo 'pmcs`detach_with_active_port/W 1' | mdb -kw
98 98 * # modunload -i <pmcs_driver_index>
99 99 */
100 100 static int detach_with_active_port = 0;
101 101
102 102 static uint16_t maxqdepth = 0xfffe;
103 103
104 104 /*
105 105 * Local prototypes
106 106 */
107 107 static int pmcs_attach(dev_info_t *, ddi_attach_cmd_t);
108 108 static int pmcs_detach(dev_info_t *, ddi_detach_cmd_t);
109 109 static int pmcs_unattach(pmcs_hw_t *);
110 110 static int pmcs_iport_unattach(pmcs_iport_t *);
111 111 static int pmcs_add_more_chunks(pmcs_hw_t *, unsigned long);
112 112 static void pmcs_watchdog(void *);
113 113 static int pmcs_setup_intr(pmcs_hw_t *);
114 114 static int pmcs_teardown_intr(pmcs_hw_t *);
115 115
116 116 static uint_t pmcs_nonio_ix(caddr_t, caddr_t);
117 117 static uint_t pmcs_general_ix(caddr_t, caddr_t);
118 118 static uint_t pmcs_event_ix(caddr_t, caddr_t);
119 119 static uint_t pmcs_iodone_ix(caddr_t, caddr_t);
120 120 static uint_t pmcs_fatal_ix(caddr_t, caddr_t);
121 121 static uint_t pmcs_all_intr(caddr_t, caddr_t);
122 122 static int pmcs_quiesce(dev_info_t *dip);
123 123 static boolean_t pmcs_fabricate_wwid(pmcs_hw_t *);
124 124
125 125 static void pmcs_create_all_phy_stats(pmcs_iport_t *);
126 126 int pmcs_update_phy_stats(kstat_t *, int);
127 127
128 128 static void pmcs_fm_fini(pmcs_hw_t *pwp);
129 129 static void pmcs_fm_init(pmcs_hw_t *pwp);
130 130 static int pmcs_fm_error_cb(dev_info_t *dip,
131 131 ddi_fm_error_t *err, const void *impl_data);
132 132
133 133 /*
134 134 * Local configuration data
135 135 */
136 136 static struct dev_ops pmcs_ops = {
137 137 DEVO_REV, /* devo_rev, */
138 138 0, /* refcnt */
139 139 ddi_no_info, /* info */
140 140 nulldev, /* identify */
141 141 nulldev, /* probe */
142 142 pmcs_attach, /* attach */
143 143 pmcs_detach, /* detach */
144 144 nodev, /* reset */
145 145 NULL, /* driver operations */
146 146 NULL, /* bus operations */
147 147 ddi_power, /* power management */
148 148 pmcs_quiesce /* quiesce */
149 149 };
150 150
151 151 static struct modldrv modldrv = {
152 152 &mod_driverops,
153 153 PMCS_DRIVER_VERSION,
154 154 &pmcs_ops, /* driver ops */
155 155 };
156 156 static struct modlinkage modlinkage = {
157 - MODREV_1, &modldrv, NULL
157 + MODREV_1, { &modldrv, NULL }
158 158 };
159 159
160 160 const ddi_dma_attr_t pmcs_dattr = {
161 161 DMA_ATTR_V0, /* dma_attr version */
162 162 0x0000000000000000ull, /* dma_attr_addr_lo */
163 163 0xFFFFFFFFFFFFFFFFull, /* dma_attr_addr_hi */
164 164 0x00000000FFFFFFFFull, /* dma_attr_count_max */
165 165 0x0000000000000001ull, /* dma_attr_align */
166 166 0x00000078, /* dma_attr_burstsizes */
167 167 0x00000001, /* dma_attr_minxfer */
168 168 0x00000000FFFFFFFFull, /* dma_attr_maxxfer */
169 169 0x00000000FFFFFFFFull, /* dma_attr_seg */
170 170 1, /* dma_attr_sgllen */
171 171 512, /* dma_attr_granular */
172 172 0 /* dma_attr_flags */
173 173 };
174 174
175 175 static ddi_device_acc_attr_t rattr = {
176 176 DDI_DEVICE_ATTR_V1,
177 177 DDI_STRUCTURE_LE_ACC,
178 178 DDI_STRICTORDER_ACC,
179 179 DDI_DEFAULT_ACC
180 180 };
181 181
182 182
183 183 /*
184 184 * Attach/Detach functions
185 185 */
186 186
187 187 int
188 188 _init(void)
189 189 {
190 190 int ret;
191 191
192 192 ret = ddi_soft_state_init(&pmcs_softc_state, sizeof (pmcs_hw_t), 1);
193 193 if (ret != 0) {
194 194 cmn_err(CE_WARN, "?soft state init failed for pmcs");
195 195 return (ret);
196 196 }
197 197
198 198 if ((ret = scsi_hba_init(&modlinkage)) != 0) {
199 199 cmn_err(CE_WARN, "?scsi_hba_init failed for pmcs");
200 200 ddi_soft_state_fini(&pmcs_softc_state);
201 201 return (ret);
202 202 }
203 203
204 204 /*
205 205 * Allocate soft state for iports
206 206 */
207 207 ret = ddi_soft_state_init(&pmcs_iport_softstate,
208 208 sizeof (pmcs_iport_t), 2);
209 209 if (ret != 0) {
210 210 cmn_err(CE_WARN, "?iport soft state init failed for pmcs");
211 211 ddi_soft_state_fini(&pmcs_softc_state);
212 212 return (ret);
213 213 }
214 214
215 215 ret = mod_install(&modlinkage);
216 216 if (ret != 0) {
217 217 cmn_err(CE_WARN, "?mod_install failed for pmcs (%d)", ret);
218 218 scsi_hba_fini(&modlinkage);
219 219 ddi_soft_state_fini(&pmcs_iport_softstate);
220 220 ddi_soft_state_fini(&pmcs_softc_state);
221 221 return (ret);
222 222 }
223 223
224 224 /* Initialize the global trace lock */
225 225 mutex_init(&pmcs_trace_lock, NULL, MUTEX_DRIVER, NULL);
226 226
227 227 return (0);
228 228 }
229 229
230 230 int
231 231 _fini(void)
232 232 {
233 233 int ret;
234 234 if ((ret = mod_remove(&modlinkage)) != 0) {
235 235 return (ret);
236 236 }
237 237 scsi_hba_fini(&modlinkage);
238 238
239 239 /* Free pmcs log buffer and destroy the global lock */
240 240 if (pmcs_tbuf) {
241 241 kmem_free(pmcs_tbuf,
242 242 pmcs_tbuf_num_elems * sizeof (pmcs_tbuf_t));
243 243 pmcs_tbuf = NULL;
244 244 }
245 245 mutex_destroy(&pmcs_trace_lock);
246 246
247 247 ddi_soft_state_fini(&pmcs_iport_softstate);
248 248 ddi_soft_state_fini(&pmcs_softc_state);
249 249 return (0);
250 250 }
251 251
252 252 int
253 253 _info(struct modinfo *modinfop)
254 254 {
255 255 return (mod_info(&modlinkage, modinfop));
256 256 }
257 257
258 258 static int
259 259 pmcs_iport_attach(dev_info_t *dip)
260 260 {
261 261 pmcs_iport_t *iport;
262 262 pmcs_hw_t *pwp;
263 263 scsi_hba_tran_t *tran;
264 264 void *ua_priv = NULL;
265 265 char *iport_ua;
266 266 char *init_port;
267 267 int hba_inst;
268 268 int inst;
269 269
270 270 hba_inst = ddi_get_instance(ddi_get_parent(dip));
271 271 inst = ddi_get_instance(dip);
272 272
273 273 pwp = ddi_get_soft_state(pmcs_softc_state, hba_inst);
274 274 if (pwp == NULL) {
275 275 cmn_err(CE_WARN, "%s: No HBA softstate for instance %d",
276 276 __func__, inst);
277 277 return (DDI_FAILURE);
278 278 }
279 279
280 280 if ((pwp->state == STATE_UNPROBING) || (pwp->state == STATE_DEAD)) {
281 281 return (DDI_FAILURE);
282 282 }
283 283
284 284 if ((iport_ua = scsi_hba_iport_unit_address(dip)) == NULL) {
285 285 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
286 286 "%s: invoked with NULL unit address, inst (%d)",
287 287 __func__, inst);
288 288 return (DDI_FAILURE);
289 289 }
290 290
291 291 if (ddi_soft_state_zalloc(pmcs_iport_softstate, inst) != DDI_SUCCESS) {
292 292 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
293 293 "Failed to alloc soft state for iport %d", inst);
294 294 return (DDI_FAILURE);
295 295 }
296 296
297 297 iport = ddi_get_soft_state(pmcs_iport_softstate, inst);
298 298 if (iport == NULL) {
299 299 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
300 300 "cannot get iport soft state");
301 301 goto iport_attach_fail1;
302 302 }
303 303
304 304 mutex_init(&iport->lock, NULL, MUTEX_DRIVER,
305 305 DDI_INTR_PRI(pwp->intr_pri));
306 306 cv_init(&iport->refcnt_cv, NULL, CV_DEFAULT, NULL);
307 307 cv_init(&iport->smp_cv, NULL, CV_DEFAULT, NULL);
308 308 mutex_init(&iport->refcnt_lock, NULL, MUTEX_DRIVER,
309 309 DDI_INTR_PRI(pwp->intr_pri));
310 310 mutex_init(&iport->smp_lock, NULL, MUTEX_DRIVER,
311 311 DDI_INTR_PRI(pwp->intr_pri));
312 312
313 313 /* Set some data on the iport handle */
314 314 iport->dip = dip;
315 315 iport->pwp = pwp;
316 316
317 317 /* Dup the UA into the iport handle */
318 318 iport->ua = strdup(iport_ua);
319 319
320 320 tran = (scsi_hba_tran_t *)ddi_get_driver_private(dip);
321 321 tran->tran_hba_private = iport;
322 322
323 323 list_create(&iport->phys, sizeof (pmcs_phy_t),
324 324 offsetof(pmcs_phy_t, list_node));
325 325
326 326 /*
327 327 * If our unit address is active in the phymap, configure our
328 328 * iport's phylist.
329 329 */
330 330 mutex_enter(&iport->lock);
331 331 ua_priv = sas_phymap_lookup_uapriv(pwp->hss_phymap, iport->ua);
332 332 if (ua_priv) {
333 333 /* Non-NULL private data indicates the unit address is active */
334 334 iport->ua_state = UA_ACTIVE;
335 335 if (pmcs_iport_configure_phys(iport) != DDI_SUCCESS) {
336 336 pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, NULL, NULL,
337 337 "%s: failed to "
338 338 "configure phys on iport handle (0x%p), "
339 339 " unit address [%s]", __func__,
340 340 (void *)iport, iport_ua);
341 341 mutex_exit(&iport->lock);
342 342 goto iport_attach_fail2;
343 343 }
344 344 } else {
345 345 iport->ua_state = UA_INACTIVE;
346 346 }
347 347 mutex_exit(&iport->lock);
348 348
349 349 /* Allocate string-based soft state pool for targets */
350 350 iport->tgt_sstate = NULL;
351 351 if (ddi_soft_state_bystr_init(&iport->tgt_sstate,
352 352 sizeof (pmcs_xscsi_t), PMCS_TGT_SSTATE_SZ) != 0) {
353 353 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
354 354 "cannot get iport tgt soft state");
355 355 goto iport_attach_fail2;
356 356 }
357 357
358 358 /* Create this iport's target map */
359 359 if (pmcs_iport_tgtmap_create(iport) == B_FALSE) {
360 360 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
361 361 "Failed to create tgtmap on iport %d", inst);
362 362 goto iport_attach_fail3;
363 363 }
364 364
365 365 /* Set up the 'initiator-port' DDI property on this iport */
366 366 init_port = kmem_zalloc(PMCS_MAX_UA_SIZE, KM_SLEEP);
367 367 if (pwp->separate_ports) {
368 368 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
369 369 "%s: separate ports not supported", __func__);
370 370 } else {
371 371 /* Set initiator-port value to the HBA's base WWN */
372 372 (void) scsi_wwn_to_wwnstr(pwp->sas_wwns[0], 1,
373 373 init_port);
374 374 }
375 375
376 376 mutex_enter(&iport->lock);
377 377 pmcs_smhba_add_iport_prop(iport, DATA_TYPE_STRING,
378 378 SCSI_ADDR_PROP_INITIATOR_PORT, init_port);
379 379 kmem_free(init_port, PMCS_MAX_UA_SIZE);
380 380
381 381 /* Set up a 'num-phys' DDI property for the iport node */
382 382 pmcs_smhba_add_iport_prop(iport, DATA_TYPE_INT32, PMCS_NUM_PHYS,
383 383 &iport->nphy);
384 384 mutex_exit(&iport->lock);
385 385
386 386 /* Create kstats for each of the phys in this port */
387 387 pmcs_create_all_phy_stats(iport);
388 388
389 389 /*
390 390 * Insert this iport handle into our list and set
391 391 * iports_attached on the HBA node.
392 392 */
393 393 rw_enter(&pwp->iports_lock, RW_WRITER);
394 394 ASSERT(!list_link_active(&iport->list_node));
395 395 list_insert_tail(&pwp->iports, iport);
396 396 pwp->iports_attached = 1;
397 397 pwp->num_iports++;
398 398 rw_exit(&pwp->iports_lock);
399 399
400 400 pmcs_prt(pwp, PMCS_PRT_DEBUG_IPORT, NULL, NULL,
401 401 "iport%d attached", inst);
402 402 ddi_report_dev(dip);
403 403 return (DDI_SUCCESS);
404 404
405 405 /* teardown and fail */
406 406 iport_attach_fail3:
407 407 ddi_soft_state_bystr_fini(&iport->tgt_sstate);
408 408 iport_attach_fail2:
409 409 list_destroy(&iport->phys);
410 410 strfree(iport->ua);
411 411 mutex_destroy(&iport->refcnt_lock);
412 412 mutex_destroy(&iport->smp_lock);
413 413 cv_destroy(&iport->refcnt_cv);
414 414 cv_destroy(&iport->smp_cv);
415 415 mutex_destroy(&iport->lock);
416 416 iport_attach_fail1:
417 417 ddi_soft_state_free(pmcs_iport_softstate, inst);
418 418 return (DDI_FAILURE);
419 419 }
420 420
421 421 static int
422 422 pmcs_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
423 423 {
424 424 scsi_hba_tran_t *tran;
425 425 char chiprev, *fwsupport, hw_rev[24], fw_rev[24];
426 426 off_t set3size;
427 427 int inst, i;
428 428 int sm_hba = 1;
429 429 int protocol = 0;
430 430 int num_phys = 0;
431 431 pmcs_hw_t *pwp;
432 432 pmcs_phy_t *phyp;
433 433 uint32_t num_threads;
434 434 char buf[64];
435 435 char *fwl_file;
436 436
437 437 switch (cmd) {
438 438 case DDI_ATTACH:
439 439 break;
440 440
441 441 case DDI_PM_RESUME:
442 442 case DDI_RESUME:
443 443 tran = (scsi_hba_tran_t *)ddi_get_driver_private(dip);
444 444 if (!tran) {
445 445 return (DDI_FAILURE);
446 446 }
447 447 /* No DDI_?_RESUME on iport nodes */
448 448 if (scsi_hba_iport_unit_address(dip) != NULL) {
449 449 return (DDI_SUCCESS);
450 450 }
451 451 pwp = TRAN2PMC(tran);
452 452 if (pwp == NULL) {
453 453 return (DDI_FAILURE);
454 454 }
455 455
456 456 mutex_enter(&pwp->lock);
457 457 pwp->suspended = 0;
458 458 if (pwp->tq) {
459 459 ddi_taskq_resume(pwp->tq);
460 460 }
461 461 mutex_exit(&pwp->lock);
462 462 return (DDI_SUCCESS);
463 463
464 464 default:
465 465 return (DDI_FAILURE);
466 466 }
467 467
468 468 /*
469 469 * If this is an iport node, invoke iport attach.
470 470 */
471 471 if (scsi_hba_iport_unit_address(dip) != NULL) {
472 472 return (pmcs_iport_attach(dip));
473 473 }
474 474
475 475 /*
476 476 * From here on is attach for the HBA node
477 477 */
478 478
479 479 #ifdef DEBUG
480 480 /*
481 481 * Check to see if this unit is to be disabled. We can't disable
482 482 * on a per-iport node. It's either the entire HBA or nothing.
483 483 */
484 484 (void) snprintf(buf, sizeof (buf),
485 485 "disable-instance-%d", ddi_get_instance(dip));
486 486 if (ddi_prop_get_int(DDI_DEV_T_ANY, dip,
487 487 DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, buf, 0)) {
488 488 cmn_err(CE_NOTE, "pmcs%d: disabled by configuration",
489 489 ddi_get_instance(dip));
490 490 return (DDI_FAILURE);
491 491 }
492 492 #endif
493 493
494 494 /*
495 495 * Allocate softstate
496 496 */
497 497 inst = ddi_get_instance(dip);
498 498 if (ddi_soft_state_zalloc(pmcs_softc_state, inst) != DDI_SUCCESS) {
499 499 cmn_err(CE_WARN, "pmcs%d: Failed to alloc soft state", inst);
500 500 return (DDI_FAILURE);
501 501 }
502 502
503 503 pwp = ddi_get_soft_state(pmcs_softc_state, inst);
504 504 if (pwp == NULL) {
505 505 cmn_err(CE_WARN, "pmcs%d: cannot get soft state", inst);
506 506 ddi_soft_state_free(pmcs_softc_state, inst);
507 507 return (DDI_FAILURE);
508 508 }
509 509 pwp->dip = dip;
510 510 STAILQ_INIT(&pwp->dq);
511 511 STAILQ_INIT(&pwp->cq);
512 512 STAILQ_INIT(&pwp->wf);
513 513 STAILQ_INIT(&pwp->pf);
514 514
515 515 /*
516 516 * Create the list for iports and init its lock.
517 517 */
518 518 list_create(&pwp->iports, sizeof (pmcs_iport_t),
519 519 offsetof(pmcs_iport_t, list_node));
520 520 rw_init(&pwp->iports_lock, NULL, RW_DRIVER, NULL);
521 521
522 522 pwp->state = STATE_PROBING;
523 523
524 524 /*
525 525 * Get driver.conf properties
526 526 */
527 527 pwp->debug_mask = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
528 528 DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "pmcs-debug-mask",
529 529 debug_mask);
530 530 pwp->phyid_block_mask = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
531 531 DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "pmcs-phyid-block-mask",
532 532 block_mask);
533 533 pwp->physpeed = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
534 534 DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "pmcs-physpeed", physpeed);
535 535 pwp->phymode = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
536 536 DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "pmcs-phymode", phymode);
537 537 pwp->fwlog = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
538 538 DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "pmcs-fwlog", fwlog_level);
539 539 if (pwp->fwlog > PMCS_FWLOG_MAX) {
540 540 pwp->fwlog = PMCS_FWLOG_MAX;
541 541 }
542 542 if ((ddi_prop_lookup_string(DDI_DEV_T_ANY, dip, 0, "pmcs-fwlogfile",
543 543 &fwl_file) == DDI_SUCCESS)) {
544 544 if (snprintf(pwp->fwlogfile_aap1, MAXPATHLEN, "%s%d-aap1.0",
545 545 fwl_file, ddi_get_instance(dip)) > MAXPATHLEN) {
546 546 pwp->fwlogfile_aap1[0] = '\0';
547 547 pwp->fwlogfile_iop[0] = '\0';
548 548 } else if (snprintf(pwp->fwlogfile_iop, MAXPATHLEN,
549 549 "%s%d-iop.0", fwl_file,
550 550 ddi_get_instance(dip)) > MAXPATHLEN) {
551 551 pwp->fwlogfile_aap1[0] = '\0';
552 552 pwp->fwlogfile_iop[0] = '\0';
553 553 }
554 554 ddi_prop_free(fwl_file);
555 555 } else {
556 556 pwp->fwlogfile_aap1[0] = '\0';
557 557 pwp->fwlogfile_iop[0] = '\0';
558 558 }
559 559
560 560 pwp->open_retry_interval = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
561 561 DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "pmcs-open-retry-interval",
562 562 OPEN_RETRY_INTERVAL_DEF);
563 563 if (pwp->open_retry_interval > OPEN_RETRY_INTERVAL_MAX) {
564 564 pwp->open_retry_interval = OPEN_RETRY_INTERVAL_MAX;
565 565 }
566 566
567 567 mutex_enter(&pmcs_trace_lock);
568 568 if (pmcs_tbuf == NULL) {
569 569 /* Allocate trace buffer */
570 570 pmcs_tbuf_num_elems = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
571 571 DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "pmcs-tbuf-num-elems",
572 572 PMCS_TBUF_NUM_ELEMS_DEF);
573 573 if ((pmcs_tbuf_num_elems == DDI_PROP_NOT_FOUND) ||
574 574 (pmcs_tbuf_num_elems == 0)) {
575 575 pmcs_tbuf_num_elems = PMCS_TBUF_NUM_ELEMS_DEF;
576 576 }
577 577
578 578 pmcs_tbuf = kmem_zalloc(pmcs_tbuf_num_elems *
579 579 sizeof (pmcs_tbuf_t), KM_SLEEP);
580 580 pmcs_tbuf_ptr = pmcs_tbuf;
581 581 pmcs_tbuf_idx = 0;
582 582 }
583 583 mutex_exit(&pmcs_trace_lock);
584 584
585 585 if (pwp->fwlog && strlen(pwp->fwlogfile_aap1) > 0) {
586 586 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
587 587 "%s: firmware event log files: %s, %s", __func__,
588 588 pwp->fwlogfile_aap1, pwp->fwlogfile_iop);
589 589 pwp->fwlog_file = 1;
590 590 } else {
591 591 if (pwp->fwlog == 0) {
592 592 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
593 593 "%s: No firmware event log will be written "
594 594 "(event log disabled)", __func__);
595 595 } else {
596 596 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
597 597 "%s: No firmware event log will be written "
598 598 "(no filename configured - too long?)", __func__);
599 599 }
600 600 pwp->fwlog_file = 0;
601 601 }
602 602
603 603 disable_msix = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
604 604 DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "pmcs-disable-msix",
605 605 disable_msix);
606 606 disable_msi = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
607 607 DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "pmcs-disable-msi",
608 608 disable_msi);
609 609 maxqdepth = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
610 610 DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "pmcs-maxqdepth", maxqdepth);
611 611 pwp->fw_force_update = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
612 612 DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "pmcs-fw-force-update", 0);
613 613 if (pwp->fw_force_update == 0) {
614 614 pwp->fw_disable_update = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
615 615 DDI_PROP_DONTPASS | DDI_PROP_NOTPROM,
616 616 "pmcs-fw-disable-update", 0);
617 617 }
618 618 pwp->ioq_depth = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
619 619 DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "pmcs-num-io-qentries",
620 620 PMCS_NQENTRY);
621 621
622 622 /*
623 623 * Initialize FMA
624 624 */
625 625 pwp->dev_acc_attr = pwp->reg_acc_attr = rattr;
626 626 pwp->iqp_dma_attr = pwp->oqp_dma_attr =
627 627 pwp->regdump_dma_attr = pwp->cip_dma_attr =
628 628 pwp->fwlog_dma_attr = pmcs_dattr;
629 629 pwp->fm_capabilities = ddi_getprop(DDI_DEV_T_ANY, pwp->dip,
630 630 DDI_PROP_NOTPROM | DDI_PROP_DONTPASS, "fm-capable",
631 631 DDI_FM_EREPORT_CAPABLE | DDI_FM_ACCCHK_CAPABLE |
632 632 DDI_FM_DMACHK_CAPABLE | DDI_FM_ERRCB_CAPABLE);
633 633 pmcs_fm_init(pwp);
634 634
635 635 /*
636 636 * Map registers
637 637 */
638 638 if (pci_config_setup(dip, &pwp->pci_acc_handle)) {
639 639 pmcs_prt(pwp, PMCS_PRT_WARN, NULL, NULL,
640 640 "pci config setup failed");
641 641 ddi_soft_state_free(pmcs_softc_state, inst);
642 642 return (DDI_FAILURE);
643 643 }
644 644
645 645 /*
646 646 * Get the size of register set 3.
647 647 */
648 648 if (ddi_dev_regsize(dip, PMCS_REGSET_3, &set3size) != DDI_SUCCESS) {
649 649 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
650 650 "unable to get size of register set %d", PMCS_REGSET_3);
651 651 pci_config_teardown(&pwp->pci_acc_handle);
652 652 ddi_soft_state_free(pmcs_softc_state, inst);
653 653 return (DDI_FAILURE);
654 654 }
655 655
656 656 /*
657 657 * Map registers
658 658 */
659 659 pwp->reg_acc_attr.devacc_attr_endian_flags = DDI_STRUCTURE_LE_ACC;
660 660
661 661 if (ddi_regs_map_setup(dip, PMCS_REGSET_0, (caddr_t *)&pwp->msg_regs,
662 662 0, 0, &pwp->reg_acc_attr, &pwp->msg_acc_handle)) {
663 663 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
664 664 "failed to map Message Unit registers");
665 665 pci_config_teardown(&pwp->pci_acc_handle);
666 666 ddi_soft_state_free(pmcs_softc_state, inst);
667 667 return (DDI_FAILURE);
668 668 }
669 669
670 670 if (ddi_regs_map_setup(dip, PMCS_REGSET_1, (caddr_t *)&pwp->top_regs,
671 671 0, 0, &pwp->reg_acc_attr, &pwp->top_acc_handle)) {
672 672 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
673 673 "failed to map TOP registers");
674 674 ddi_regs_map_free(&pwp->msg_acc_handle);
675 675 pci_config_teardown(&pwp->pci_acc_handle);
676 676 ddi_soft_state_free(pmcs_softc_state, inst);
677 677 return (DDI_FAILURE);
678 678 }
679 679
680 680 if (ddi_regs_map_setup(dip, PMCS_REGSET_2, (caddr_t *)&pwp->gsm_regs,
681 681 0, 0, &pwp->reg_acc_attr, &pwp->gsm_acc_handle)) {
682 682 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
683 683 "failed to map GSM registers");
684 684 ddi_regs_map_free(&pwp->top_acc_handle);
685 685 ddi_regs_map_free(&pwp->msg_acc_handle);
686 686 pci_config_teardown(&pwp->pci_acc_handle);
687 687 ddi_soft_state_free(pmcs_softc_state, inst);
688 688 return (DDI_FAILURE);
689 689 }
690 690
691 691 if (ddi_regs_map_setup(dip, PMCS_REGSET_3, (caddr_t *)&pwp->mpi_regs,
692 692 0, 0, &pwp->reg_acc_attr, &pwp->mpi_acc_handle)) {
693 693 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
694 694 "failed to map MPI registers");
695 695 ddi_regs_map_free(&pwp->top_acc_handle);
696 696 ddi_regs_map_free(&pwp->gsm_acc_handle);
697 697 ddi_regs_map_free(&pwp->msg_acc_handle);
698 698 pci_config_teardown(&pwp->pci_acc_handle);
699 699 ddi_soft_state_free(pmcs_softc_state, inst);
700 700 return (DDI_FAILURE);
701 701 }
702 702 pwp->mpibar =
703 703 (((5U << 2) + 0x10) << PMCS_MSGU_MPI_BAR_SHIFT) | set3size;
704 704
705 705 /*
706 706 * Make sure we can support this card.
707 707 */
708 708 pwp->chiprev = pmcs_rd_topunit(pwp, PMCS_DEVICE_REVISION);
709 709
710 710 switch (pwp->chiprev) {
711 711 case PMCS_PM8001_REV_A:
712 712 case PMCS_PM8001_REV_B:
713 713 pmcs_prt(pwp, PMCS_PRT_ERR, NULL, NULL,
714 714 "Rev A/B Card no longer supported");
715 715 goto failure;
716 716 case PMCS_PM8001_REV_C:
717 717 break;
718 718 default:
719 719 pmcs_prt(pwp, PMCS_PRT_ERR, NULL, NULL,
720 720 "Unknown chip revision (%d)", pwp->chiprev);
721 721 goto failure;
722 722 }
723 723
724 724 /*
725 725 * Allocate DMA addressable area for Inbound and Outbound Queue indices
726 726 * that the chip needs to access plus a space for scratch usage
727 727 */
728 728 pwp->cip_dma_attr.dma_attr_align = sizeof (uint32_t);
729 729 if (pmcs_dma_setup(pwp, &pwp->cip_dma_attr, &pwp->cip_acchdls,
730 730 &pwp->cip_handles, ptob(1), (caddr_t *)&pwp->cip,
731 731 &pwp->ciaddr) == B_FALSE) {
732 732 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
733 733 "Failed to setup DMA for index/scratch");
734 734 goto failure;
735 735 }
736 736
737 737 bzero(pwp->cip, ptob(1));
738 738 pwp->scratch = &pwp->cip[PMCS_INDICES_SIZE];
739 739 pwp->scratch_dma = pwp->ciaddr + PMCS_INDICES_SIZE;
740 740
741 741 /*
742 742 * Allocate DMA S/G list chunks
743 743 */
744 744 (void) pmcs_add_more_chunks(pwp, ptob(1) * PMCS_MIN_CHUNK_PAGES);
745 745
746 746 /*
747 747 * Allocate a DMA addressable area for the firmware log (if needed)
748 748 */
749 749 if (pwp->fwlog) {
750 750 /*
751 751 * Align to event log header and entry size
752 752 */
753 753 pwp->fwlog_dma_attr.dma_attr_align = 32;
754 754 if (pmcs_dma_setup(pwp, &pwp->fwlog_dma_attr,
755 755 &pwp->fwlog_acchdl,
756 756 &pwp->fwlog_hndl, PMCS_FWLOG_SIZE,
757 757 (caddr_t *)&pwp->fwlogp,
758 758 &pwp->fwaddr) == B_FALSE) {
759 759 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
760 760 "Failed to setup DMA for fwlog area");
761 761 pwp->fwlog = 0;
762 762 } else {
763 763 bzero(pwp->fwlogp, PMCS_FWLOG_SIZE);
764 764 pwp->fwlogp_aap1 = (pmcs_fw_event_hdr_t *)pwp->fwlogp;
765 765 pwp->fwlogp_iop = (pmcs_fw_event_hdr_t *)((void *)
766 766 ((caddr_t)pwp->fwlogp + (PMCS_FWLOG_SIZE / 2)));
767 767 }
768 768 }
769 769
770 770 if (pwp->flash_chunk_addr == NULL) {
771 771 pwp->regdump_dma_attr.dma_attr_align = PMCS_FLASH_CHUNK_SIZE;
772 772 if (pmcs_dma_setup(pwp, &pwp->regdump_dma_attr,
773 773 &pwp->regdump_acchdl,
774 774 &pwp->regdump_hndl, PMCS_FLASH_CHUNK_SIZE,
775 775 (caddr_t *)&pwp->flash_chunkp, &pwp->flash_chunk_addr) ==
776 776 B_FALSE) {
777 777 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
778 778 "Failed to setup DMA for register dump area");
779 779 goto failure;
780 780 }
781 781 bzero(pwp->flash_chunkp, PMCS_FLASH_CHUNK_SIZE);
782 782 }
783 783
784 784 /*
785 785 * More bits of local initialization...
786 786 */
787 787 pwp->tq = ddi_taskq_create(dip, "_tq", 4, TASKQ_DEFAULTPRI, 0);
788 788 if (pwp->tq == NULL) {
789 789 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
790 790 "unable to create worker taskq");
791 791 goto failure;
792 792 }
793 793
794 794 /*
795 795 * Cache of structures for dealing with I/O completion callbacks.
796 796 */
797 797 (void) snprintf(buf, sizeof (buf), "pmcs_iocomp_cb_cache%d", inst);
798 798 pwp->iocomp_cb_cache = kmem_cache_create(buf,
799 799 sizeof (pmcs_iocomp_cb_t), 16, NULL, NULL, NULL, NULL, NULL, 0);
800 800
801 801 /*
802 802 * Cache of PHY structures
803 803 */
804 804 (void) snprintf(buf, sizeof (buf), "pmcs_phy_cache%d", inst);
805 805 pwp->phy_cache = kmem_cache_create(buf, sizeof (pmcs_phy_t), 8,
806 806 pmcs_phy_constructor, pmcs_phy_destructor, NULL, (void *)pwp,
807 807 NULL, 0);
808 808
809 809 /*
810 810 * Allocate space for the I/O completion threads
811 811 */
812 812 num_threads = ncpus_online;
813 813 if (num_threads > PMCS_MAX_CQ_THREADS) {
814 814 num_threads = PMCS_MAX_CQ_THREADS;
815 815 }
816 816
817 817 pwp->cq_info.cq_threads = num_threads;
818 818 pwp->cq_info.cq_thr_info = kmem_zalloc(
819 819 sizeof (pmcs_cq_thr_info_t) * pwp->cq_info.cq_threads, KM_SLEEP);
820 820 pwp->cq_info.cq_next_disp_thr = 0;
821 821 pwp->cq_info.cq_stop = B_FALSE;
822 822
823 823 /*
824 824 * Set the quantum value in clock ticks for the I/O interrupt
825 825 * coalescing timer.
826 826 */
827 827 pwp->io_intr_coal.quantum = drv_usectohz(PMCS_QUANTUM_TIME_USECS);
828 828
829 829 /*
830 830 * We have a delicate dance here. We need to set up
831 831 * interrupts so we know how to set up some OQC
832 832 * tables. However, while we're setting up table
833 833 * access, we may need to flash new firmware and
834 834 * reset the card, which will take some finessing.
835 835 */
836 836
837 837 /*
838 838 * Set up interrupts here.
839 839 */
840 840 switch (pmcs_setup_intr(pwp)) {
841 841 case 0:
842 842 break;
843 843 case EIO:
844 844 pwp->stuck = 1;
845 845 /* FALLTHROUGH */
846 846 default:
847 847 goto failure;
848 848 }
849 849
850 850 /*
851 851 	 * Set these up now because they are used to initialize the OQC tables.
852 852 *
853 853 * If we have MSI or MSI-X interrupts set up and we have enough
854 854 * vectors for each OQ, the Outbound Queue vectors can all be the
855 855 * same as the appropriate interrupt routine will have been called
856 856 * and the doorbell register automatically cleared.
857 857 * This keeps us from having to check the Outbound Doorbell register
858 858 * when the routines for these interrupts are called.
859 859 *
860 860 * If we have Legacy INT-X interrupts set up or we didn't have enough
861 861 * MSI/MSI-X vectors to uniquely identify each OQ, we point these
862 862 * vectors to the bits we would like to have set in the Outbound
863 863 * Doorbell register because pmcs_all_intr will read the doorbell
864 864 * register to find out why we have an interrupt and write the
865 865 * corresponding 'clear' bit for that interrupt.
866 866 */
867 867
868 868 switch (pwp->intr_cnt) {
869 869 case 1:
870 870 /*
871 871 * Only one vector, so we must check all OQs for MSI. For
872 872 * INT-X, there's only one vector anyway, so we can just
873 873 * use the outbound queue bits to keep from having to
874 874 * check each queue for each interrupt.
875 875 */
876 876 if (pwp->int_type == PMCS_INT_FIXED) {
877 877 pwp->oqvec[PMCS_OQ_IODONE] = PMCS_OQ_IODONE;
878 878 pwp->oqvec[PMCS_OQ_GENERAL] = PMCS_OQ_GENERAL;
879 879 pwp->oqvec[PMCS_OQ_EVENTS] = PMCS_OQ_EVENTS;
880 880 } else {
881 881 pwp->oqvec[PMCS_OQ_IODONE] = PMCS_OQ_IODONE;
882 882 pwp->oqvec[PMCS_OQ_GENERAL] = PMCS_OQ_IODONE;
883 883 pwp->oqvec[PMCS_OQ_EVENTS] = PMCS_OQ_IODONE;
884 884 }
885 885 break;
886 886 case 2:
887 887 /* With 2, we can at least isolate IODONE */
888 888 pwp->oqvec[PMCS_OQ_IODONE] = PMCS_OQ_IODONE;
889 889 pwp->oqvec[PMCS_OQ_GENERAL] = PMCS_OQ_GENERAL;
890 890 pwp->oqvec[PMCS_OQ_EVENTS] = PMCS_OQ_GENERAL;
891 891 break;
892 892 case 4:
893 893 /* With 4 vectors, everybody gets one */
894 894 pwp->oqvec[PMCS_OQ_IODONE] = PMCS_OQ_IODONE;
895 895 pwp->oqvec[PMCS_OQ_GENERAL] = PMCS_OQ_GENERAL;
896 896 pwp->oqvec[PMCS_OQ_EVENTS] = PMCS_OQ_EVENTS;
897 897 break;
898 898 }
899 899
900 900 /*
901 901 * Do the first part of setup
902 902 */
903 903 if (pmcs_setup(pwp)) {
904 904 goto failure;
905 905 }
906 906 pmcs_report_fwversion(pwp);
907 907
908 908 /*
909 909 	 * Now do some additional allocations based upon information
910 910 * gathered during MPI setup.
911 911 */
912 912 pwp->root_phys = kmem_zalloc(pwp->nphy * sizeof (pmcs_phy_t), KM_SLEEP);
913 913 ASSERT(pwp->nphy < SAS2_PHYNUM_MAX);
914 914 phyp = pwp->root_phys;
915 915 for (i = 0; i < pwp->nphy; i++) {
916 916 if (i < pwp->nphy-1) {
917 917 phyp->sibling = (phyp + 1);
918 918 }
919 919 mutex_init(&phyp->phy_lock, NULL, MUTEX_DRIVER,
920 920 DDI_INTR_PRI(pwp->intr_pri));
921 921 phyp->phynum = i & SAS2_PHYNUM_MASK;
922 922 pmcs_phy_name(pwp, phyp, phyp->path, sizeof (phyp->path));
923 923 phyp->pwp = pwp;
924 924 phyp->device_id = PMCS_INVALID_DEVICE_ID;
925 925 phyp->portid = PMCS_PHY_INVALID_PORT_ID;
926 926 phyp++;
927 927 }
928 928
929 929 pwp->work = kmem_zalloc(pwp->max_cmd * sizeof (pmcwork_t), KM_SLEEP);
930 930 for (i = 0; i < pwp->max_cmd; i++) {
931 931 pmcwork_t *pwrk = &pwp->work[i];
932 932 mutex_init(&pwrk->lock, NULL, MUTEX_DRIVER,
933 933 DDI_INTR_PRI(pwp->intr_pri));
934 934 cv_init(&pwrk->sleep_cv, NULL, CV_DRIVER, NULL);
935 935 STAILQ_INSERT_TAIL(&pwp->wf, pwrk, next);
936 936
937 937 }
938 938 pwp->targets = (pmcs_xscsi_t **)
939 939 kmem_zalloc(pwp->max_dev * sizeof (pmcs_xscsi_t *), KM_SLEEP);
940 940
941 941 pwp->iqpt = (pmcs_iqp_trace_t *)
942 942 kmem_zalloc(sizeof (pmcs_iqp_trace_t), KM_SLEEP);
943 943 pwp->iqpt->head = kmem_zalloc(PMCS_IQP_TRACE_BUFFER_SIZE, KM_SLEEP);
944 944 pwp->iqpt->curpos = pwp->iqpt->head;
945 945 pwp->iqpt->size_left = PMCS_IQP_TRACE_BUFFER_SIZE;
946 946
947 947 /*
948 948 * Start MPI communication.
949 949 */
950 950 if (pmcs_start_mpi(pwp)) {
951 951 if (pmcs_soft_reset(pwp, B_FALSE)) {
952 952 goto failure;
953 953 }
954 954 pwp->last_reset_reason = PMCS_LAST_RST_ATTACH;
955 955 }
956 956
957 957 /*
958 958 * Do some initial acceptance tests.
959 959 * This tests interrupts and queues.
960 960 */
961 961 if (pmcs_echo_test(pwp)) {
962 962 goto failure;
963 963 }
964 964
965 965 /* Read VPD - if it exists */
966 966 if (pmcs_get_nvmd(pwp, PMCS_NVMD_VPD, PMCIN_NVMD_VPD, 0, NULL, 0)) {
967 967 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
968 968 "%s: Unable to read VPD: "
969 969 "attempting to fabricate", __func__);
970 970 /*
971 971 * When we release, this must goto failure and the call
972 972 * to pmcs_fabricate_wwid is removed.
973 973 */
974 974 /* goto failure; */
975 975 if (!pmcs_fabricate_wwid(pwp)) {
976 976 goto failure;
977 977 }
978 978 }
979 979
980 980 /*
981 981 * We're now officially running
982 982 */
983 983 pwp->state = STATE_RUNNING;
984 984
985 985 /*
986 986 * Check firmware versions and load new firmware
987 987 * if needed and reset.
988 988 */
989 989 if (pmcs_firmware_update(pwp)) {
990 990 pmcs_prt(pwp, PMCS_PRT_WARN, NULL, NULL,
991 991 "%s: Firmware update failed", __func__);
992 992 goto failure;
993 993 }
994 994
995 995 /*
996 996 * Create completion threads.
997 997 */
998 998 for (i = 0; i < pwp->cq_info.cq_threads; i++) {
999 999 pwp->cq_info.cq_thr_info[i].cq_pwp = pwp;
1000 1000 pwp->cq_info.cq_thr_info[i].cq_thread =
1001 1001 thread_create(NULL, 0, pmcs_scsa_cq_run,
1002 1002 &pwp->cq_info.cq_thr_info[i], 0, &p0, TS_RUN, minclsyspri);
1003 1003 }
1004 1004
1005 1005 /*
1006 1006 * Create one thread to deal with the updating of the interrupt
1007 1007 * coalescing timer.
1008 1008 */
1009 1009 pwp->ict_thread = thread_create(NULL, 0, pmcs_check_intr_coal,
1010 1010 pwp, 0, &p0, TS_RUN, minclsyspri);
1011 1011
1012 1012 /*
1013 1013 * Kick off the watchdog
1014 1014 */
1015 1015 pwp->wdhandle = timeout(pmcs_watchdog, pwp,
1016 1016 drv_usectohz(PMCS_WATCH_INTERVAL));
1017 1017 /*
1018 1018 * Do the SCSI attachment code (before starting phys)
1019 1019 */
1020 1020 if (pmcs_scsa_init(pwp, &pmcs_dattr)) {
1021 1021 goto failure;
1022 1022 }
1023 1023 pwp->hba_attached = 1;
1024 1024
1025 1025 /* Check all acc & dma handles allocated in attach */
1026 1026 if (pmcs_check_acc_dma_handle(pwp)) {
1027 1027 ddi_fm_service_impact(pwp->dip, DDI_SERVICE_LOST);
1028 1028 goto failure;
1029 1029 }
1030 1030
1031 1031 /*
1032 1032 * Create the iportmap for this HBA instance
1033 1033 */
1034 1034 if (scsi_hba_iportmap_create(dip, iportmap_csync_usec,
1035 1035 iportmap_stable_usec, &pwp->hss_iportmap) != DDI_SUCCESS) {
1036 1036 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
1037 1037 "%s: pmcs%d iportmap_create failed", __func__, inst);
1038 1038 goto failure;
1039 1039 }
1040 1040 ASSERT(pwp->hss_iportmap);
1041 1041
1042 1042 /*
1043 1043 * Create the phymap for this HBA instance
1044 1044 */
1045 1045 if (sas_phymap_create(dip, phymap_stable_usec, PHYMAP_MODE_SIMPLE, NULL,
1046 1046 pwp, pmcs_phymap_activate, pmcs_phymap_deactivate,
1047 1047 &pwp->hss_phymap) != DDI_SUCCESS) {
1048 1048 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
1049 1049 "%s: pmcs%d phymap_create failed", __func__, inst);
1050 1050 goto failure;
1051 1051 }
1052 1052 ASSERT(pwp->hss_phymap);
1053 1053
1054 1054 /*
1055 1055 * Start the PHYs.
1056 1056 */
1057 1057 if (pmcs_start_phys(pwp)) {
1058 1058 goto failure;
1059 1059 }
1060 1060
1061 1061 /*
1062 1062 * From this point on, we can't fail.
1063 1063 */
1064 1064 ddi_report_dev(dip);
1065 1065
1066 1066 /* SM-HBA */
1067 1067 pmcs_smhba_add_hba_prop(pwp, DATA_TYPE_INT32, PMCS_SMHBA_SUPPORTED,
1068 1068 &sm_hba);
1069 1069
1070 1070 /* SM-HBA */
1071 1071 pmcs_smhba_add_hba_prop(pwp, DATA_TYPE_STRING, PMCS_DRV_VERSION,
1072 1072 pmcs_driver_rev);
1073 1073
1074 1074 /* SM-HBA */
1075 1075 chiprev = 'A' + pwp->chiprev;
1076 1076 (void) snprintf(hw_rev, 2, "%s", &chiprev);
1077 1077 pmcs_smhba_add_hba_prop(pwp, DATA_TYPE_STRING, PMCS_HWARE_VERSION,
1078 1078 hw_rev);
1079 1079
1080 1080 /* SM-HBA */
1081 1081 switch (PMCS_FW_TYPE(pwp)) {
1082 1082 case PMCS_FW_TYPE_RELEASED:
1083 1083 fwsupport = "Released";
1084 1084 break;
1085 1085 case PMCS_FW_TYPE_DEVELOPMENT:
1086 1086 fwsupport = "Development";
1087 1087 break;
1088 1088 case PMCS_FW_TYPE_ALPHA:
1089 1089 fwsupport = "Alpha";
1090 1090 break;
1091 1091 case PMCS_FW_TYPE_BETA:
1092 1092 fwsupport = "Beta";
1093 1093 break;
1094 1094 default:
1095 1095 fwsupport = "Special";
1096 1096 break;
1097 1097 }
1098 1098 (void) snprintf(fw_rev, sizeof (fw_rev), "%x.%x.%x %s",
1099 1099 PMCS_FW_MAJOR(pwp), PMCS_FW_MINOR(pwp), PMCS_FW_MICRO(pwp),
1100 1100 fwsupport);
1101 1101 pmcs_smhba_add_hba_prop(pwp, DATA_TYPE_STRING, PMCS_FWARE_VERSION,
1102 1102 fw_rev);
1103 1103
1104 1104 /* SM-HBA */
1105 1105 num_phys = pwp->nphy;
1106 1106 pmcs_smhba_add_hba_prop(pwp, DATA_TYPE_INT32, PMCS_NUM_PHYS_HBA,
1107 1107 &num_phys);
1108 1108
1109 1109 /* SM-HBA */
1110 1110 protocol = SAS_SSP_SUPPORT | SAS_SATA_SUPPORT | SAS_SMP_SUPPORT;
1111 1111 pmcs_smhba_add_hba_prop(pwp, DATA_TYPE_INT32, PMCS_SUPPORTED_PROTOCOL,
1112 1112 &protocol);
1113 1113
1114 1114 /* Receptacle properties (FMA) */
1115 1115 pwp->recept_labels[0] = PMCS_RECEPT_LABEL_0;
1116 1116 pwp->recept_pm[0] = PMCS_RECEPT_PM_0;
1117 1117 pwp->recept_labels[1] = PMCS_RECEPT_LABEL_1;
1118 1118 pwp->recept_pm[1] = PMCS_RECEPT_PM_1;
1119 1119 if (ddi_prop_update_string_array(DDI_DEV_T_NONE, dip,
1120 1120 SCSI_HBA_PROP_RECEPTACLE_LABEL, &pwp->recept_labels[0],
1121 1121 PMCS_NUM_RECEPTACLES) != DDI_PROP_SUCCESS) {
1122 1122 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
1123 1123 "%s: failed to create %s property", __func__,
1124 1124 "receptacle-label");
1125 1125 }
1126 1126 if (ddi_prop_update_string_array(DDI_DEV_T_NONE, dip,
1127 1127 SCSI_HBA_PROP_RECEPTACLE_PM, &pwp->recept_pm[0],
1128 1128 PMCS_NUM_RECEPTACLES) != DDI_PROP_SUCCESS) {
1129 1129 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
1130 1130 "%s: failed to create %s property", __func__,
1131 1131 "receptacle-pm");
1132 1132 }
1133 1133
1134 1134 return (DDI_SUCCESS);
1135 1135
1136 1136 failure:
1137 1137 if (pmcs_unattach(pwp)) {
1138 1138 pwp->stuck = 1;
1139 1139 }
1140 1140 return (DDI_FAILURE);
1141 1141 }
1142 1142
1143 1143 int
1144 1144 pmcs_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
1145 1145 {
1146 1146 int inst = ddi_get_instance(dip);
1147 1147 pmcs_iport_t *iport = NULL;
1148 1148 pmcs_hw_t *pwp = NULL;
1149 1149 scsi_hba_tran_t *tran;
1150 1150
1151 1151 if (scsi_hba_iport_unit_address(dip) != NULL) {
1152 1152 /* iport node */
1153 1153 iport = ddi_get_soft_state(pmcs_iport_softstate, inst);
1154 1154 ASSERT(iport);
1155 1155 if (iport == NULL) {
1156 1156 return (DDI_FAILURE);
1157 1157 }
1158 1158 pwp = iport->pwp;
1159 1159 } else {
1160 1160 /* hba node */
1161 1161 pwp = (pmcs_hw_t *)ddi_get_soft_state(pmcs_softc_state, inst);
1162 1162 ASSERT(pwp);
1163 1163 if (pwp == NULL) {
1164 1164 return (DDI_FAILURE);
1165 1165 }
1166 1166 }
1167 1167 switch (cmd) {
1168 1168 case DDI_DETACH:
1169 1169 if (iport) {
1170 1170 /* iport detach */
1171 1171 if (pmcs_iport_unattach(iport)) {
1172 1172 return (DDI_FAILURE);
1173 1173 }
1174 1174 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
1175 1175 "iport%d detached", inst);
1176 1176 return (DDI_SUCCESS);
1177 1177 } else {
1178 1178 /* HBA detach */
1179 1179 if (pmcs_unattach(pwp)) {
1180 1180 return (DDI_FAILURE);
1181 1181 }
1182 1182 return (DDI_SUCCESS);
1183 1183 }
1184 1184
1185 1185 case DDI_SUSPEND:
1186 1186 case DDI_PM_SUSPEND:
1187 1187 /* No DDI_SUSPEND on iport nodes */
1188 1188 if (iport) {
1189 1189 return (DDI_SUCCESS);
1190 1190 }
1191 1191
1192 1192 if (pwp->stuck) {
1193 1193 return (DDI_FAILURE);
1194 1194 }
1195 1195 tran = (scsi_hba_tran_t *)ddi_get_driver_private(dip);
1196 1196 if (!tran) {
1197 1197 return (DDI_FAILURE);
1198 1198 }
1199 1199
1200 1200 pwp = TRAN2PMC(tran);
1201 1201 if (pwp == NULL) {
1202 1202 return (DDI_FAILURE);
1203 1203 }
1204 1204 mutex_enter(&pwp->lock);
1205 1205 if (pwp->tq) {
1206 1206 ddi_taskq_suspend(pwp->tq);
1207 1207 }
1208 1208 pwp->suspended = 1;
1209 1209 mutex_exit(&pwp->lock);
1210 1210 pmcs_prt(pwp, PMCS_PRT_INFO, NULL, NULL, "PMC8X6G suspending");
1211 1211 return (DDI_SUCCESS);
1212 1212
1213 1213 default:
1214 1214 return (DDI_FAILURE);
1215 1215 }
1216 1216 }
1217 1217
1218 1218 static int
1219 1219 pmcs_iport_unattach(pmcs_iport_t *iport)
1220 1220 {
1221 1221 pmcs_hw_t *pwp = iport->pwp;
1222 1222
1223 1223 /*
1224 1224 * First, check if there are still any configured targets on this
1225 1225 * iport. If so, we fail detach.
1226 1226 */
1227 1227 if (pmcs_iport_has_targets(pwp, iport)) {
1228 1228 pmcs_prt(pwp, PMCS_PRT_DEBUG_IPORT, NULL, NULL,
1229 1229 "iport%d detach failure: iport has targets (luns)",
1230 1230 ddi_get_instance(iport->dip));
1231 1231 return (DDI_FAILURE);
1232 1232 }
1233 1233
1234 1234 /*
1235 1235 * Remove this iport from our list if it is inactive in the phymap.
1236 1236 */
1237 1237 rw_enter(&pwp->iports_lock, RW_WRITER);
1238 1238 mutex_enter(&iport->lock);
1239 1239
1240 1240 if ((iport->ua_state == UA_ACTIVE) &&
1241 1241 (detach_with_active_port == 0)) {
1242 1242 mutex_exit(&iport->lock);
1243 1243 rw_exit(&pwp->iports_lock);
1244 1244 pmcs_prt(pwp, PMCS_PRT_DEBUG_IPORT, NULL, NULL,
1245 1245 "iport%d detach failure: "
1246 1246 "iport unit address active in phymap",
1247 1247 ddi_get_instance(iport->dip));
1248 1248 return (DDI_FAILURE);
1249 1249 }
1250 1250
1251 1251 /* If it's our only iport, clear iports_attached */
1252 1252 ASSERT(pwp->num_iports >= 1);
1253 1253 if (--pwp->num_iports == 0) {
1254 1254 pwp->iports_attached = 0;
1255 1255 }
1256 1256
1257 1257 ASSERT(list_link_active(&iport->list_node));
1258 1258 list_remove(&pwp->iports, iport);
1259 1259 rw_exit(&pwp->iports_lock);
1260 1260
1261 1261 /*
1262 1262 * We have removed the iport handle from the HBA's iports list,
1263 1263 * there will be no new references to it. Two things must be
1264 1264 * guarded against here. First, we could have PHY up events,
1265 1265 * adding themselves to the iport->phys list and grabbing ref's
1266 1266 * on our iport handle. Second, we could have existing references
1267 1267 * to this iport handle from a point in time prior to the list
1268 1268 * removal above.
1269 1269 *
1270 1270 * So first, destroy the phys list. Remove any phys that have snuck
1271 1271 * in after the phymap deactivate, dropping the refcnt accordingly.
1272 1272 * If these PHYs are still up if and when the phymap reactivates
1273 1273 * (i.e. when this iport reattaches), we'll populate the list with
1274 1274 * them and bump the refcnt back up.
1275 1275 */
1276 1276 pmcs_remove_phy_from_iport(iport, NULL);
1277 1277 ASSERT(list_is_empty(&iport->phys));
1278 1278 list_destroy(&iport->phys);
1279 1279 mutex_exit(&iport->lock);
1280 1280
1281 1281 /*
1282 1282 * Second, wait for any other references to this iport to be
1283 1283 * dropped, then continue teardown.
1284 1284 */
1285 1285 mutex_enter(&iport->refcnt_lock);
1286 1286 while (iport->refcnt != 0) {
1287 1287 cv_wait(&iport->refcnt_cv, &iport->refcnt_lock);
1288 1288 }
1289 1289 mutex_exit(&iport->refcnt_lock);
1290 1290
1291 1291
1292 1292 /* Destroy the iport target map */
1293 1293 if (pmcs_iport_tgtmap_destroy(iport) == B_FALSE) {
1294 1294 return (DDI_FAILURE);
1295 1295 }
1296 1296
1297 1297 /* Free the tgt soft state */
1298 1298 if (iport->tgt_sstate != NULL) {
1299 1299 ddi_soft_state_bystr_fini(&iport->tgt_sstate);
1300 1300 }
1301 1301
1302 1302 /* Free our unit address string */
1303 1303 strfree(iport->ua);
1304 1304
1305 1305 /* Finish teardown and free the softstate */
1306 1306 mutex_destroy(&iport->refcnt_lock);
1307 1307 mutex_destroy(&iport->smp_lock);
1308 1308 ASSERT(iport->refcnt == 0);
1309 1309 cv_destroy(&iport->refcnt_cv);
1310 1310 cv_destroy(&iport->smp_cv);
1311 1311 mutex_destroy(&iport->lock);
1312 1312 ddi_soft_state_free(pmcs_iport_softstate, ddi_get_instance(iport->dip));
1313 1313
1314 1314 return (DDI_SUCCESS);
1315 1315 }
1316 1316
1317 1317 static int
1318 1318 pmcs_unattach(pmcs_hw_t *pwp)
1319 1319 {
1320 1320 int i;
1321 1321 enum pwpstate curstate;
1322 1322 pmcs_cq_thr_info_t *cqti;
1323 1323
1324 1324 /*
1325 1325 * Tear down the interrupt infrastructure.
1326 1326 */
1327 1327 if (pmcs_teardown_intr(pwp)) {
1328 1328 pwp->stuck = 1;
1329 1329 }
1330 1330 pwp->intr_cnt = 0;
1331 1331
1332 1332 /*
1333 1333 * Grab a lock, if initted, to set state.
1334 1334 */
1335 1335 if (pwp->locks_initted) {
1336 1336 mutex_enter(&pwp->lock);
1337 1337 if (pwp->state != STATE_DEAD) {
1338 1338 pwp->state = STATE_UNPROBING;
1339 1339 }
1340 1340 curstate = pwp->state;
1341 1341 mutex_exit(&pwp->lock);
1342 1342
1343 1343 /*
1344 1344 * Stop the I/O completion threads.
1345 1345 */
1346 1346 mutex_enter(&pwp->cq_lock);
1347 1347 pwp->cq_info.cq_stop = B_TRUE;
1348 1348 for (i = 0; i < pwp->cq_info.cq_threads; i++) {
1349 1349 if (pwp->cq_info.cq_thr_info[i].cq_thread) {
1350 1350 cqti = &pwp->cq_info.cq_thr_info[i];
1351 1351 mutex_enter(&cqti->cq_thr_lock);
1352 1352 cv_signal(&cqti->cq_cv);
1353 1353 mutex_exit(&cqti->cq_thr_lock);
1354 1354 mutex_exit(&pwp->cq_lock);
1355 1355 thread_join(cqti->cq_thread->t_did);
1356 1356 mutex_enter(&pwp->cq_lock);
1357 1357 }
1358 1358 }
1359 1359 mutex_exit(&pwp->cq_lock);
1360 1360 kmem_free(pwp->cq_info.cq_thr_info,
1361 1361 sizeof (pmcs_cq_thr_info_t) * pwp->cq_info.cq_threads);
1362 1362
1363 1363 /*
1364 1364 * Stop the interrupt coalescing timer thread
1365 1365 */
1366 1366 if (pwp->ict_thread) {
1367 1367 mutex_enter(&pwp->ict_lock);
1368 1368 pwp->io_intr_coal.stop_thread = B_TRUE;
1369 1369 cv_signal(&pwp->ict_cv);
1370 1370 mutex_exit(&pwp->ict_lock);
1371 1371 thread_join(pwp->ict_thread->t_did);
1372 1372 }
1373 1373 } else {
1374 1374 if (pwp->state != STATE_DEAD) {
1375 1375 pwp->state = STATE_UNPROBING;
1376 1376 }
1377 1377 curstate = pwp->state;
1378 1378 }
1379 1379
1380 1380 /*
1381 1381 * Make sure that any pending watchdog won't
1382 1382 * be called from this point on out.
1383 1383 */
1384 1384 (void) untimeout(pwp->wdhandle);
1385 1385 /*
1386 1386 * After the above action, the watchdog
1387 1387 * timer that starts up the worker task
1388 1388 * may trigger but will exit immediately
1389 1389 * on triggering.
1390 1390 *
1391 1391 * Now that this is done, we can destroy
1392 1392 * the task queue, which will wait if we're
1393 1393 * running something on it.
1394 1394 */
1395 1395 if (pwp->tq) {
1396 1396 ddi_taskq_destroy(pwp->tq);
1397 1397 pwp->tq = NULL;
1398 1398 }
1399 1399
1400 1400 pmcs_fm_fini(pwp);
1401 1401
1402 1402 if (pwp->hba_attached) {
1403 1403 (void) scsi_hba_detach(pwp->dip);
1404 1404 pwp->hba_attached = 0;
1405 1405 }
1406 1406
1407 1407 /*
1408 1408 * If the chip hasn't been marked dead, shut it down now
1409 1409 * to bring it back to a known state without attempting
1410 1410 * a soft reset.
1411 1411 */
1412 1412 if (curstate != STATE_DEAD && pwp->locks_initted) {
1413 1413 /*
1414 1414 * De-register all registered devices
1415 1415 */
1416 1416 pmcs_deregister_devices(pwp, pwp->root_phys);
1417 1417
1418 1418 /*
1419 1419 * Stop all the phys.
1420 1420 */
1421 1421 pmcs_stop_phys(pwp);
1422 1422
1423 1423 /*
1424 1424 * Shut Down Message Passing
1425 1425 */
1426 1426 (void) pmcs_stop_mpi(pwp);
1427 1427
1428 1428 /*
1429 1429 * Reset chip
1430 1430 */
1431 1431 (void) pmcs_soft_reset(pwp, B_FALSE);
1432 1432 pwp->last_reset_reason = PMCS_LAST_RST_DETACH;
1433 1433 }
1434 1434
1435 1435 /*
1436 1436 * Turn off interrupts on the chip
1437 1437 */
1438 1438 if (pwp->mpi_acc_handle) {
1439 1439 pmcs_wr_msgunit(pwp, PMCS_MSGU_OBDB_MASK, 0xffffffff);
1440 1440 }
1441 1441
1442 1442 if (pwp->hss_phymap != NULL) {
1443 1443 /* Destroy the phymap */
1444 1444 sas_phymap_destroy(pwp->hss_phymap);
1445 1445 }
1446 1446
1447 1447 if (pwp->hss_iportmap != NULL) {
1448 1448 /* Destroy the iportmap */
1449 1449 scsi_hba_iportmap_destroy(pwp->hss_iportmap);
1450 1450 }
1451 1451
1452 1452 /* Destroy the iports lock and list */
1453 1453 rw_destroy(&pwp->iports_lock);
1454 1454 ASSERT(list_is_empty(&pwp->iports));
1455 1455 list_destroy(&pwp->iports);
1456 1456
1457 1457 /*
1458 1458 * Free DMA handles and associated consistent memory
1459 1459 */
1460 1460 if (pwp->regdump_hndl) {
1461 1461 if (ddi_dma_unbind_handle(pwp->regdump_hndl) != DDI_SUCCESS) {
1462 1462 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
1463 1463 "Condition check failed "
1464 1464 "at %s():%d", __func__, __LINE__);
1465 1465 }
1466 1466 ddi_dma_free_handle(&pwp->regdump_hndl);
1467 1467 ddi_dma_mem_free(&pwp->regdump_acchdl);
1468 1468 pwp->regdump_hndl = 0;
1469 1469 }
1470 1470 if (pwp->fwlog_hndl) {
1471 1471 if (ddi_dma_unbind_handle(pwp->fwlog_hndl) != DDI_SUCCESS) {
1472 1472 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
1473 1473 "Condition check failed "
1474 1474 "at %s():%d", __func__, __LINE__);
1475 1475 }
1476 1476 ddi_dma_free_handle(&pwp->fwlog_hndl);
1477 1477 ddi_dma_mem_free(&pwp->fwlog_acchdl);
1478 1478 pwp->fwlog_hndl = 0;
1479 1479 }
1480 1480 if (pwp->cip_handles) {
1481 1481 if (ddi_dma_unbind_handle(pwp->cip_handles) != DDI_SUCCESS) {
1482 1482 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
1483 1483 "Condition check failed "
1484 1484 "at %s():%d", __func__, __LINE__);
1485 1485 }
1486 1486 ddi_dma_free_handle(&pwp->cip_handles);
1487 1487 ddi_dma_mem_free(&pwp->cip_acchdls);
1488 1488 pwp->cip_handles = 0;
1489 1489 }
1490 1490 for (i = 0; i < PMCS_NOQ; i++) {
1491 1491 if (pwp->oqp_handles[i]) {
1492 1492 if (ddi_dma_unbind_handle(pwp->oqp_handles[i]) !=
1493 1493 DDI_SUCCESS) {
1494 1494 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
1495 1495 "Condition check failed at %s():%d",
1496 1496 __func__, __LINE__);
1497 1497 }
1498 1498 ddi_dma_free_handle(&pwp->oqp_handles[i]);
1499 1499 ddi_dma_mem_free(&pwp->oqp_acchdls[i]);
1500 1500 pwp->oqp_handles[i] = 0;
1501 1501 }
1502 1502 }
1503 1503 for (i = 0; i < PMCS_NIQ; i++) {
1504 1504 if (pwp->iqp_handles[i]) {
1505 1505 if (ddi_dma_unbind_handle(pwp->iqp_handles[i]) !=
1506 1506 DDI_SUCCESS) {
1507 1507 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
1508 1508 "Condition check failed at %s():%d",
1509 1509 __func__, __LINE__);
1510 1510 }
1511 1511 ddi_dma_free_handle(&pwp->iqp_handles[i]);
1512 1512 ddi_dma_mem_free(&pwp->iqp_acchdls[i]);
1513 1513 pwp->iqp_handles[i] = 0;
1514 1514 }
1515 1515 }
1516 1516
1517 1517 pmcs_free_dma_chunklist(pwp);
1518 1518
1519 1519 /*
1520 1520 * Unmap registers and destroy access handles
1521 1521 */
1522 1522 if (pwp->mpi_acc_handle) {
1523 1523 ddi_regs_map_free(&pwp->mpi_acc_handle);
1524 1524 pwp->mpi_acc_handle = 0;
1525 1525 }
1526 1526 if (pwp->top_acc_handle) {
1527 1527 ddi_regs_map_free(&pwp->top_acc_handle);
1528 1528 pwp->top_acc_handle = 0;
1529 1529 }
1530 1530 if (pwp->gsm_acc_handle) {
1531 1531 ddi_regs_map_free(&pwp->gsm_acc_handle);
1532 1532 pwp->gsm_acc_handle = 0;
1533 1533 }
1534 1534 if (pwp->msg_acc_handle) {
1535 1535 ddi_regs_map_free(&pwp->msg_acc_handle);
1536 1536 pwp->msg_acc_handle = 0;
1537 1537 }
1538 1538 if (pwp->pci_acc_handle) {
1539 1539 pci_config_teardown(&pwp->pci_acc_handle);
1540 1540 pwp->pci_acc_handle = 0;
1541 1541 }
1542 1542
1543 1543 /*
1544 1544 * Do memory allocation cleanup.
1545 1545 */
1546 1546 while (pwp->dma_freelist) {
1547 1547 pmcs_dmachunk_t *this = pwp->dma_freelist;
1548 1548 pwp->dma_freelist = this->nxt;
1549 1549 kmem_free(this, sizeof (pmcs_dmachunk_t));
1550 1550 }
1551 1551
1552 1552 /*
1553 1553 * Free pools
1554 1554 */
1555 1555 if (pwp->iocomp_cb_cache) {
1556 1556 kmem_cache_destroy(pwp->iocomp_cb_cache);
1557 1557 }
1558 1558
1559 1559 /*
1560 1560 * Free all PHYs (at level > 0), then free the cache
1561 1561 */
1562 1562 pmcs_free_all_phys(pwp, pwp->root_phys);
1563 1563 if (pwp->phy_cache) {
1564 1564 kmem_cache_destroy(pwp->phy_cache);
1565 1565 }
1566 1566
1567 1567 /*
1568 1568 * Free root PHYs
1569 1569 */
1570 1570 if (pwp->root_phys) {
1571 1571 pmcs_phy_t *phyp = pwp->root_phys;
1572 1572 for (i = 0; i < pwp->nphy; i++) {
1573 1573 mutex_destroy(&phyp->phy_lock);
1574 1574 phyp = phyp->sibling;
1575 1575 }
1576 1576 kmem_free(pwp->root_phys, pwp->nphy * sizeof (pmcs_phy_t));
1577 1577 pwp->root_phys = NULL;
1578 1578 pwp->nphy = 0;
1579 1579 }
1580 1580
1581 1581 /* Free the targets list */
1582 1582 if (pwp->targets) {
1583 1583 kmem_free(pwp->targets,
1584 1584 sizeof (pmcs_xscsi_t *) * pwp->max_dev);
1585 1585 }
1586 1586
1587 1587 /*
1588 1588 * Free work structures
1589 1589 */
1590 1590
1591 1591 if (pwp->work && pwp->max_cmd) {
1592 1592 for (i = 0; i < pwp->max_cmd; i++) {
1593 1593 pmcwork_t *pwrk = &pwp->work[i];
1594 1594 mutex_destroy(&pwrk->lock);
1595 1595 cv_destroy(&pwrk->sleep_cv);
1596 1596 }
1597 1597 kmem_free(pwp->work, sizeof (pmcwork_t) * pwp->max_cmd);
1598 1598 pwp->work = NULL;
1599 1599 pwp->max_cmd = 0;
1600 1600 }
1601 1601
1602 1602 /*
1603 1603 * Do last property and SCSA cleanup
1604 1604 */
1605 1605 if (pwp->smp_tran) {
1606 1606 smp_hba_tran_free(pwp->smp_tran);
1607 1607 pwp->smp_tran = NULL;
1608 1608 }
1609 1609 if (pwp->tran) {
1610 1610 scsi_hba_tran_free(pwp->tran);
1611 1611 pwp->tran = NULL;
1612 1612 }
1613 1613 if (pwp->reset_notify_listf) {
1614 1614 scsi_hba_reset_notify_tear_down(pwp->reset_notify_listf);
1615 1615 pwp->reset_notify_listf = NULL;
1616 1616 }
1617 1617 ddi_prop_remove_all(pwp->dip);
1618 1618 if (pwp->stuck) {
1619 1619 return (-1);
1620 1620 }
1621 1621
1622 1622 /* Free register dump area if allocated */
1623 1623 if (pwp->regdumpp) {
1624 1624 kmem_free(pwp->regdumpp, PMCS_REG_DUMP_SIZE);
1625 1625 pwp->regdumpp = NULL;
1626 1626 }
1627 1627 if (pwp->iqpt && pwp->iqpt->head) {
1628 1628 kmem_free(pwp->iqpt->head, PMCS_IQP_TRACE_BUFFER_SIZE);
1629 1629 pwp->iqpt->head = pwp->iqpt->curpos = NULL;
1630 1630 }
1631 1631 if (pwp->iqpt) {
1632 1632 kmem_free(pwp->iqpt, sizeof (pmcs_iqp_trace_t));
1633 1633 pwp->iqpt = NULL;
1634 1634 }
1635 1635
1636 1636 /* Destroy pwp's lock */
1637 1637 if (pwp->locks_initted) {
1638 1638 mutex_destroy(&pwp->lock);
1639 1639 mutex_destroy(&pwp->dma_lock);
1640 1640 mutex_destroy(&pwp->axil_lock);
1641 1641 mutex_destroy(&pwp->cq_lock);
1642 1642 mutex_destroy(&pwp->config_lock);
1643 1643 mutex_destroy(&pwp->ict_lock);
1644 1644 mutex_destroy(&pwp->wfree_lock);
1645 1645 mutex_destroy(&pwp->pfree_lock);
1646 1646 mutex_destroy(&pwp->dead_phylist_lock);
1647 1647 #ifdef DEBUG
1648 1648 mutex_destroy(&pwp->dbglock);
1649 1649 #endif
1650 1650 cv_destroy(&pwp->config_cv);
1651 1651 cv_destroy(&pwp->ict_cv);
1652 1652 cv_destroy(&pwp->drain_cv);
1653 1653 pwp->locks_initted = 0;
1654 1654 }
1655 1655
1656 1656 ddi_soft_state_free(pmcs_softc_state, ddi_get_instance(pwp->dip));
1657 1657 return (0);
1658 1658 }
1659 1659
1660 1660 /*
1661 1661 * quiesce (9E) entry point
1662 1662 *
1663 1663 * This function is called when the system is single-threaded at high PIL
1664 1664 * with preemption disabled. Therefore, the function must not block/wait/sleep.
1665 1665 *
1666 1666 * Returns DDI_SUCCESS or DDI_FAILURE.
1667 1667 *
1668 1668 */
1669 1669 static int
1670 1670 pmcs_quiesce(dev_info_t *dip)
1671 1671 {
1672 1672 pmcs_hw_t *pwp;
1673 1673 scsi_hba_tran_t *tran;
1674 1674
1675 1675 if ((tran = ddi_get_driver_private(dip)) == NULL)
1676 1676 return (DDI_SUCCESS);
1677 1677
1678 1678 /* No quiesce necessary on a per-iport basis */
1679 1679 if (scsi_hba_iport_unit_address(dip) != NULL) {
1680 1680 return (DDI_SUCCESS);
1681 1681 }
1682 1682
1683 1683 if ((pwp = TRAN2PMC(tran)) == NULL)
1684 1684 return (DDI_SUCCESS);
1685 1685
1686 1686 /* Stop MPI & Reset chip (no need to re-initialize) */
1687 1687 (void) pmcs_stop_mpi(pwp);
1688 1688 (void) pmcs_soft_reset(pwp, B_TRUE);
1689 1689 pwp->last_reset_reason = PMCS_LAST_RST_QUIESCE;
1690 1690
1691 1691 return (DDI_SUCCESS);
1692 1692 }
1693 1693
1694 1694 /*
1695 1695 * Called with xp->statlock and PHY lock and scratch acquired.
1696 1696 */
1697 1697 static int
1698 1698 pmcs_add_sata_device(pmcs_hw_t *pwp, pmcs_xscsi_t *xp)
1699 1699 {
1700 1700 ata_identify_t *ati;
1701 1701 int result, i;
1702 1702 pmcs_phy_t *pptr;
1703 1703 uint16_t *a;
1704 1704 union {
1705 1705 uint8_t nsa[8];
1706 1706 uint16_t nsb[4];
1707 1707 } u;
1708 1708
1709 1709 /*
1710 1710 * Safe defaults - use only if this target is brand new (i.e. doesn't
1711 1711 * already have these settings configured)
1712 1712 */
1713 1713 if (xp->capacity == 0) {
1714 1714 xp->capacity = (uint64_t)-1;
1715 1715 xp->ca = 1;
1716 1716 xp->qdepth = 1;
1717 1717 xp->pio = 1;
1718 1718 }
1719 1719
1720 1720 pptr = xp->phy;
1721 1721
1722 1722 /*
1723 1723 * We only try and issue an IDENTIFY for first level
1724 1724 * (direct attached) devices. We don't try and
1725 1725 * set other quirks here (this will happen later,
1726 1726 * if the device is fully configured)
1727 1727 */
1728 1728 if (pptr->level) {
1729 1729 return (0);
1730 1730 }
1731 1731
1732 1732 mutex_exit(&xp->statlock);
1733 1733 result = pmcs_sata_identify(pwp, pptr);
1734 1734 mutex_enter(&xp->statlock);
1735 1735
1736 1736 if (result) {
1737 1737 return (result);
1738 1738 }
1739 1739 ati = pwp->scratch;
1740 1740 a = &ati->word108;
1741 1741 for (i = 0; i < 4; i++) {
1742 1742 u.nsb[i] = ddi_swap16(*a++);
1743 1743 }
1744 1744
1745 1745 /*
1746 1746 * Check the returned data for being a valid (NAA=5) WWN.
1747 1747 * If so, use that and override the SAS address we were
1748 1748 * given at Link Up time.
1749 1749 */
1750 1750 if ((u.nsa[0] >> 4) == 5) {
1751 1751 (void) memcpy(pptr->sas_address, u.nsa, 8);
1752 1752 }
1753 1753 pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, xp,
1754 1754 "%s: %s has SAS ADDRESS " SAS_ADDR_FMT,
1755 1755 __func__, pptr->path, SAS_ADDR_PRT(pptr->sas_address));
1756 1756 return (0);
1757 1757 }
1758 1758
1759 1759 /*
1760 1760 * Called with PHY lock and target statlock held and scratch acquired
1761 1761 */
1762 1762 static boolean_t
1763 1763 pmcs_add_new_device(pmcs_hw_t *pwp, pmcs_xscsi_t *target)
1764 1764 {
1765 1765 ASSERT(target != NULL);
1766 1766 pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, NULL, target, "%s: target = 0x%p",
1767 1767 __func__, (void *) target);
1768 1768
1769 1769 switch (target->phy->dtype) {
1770 1770 case SATA:
1771 1771 if (pmcs_add_sata_device(pwp, target) != 0) {
1772 1772 pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, target->phy,
1773 1773 target, "%s: add_sata_device failed for tgt 0x%p",
1774 1774 __func__, (void *) target);
1775 1775 return (B_FALSE);
1776 1776 }
1777 1777 break;
1778 1778 case SAS:
1779 1779 target->qdepth = maxqdepth;
1780 1780 break;
1781 1781 case EXPANDER:
1782 1782 target->qdepth = 1;
1783 1783 break;
1784 1784 }
1785 1785
1786 1786 target->new = 0;
1787 1787 target->assigned = 1;
1788 1788 target->dev_state = PMCS_DEVICE_STATE_OPERATIONAL;
1789 1789 target->dtype = target->phy->dtype;
1790 1790
1791 1791 /*
1792 1792 * Set the PHY's config stop time to 0. This is one of the final
1793 1793 * stops along the config path, so we're indicating that we
1794 1794 * successfully configured the PHY.
1795 1795 */
1796 1796 target->phy->config_stop = 0;
1797 1797
1798 1798 return (B_TRUE);
1799 1799 }
1800 1800
1801 1801 void
1802 1802 pmcs_worker(void *arg)
1803 1803 {
1804 1804 pmcs_hw_t *pwp = arg;
1805 1805 ulong_t work_flags;
1806 1806
1807 1807 DTRACE_PROBE2(pmcs__worker, ulong_t, pwp->work_flags, boolean_t,
1808 1808 pwp->config_changed);
1809 1809
1810 1810 if (pwp->state != STATE_RUNNING) {
1811 1811 return;
1812 1812 }
1813 1813
1814 1814 work_flags = atomic_swap_ulong(&pwp->work_flags, 0);
1815 1815
1816 1816 if (work_flags & PMCS_WORK_FLAG_DUMP_REGS) {
1817 1817 mutex_enter(&pwp->lock);
1818 1818 pmcs_register_dump_int(pwp);
1819 1819 mutex_exit(&pwp->lock);
1820 1820 }
1821 1821
1822 1822 if (work_flags & PMCS_WORK_FLAG_SAS_HW_ACK) {
1823 1823 pmcs_ack_events(pwp);
1824 1824 }
1825 1825
1826 1826 if (work_flags & PMCS_WORK_FLAG_SPINUP_RELEASE) {
1827 1827 mutex_enter(&pwp->lock);
1828 1828 pmcs_spinup_release(pwp, NULL);
1829 1829 mutex_exit(&pwp->lock);
1830 1830 }
1831 1831
1832 1832 if (work_flags & PMCS_WORK_FLAG_SSP_EVT_RECOVERY) {
1833 1833 pmcs_ssp_event_recovery(pwp);
1834 1834 }
1835 1835
1836 1836 if (work_flags & PMCS_WORK_FLAG_DS_ERR_RECOVERY) {
1837 1837 pmcs_dev_state_recovery(pwp, NULL);
1838 1838 }
1839 1839
1840 1840 if (work_flags & PMCS_WORK_FLAG_DEREGISTER_DEV) {
1841 1841 pmcs_deregister_device_work(pwp, NULL);
1842 1842 }
1843 1843
1844 1844 if (work_flags & PMCS_WORK_FLAG_DISCOVER) {
1845 1845 pmcs_discover(pwp);
1846 1846 }
1847 1847
1848 1848 if (work_flags & PMCS_WORK_FLAG_ABORT_HANDLE) {
1849 1849 if (pmcs_abort_handler(pwp)) {
1850 1850 SCHEDULE_WORK(pwp, PMCS_WORK_ABORT_HANDLE);
1851 1851 }
1852 1852 }
1853 1853
1854 1854 if (work_flags & PMCS_WORK_FLAG_SATA_RUN) {
1855 1855 pmcs_sata_work(pwp);
1856 1856 }
1857 1857
1858 1858 if (work_flags & PMCS_WORK_FLAG_RUN_QUEUES) {
1859 1859 pmcs_scsa_wq_run(pwp);
1860 1860 mutex_enter(&pwp->lock);
1861 1861 PMCS_CQ_RUN(pwp);
1862 1862 mutex_exit(&pwp->lock);
1863 1863 }
1864 1864
1865 1865 if (work_flags & PMCS_WORK_FLAG_ADD_DMA_CHUNKS) {
1866 1866 if (pmcs_add_more_chunks(pwp,
1867 1867 ptob(1) * PMCS_ADDTL_CHUNK_PAGES)) {
1868 1868 SCHEDULE_WORK(pwp, PMCS_WORK_ADD_DMA_CHUNKS);
1869 1869 } else {
1870 1870 SCHEDULE_WORK(pwp, PMCS_WORK_RUN_QUEUES);
1871 1871 }
1872 1872 }
1873 1873 }
1874 1874
1875 1875 static int
1876 1876 pmcs_add_more_chunks(pmcs_hw_t *pwp, unsigned long nsize)
1877 1877 {
1878 1878 pmcs_dmachunk_t *dc;
1879 1879 unsigned long dl;
1880 1880 pmcs_chunk_t *pchunk = NULL;
1881 1881
1882 1882 pwp->cip_dma_attr.dma_attr_align = sizeof (uint32_t);
1883 1883
1884 1884 pchunk = kmem_zalloc(sizeof (pmcs_chunk_t), KM_SLEEP);
1885 1885 if (pchunk == NULL) {
1886 1886 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
1887 1887 "Not enough memory for DMA chunks");
1888 1888 return (-1);
1889 1889 }
1890 1890
1891 1891 if (pmcs_dma_setup(pwp, &pwp->cip_dma_attr, &pchunk->acc_handle,
1892 1892 &pchunk->dma_handle, nsize, (caddr_t *)&pchunk->addrp,
1893 1893 &pchunk->dma_addr) == B_FALSE) {
1894 1894 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
1895 1895 "Failed to setup DMA for chunks");
1896 1896 kmem_free(pchunk, sizeof (pmcs_chunk_t));
1897 1897 return (-1);
1898 1898 }
1899 1899
1900 1900 if ((pmcs_check_acc_handle(pchunk->acc_handle) != DDI_SUCCESS) ||
1901 1901 (pmcs_check_dma_handle(pchunk->dma_handle) != DDI_SUCCESS)) {
1902 1902 ddi_fm_service_impact(pwp->dip, DDI_SERVICE_UNAFFECTED);
1903 1903 return (-1);
1904 1904 }
1905 1905
1906 1906 bzero(pchunk->addrp, nsize);
1907 1907 dc = NULL;
1908 1908 for (dl = 0; dl < (nsize / PMCS_SGL_CHUNKSZ); dl++) {
1909 1909 pmcs_dmachunk_t *tmp;
1910 1910 tmp = kmem_alloc(sizeof (pmcs_dmachunk_t), KM_SLEEP);
1911 1911 tmp->nxt = dc;
1912 1912 dc = tmp;
1913 1913 }
1914 1914 mutex_enter(&pwp->dma_lock);
1915 1915 pmcs_idma_chunks(pwp, dc, pchunk, nsize);
1916 1916 pwp->nchunks++;
1917 1917 mutex_exit(&pwp->dma_lock);
1918 1918 return (0);
1919 1919 }
1920 1920
1921 1921 static void
1922 1922 pmcs_check_forward_progress(pmcs_hw_t *pwp)
1923 1923 {
1924 1924 pmcwork_t *wrkp;
1925 1925 uint32_t *iqp;
1926 1926 uint32_t cur_iqci;
1927 1927 uint32_t cur_work_idx;
1928 1928 uint32_t cur_msgu_tick;
1929 1929 uint32_t cur_iop_tick;
1930 1930 int i;
1931 1931
1932 1932 mutex_enter(&pwp->lock);
1933 1933
1934 1934 if (pwp->state == STATE_IN_RESET) {
1935 1935 mutex_exit(&pwp->lock);
1936 1936 return;
1937 1937 }
1938 1938
1939 1939 /*
1940 1940 * Ensure that inbound work is getting picked up. First, check to
1941 1941 * see if new work has been posted. If it has, ensure that the
1942 1942 * work is moving forward by checking the consumer index and the
1943 1943 * last_htag for the work being processed against what we saw last
1944 1944 * time. Note: we use the work structure's 'last_htag' because at
1945 1945 * any given moment it could be freed back, thus clearing 'htag'
1946 1946 * and setting 'last_htag' (see pmcs_pwork).
1947 1947 */
1948 1948 for (i = 0; i < PMCS_NIQ; i++) {
1949 1949 cur_iqci = pmcs_rd_iqci(pwp, i);
1950 1950 iqp = &pwp->iqp[i][cur_iqci * (PMCS_QENTRY_SIZE >> 2)];
1951 1951 cur_work_idx = PMCS_TAG_INDEX(LE_32(*(iqp+1)));
1952 1952 wrkp = &pwp->work[cur_work_idx];
1953 1953 if (cur_iqci == pwp->shadow_iqpi[i]) {
1954 1954 pwp->last_iqci[i] = cur_iqci;
1955 1955 pwp->last_htag[i] = wrkp->last_htag;
1956 1956 continue;
1957 1957 }
1958 1958 if ((cur_iqci == pwp->last_iqci[i]) &&
1959 1959 (wrkp->last_htag == pwp->last_htag[i])) {
1960 1960 pmcs_prt(pwp, PMCS_PRT_WARN, NULL, NULL,
1961 1961 "Inbound Queue stall detected, issuing reset");
1962 1962 goto hot_reset;
1963 1963 }
1964 1964 pwp->last_iqci[i] = cur_iqci;
1965 1965 pwp->last_htag[i] = wrkp->last_htag;
1966 1966 }
1967 1967
1968 1968 /*
1969 1969 * Check heartbeat on both the MSGU and IOP. It is unlikely that
1970 1970 * we'd ever fail here, as the inbound queue monitoring code above
1971 1971 * would detect a stall due to either of these elements being
1972 1972 * stalled, but we might as well keep an eye on them.
1973 1973 */
1974 1974 cur_msgu_tick = pmcs_rd_gst_tbl(pwp, PMCS_GST_MSGU_TICK);
1975 1975 if (cur_msgu_tick == pwp->last_msgu_tick) {
1976 1976 pmcs_prt(pwp, PMCS_PRT_WARN, NULL, NULL,
1977 1977 "Stall detected on MSGU, issuing reset");
1978 1978 goto hot_reset;
1979 1979 }
1980 1980 pwp->last_msgu_tick = cur_msgu_tick;
1981 1981
1982 1982 cur_iop_tick = pmcs_rd_gst_tbl(pwp, PMCS_GST_IOP_TICK);
1983 1983 if (cur_iop_tick == pwp->last_iop_tick) {
1984 1984 pmcs_prt(pwp, PMCS_PRT_WARN, NULL, NULL,
1985 1985 "Stall detected on IOP, issuing reset");
1986 1986 goto hot_reset;
1987 1987 }
1988 1988 pwp->last_iop_tick = cur_iop_tick;
1989 1989
1990 1990 mutex_exit(&pwp->lock);
1991 1991 return;
1992 1992
1993 1993 hot_reset:
1994 1994 pwp->state = STATE_DEAD;
1995 1995 /*
1996 1996 * We've detected a stall. Attempt to recover service via hot
1997 1997 * reset. In case of failure, pmcs_hot_reset() will handle the
1998 1998 * failure and issue any required FM notifications.
1999 1999 * See pmcs_subr.c for more details.
2000 2000 */
2001 2001 if (pmcs_hot_reset(pwp)) {
2002 2002 pmcs_prt(pwp, PMCS_PRT_ERR, NULL, NULL,
2003 2003 "%s: hot reset failure", __func__);
2004 2004 } else {
2005 2005 pmcs_prt(pwp, PMCS_PRT_ERR, NULL, NULL,
2006 2006 "%s: hot reset complete", __func__);
2007 2007 pwp->last_reset_reason = PMCS_LAST_RST_STALL;
2008 2008 }
2009 2009 mutex_exit(&pwp->lock);
2010 2010 }
2011 2011
2012 2012 static void
2013 2013 pmcs_check_commands(pmcs_hw_t *pwp)
2014 2014 {
2015 2015 pmcs_cmd_t *sp;
2016 2016 size_t amt;
2017 2017 char path[32];
2018 2018 pmcwork_t *pwrk;
2019 2019 pmcs_xscsi_t *target;
2020 2020 pmcs_phy_t *phyp;
2021 2021 int rval;
2022 2022
2023 2023 for (pwrk = pwp->work; pwrk < &pwp->work[pwp->max_cmd]; pwrk++) {
2024 2024 mutex_enter(&pwrk->lock);
2025 2025
2026 2026 /*
2027 2027 * If the command isn't active, we can't be timing it still.
2028 2028 * Active means the tag is not free and the state is "on chip".
2029 2029 */
2030 2030 if (!PMCS_COMMAND_ACTIVE(pwrk)) {
2031 2031 mutex_exit(&pwrk->lock);
2032 2032 continue;
2033 2033 }
2034 2034
2035 2035 /*
2036 2036 * No timer active for this command.
2037 2037 */
2038 2038 if (pwrk->timer == 0) {
2039 2039 mutex_exit(&pwrk->lock);
2040 2040 continue;
2041 2041 }
2042 2042
2043 2043 /*
2044 2044 * Knock off bits for the time interval.
2045 2045 */
2046 2046 if (pwrk->timer >= US2WT(PMCS_WATCH_INTERVAL)) {
2047 2047 pwrk->timer -= US2WT(PMCS_WATCH_INTERVAL);
2048 2048 } else {
2049 2049 pwrk->timer = 0;
2050 2050 }
2051 2051 if (pwrk->timer > 0) {
2052 2052 mutex_exit(&pwrk->lock);
2053 2053 continue;
2054 2054 }
2055 2055
2056 2056 /*
2057 2057 * The command has now officially timed out.
2058 2058 * Get the path for it. If it doesn't have
2059 2059 * a phy pointer any more, it's really dead
2060 2060 * and can just be put back on the free list.
2061 2061 * There should *not* be any commands associated
2062 2062 * with it any more.
2063 2063 */
2064 2064 if (pwrk->phy == NULL) {
2065 2065 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
2066 2066 "dead command with gone phy being recycled");
2067 2067 ASSERT(pwrk->xp == NULL);
2068 2068 pmcs_pwork(pwp, pwrk);
2069 2069 continue;
2070 2070 }
2071 2071 amt = sizeof (path);
2072 2072 amt = min(sizeof (pwrk->phy->path), amt);
2073 2073 (void) memcpy(path, pwrk->phy->path, amt);
2074 2074
2075 2075 /*
2076 2076 * If this is a non-SCSA command, stop here. Eventually
2077 2077 	 * we might do something with non-SCSA commands here,
2078 2078 * but so far their timeout mechanisms are handled in
2079 2079 * the WAIT_FOR macro.
2080 2080 */
2081 2081 if (pwrk->xp == NULL) {
2082 2082 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
2083 2083 "%s: non-SCSA cmd tag 0x%x timed out",
2084 2084 path, pwrk->htag);
2085 2085 mutex_exit(&pwrk->lock);
2086 2086 continue;
2087 2087 }
2088 2088
2089 2089 sp = pwrk->arg;
2090 2090 ASSERT(sp != NULL);
2091 2091
2092 2092 /*
2093 2093 * Mark it as timed out.
2094 2094 */
2095 2095 CMD2PKT(sp)->pkt_reason = CMD_TIMEOUT;
2096 2096 CMD2PKT(sp)->pkt_statistics |= STAT_TIMEOUT;
2097 2097 #ifdef DEBUG
2098 2098 pmcs_prt(pwp, PMCS_PRT_DEBUG, pwrk->phy, pwrk->xp,
2099 2099 "%s: SCSA cmd tag 0x%x timed out (state %x) onwire=%d",
2100 2100 path, pwrk->htag, pwrk->state, pwrk->onwire);
2101 2101 #else
2102 2102 pmcs_prt(pwp, PMCS_PRT_DEBUG, pwrk->phy, pwrk->xp,
2103 2103 "%s: SCSA cmd tag 0x%x timed out (state %x)",
2104 2104 path, pwrk->htag, pwrk->state);
2105 2105 #endif
2106 2106 /*
2107 2107 * Mark the work structure as timed out.
2108 2108 */
2109 2109 pwrk->state = PMCS_WORK_STATE_TIMED_OUT;
2110 2110 phyp = pwrk->phy;
2111 2111 target = pwrk->xp;
2112 2112 ASSERT(target != NULL);
2113 2113 mutex_exit(&pwrk->lock);
2114 2114
2115 2115 pmcs_lock_phy(phyp);
2116 2116 mutex_enter(&target->statlock);
2117 2117
2118 2118 /*
2119 2119 * No point attempting recovery if the device is gone
2120 2120 */
2121 2121 if (target->dev_gone) {
2122 2122 mutex_exit(&target->statlock);
2123 2123 pmcs_unlock_phy(phyp);
2124 2124 pmcs_prt(pwp, PMCS_PRT_DEBUG, phyp, target,
2125 2125 "%s: tgt(0x%p) is gone. Returning CMD_DEV_GONE "
2126 2126 "for htag 0x%08x", __func__,
2127 2127 (void *)target, pwrk->htag);
2128 2128 mutex_enter(&pwrk->lock);
2129 2129 if (!PMCS_COMMAND_DONE(pwrk)) {
2130 2130 /* Complete this command here */
2131 2131 pmcs_prt(pwp, PMCS_PRT_DEBUG, phyp, target,
2132 2132 "%s: Completing cmd (htag 0x%08x) "
2133 2133 "anyway", __func__, pwrk->htag);
2134 2134 pwrk->dead = 1;
2135 2135 CMD2PKT(sp)->pkt_reason = CMD_DEV_GONE;
2136 2136 CMD2PKT(sp)->pkt_state = STATE_GOT_BUS;
2137 2137 pmcs_complete_work_impl(pwp, pwrk, NULL, 0);
2138 2138 } else {
2139 2139 mutex_exit(&pwrk->lock);
2140 2140 }
2141 2141 continue;
2142 2142 }
2143 2143
2144 2144 mutex_exit(&target->statlock);
2145 2145 rval = pmcs_abort(pwp, phyp, pwrk->htag, 0, 1);
2146 2146 if (rval) {
2147 2147 pmcs_prt(pwp, PMCS_PRT_DEBUG, phyp, target,
2148 2148 "%s: Bad status (%d) on abort of HTAG 0x%08x",
2149 2149 __func__, rval, pwrk->htag);
2150 2150 pmcs_unlock_phy(phyp);
2151 2151 mutex_enter(&pwrk->lock);
2152 2152 if (!PMCS_COMMAND_DONE(pwrk)) {
2153 2153 /* Complete this command here */
2154 2154 pmcs_prt(pwp, PMCS_PRT_DEBUG, phyp, target,
2155 2155 "%s: Completing cmd (htag 0x%08x) "
2156 2156 "anyway", __func__, pwrk->htag);
2157 2157 if (target->dev_gone) {
2158 2158 pwrk->dead = 1;
2159 2159 CMD2PKT(sp)->pkt_reason = CMD_DEV_GONE;
2160 2160 CMD2PKT(sp)->pkt_state = STATE_GOT_BUS;
2161 2161 }
2162 2162 pmcs_complete_work_impl(pwp, pwrk, NULL, 0);
2163 2163 } else {
2164 2164 mutex_exit(&pwrk->lock);
2165 2165 }
2166 2166 pmcs_lock_phy(phyp);
2167 2167 /*
2168 2168 * No need to reschedule ABORT if we get any other
2169 2169 * status
2170 2170 */
2171 2171 if (rval == ENOMEM) {
2172 2172 phyp->abort_sent = 0;
2173 2173 phyp->abort_pending = 1;
2174 2174 SCHEDULE_WORK(pwp, PMCS_WORK_ABORT_HANDLE);
2175 2175 }
2176 2176 }
2177 2177 pmcs_unlock_phy(phyp);
2178 2178 }
2179 2179 /*
2180 2180 * Run any completions that may have been queued up.
2181 2181 */
2182 2182 PMCS_CQ_RUN(pwp);
2183 2183 }
2184 2184
2185 2185 static void
2186 2186 pmcs_watchdog(void *arg)
2187 2187 {
2188 2188 pmcs_hw_t *pwp = arg;
2189 2189
2190 2190 DTRACE_PROBE2(pmcs__watchdog, ulong_t, pwp->work_flags, boolean_t,
2191 2191 pwp->config_changed);
2192 2192
2193 2193 /*
2194 2194 * Check forward progress on the chip
2195 2195 */
2196 2196 if (++pwp->watchdog_count == PMCS_FWD_PROG_TRIGGER) {
2197 2197 pwp->watchdog_count = 0;
2198 2198 pmcs_check_forward_progress(pwp);
2199 2199 }
2200 2200
2201 2201 /*
2202 2202 * Check to see if we need to kick discovery off again
2203 2203 */
2204 2204 mutex_enter(&pwp->config_lock);
2205 2205 if (pwp->config_restart &&
2206 2206 (ddi_get_lbolt() >= pwp->config_restart_time)) {
2207 2207 pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, NULL, NULL,
2208 2208 "%s: Timer expired for re-enumeration: Start discovery",
2209 2209 __func__);
2210 2210 pwp->config_restart = B_FALSE;
2211 2211 SCHEDULE_WORK(pwp, PMCS_WORK_DISCOVER);
2212 2212 }
2213 2213 mutex_exit(&pwp->config_lock);
2214 2214
2215 2215 mutex_enter(&pwp->lock);
2216 2216 if (pwp->state != STATE_RUNNING) {
2217 2217 mutex_exit(&pwp->lock);
2218 2218 return;
2219 2219 }
2220 2220
2221 2221 if (atomic_cas_ulong(&pwp->work_flags, 0, 0) != 0) {
2222 2222 if (ddi_taskq_dispatch(pwp->tq, pmcs_worker, pwp,
2223 2223 DDI_NOSLEEP) != DDI_SUCCESS) {
2224 2224 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
2225 2225 "Could not dispatch to worker thread");
2226 2226 }
2227 2227 }
2228 2228 pwp->wdhandle = timeout(pmcs_watchdog, pwp,
2229 2229 drv_usectohz(PMCS_WATCH_INTERVAL));
2230 2230
2231 2231 mutex_exit(&pwp->lock);
2232 2232
2233 2233 pmcs_check_commands(pwp);
2234 2234 pmcs_handle_dead_phys(pwp);
2235 2235 }
2236 2236
2237 2237 static int
2238 2238 pmcs_remove_ihandlers(pmcs_hw_t *pwp, int icnt)
2239 2239 {
2240 2240 int i, r, rslt = 0;
2241 2241 for (i = 0; i < icnt; i++) {
2242 2242 r = ddi_intr_remove_handler(pwp->ih_table[i]);
2243 2243 if (r == DDI_SUCCESS) {
2244 2244 continue;
2245 2245 }
2246 2246 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
2247 2247 "%s: unable to remove interrupt handler %d", __func__, i);
2248 2248 rslt = -1;
2249 2249 break;
2250 2250 }
2251 2251 return (rslt);
2252 2252 }
2253 2253
2254 2254 static int
2255 2255 pmcs_disable_intrs(pmcs_hw_t *pwp, int icnt)
2256 2256 {
2257 2257 if (pwp->intr_cap & DDI_INTR_FLAG_BLOCK) {
2258 2258 int r = ddi_intr_block_disable(&pwp->ih_table[0],
2259 2259 pwp->intr_cnt);
2260 2260 if (r != DDI_SUCCESS) {
2261 2261 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
2262 2262 "unable to disable interrupt block");
2263 2263 return (-1);
2264 2264 }
2265 2265 } else {
2266 2266 int i;
2267 2267 for (i = 0; i < icnt; i++) {
2268 2268 if (ddi_intr_disable(pwp->ih_table[i]) == DDI_SUCCESS) {
2269 2269 continue;
2270 2270 }
2271 2271 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
2272 2272 "unable to disable interrupt %d", i);
2273 2273 return (-1);
2274 2274 }
2275 2275 }
2276 2276 return (0);
2277 2277 }
2278 2278
2279 2279 static int
2280 2280 pmcs_free_intrs(pmcs_hw_t *pwp, int icnt)
2281 2281 {
2282 2282 int i;
2283 2283 for (i = 0; i < icnt; i++) {
2284 2284 if (ddi_intr_free(pwp->ih_table[i]) == DDI_SUCCESS) {
2285 2285 continue;
2286 2286 }
2287 2287 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
2288 2288 "unable to free interrupt %d", i);
2289 2289 return (-1);
2290 2290 }
2291 2291 kmem_free(pwp->ih_table, pwp->ih_table_size);
2292 2292 pwp->ih_table_size = 0;
2293 2293 return (0);
2294 2294 }
2295 2295
2296 2296 /*
2297 2297 * Try to set up interrupts of type "type" with a minimum number of interrupts
2298 2298 * of "min".
2299 2299 */
2300 2300 static void
2301 2301 pmcs_setup_intr_impl(pmcs_hw_t *pwp, int type, int min)
2302 2302 {
2303 2303 int rval, avail, count, actual, max;
2304 2304
2305 2305 rval = ddi_intr_get_nintrs(pwp->dip, type, &count);
2306 2306 if ((rval != DDI_SUCCESS) || (count < min)) {
2307 2307 pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, NULL, NULL,
2308 2308 "%s: get_nintrs failed; type: %d rc: %d count: %d min: %d",
2309 2309 __func__, type, rval, count, min);
2310 2310 return;
2311 2311 }
2312 2312
2313 2313 pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, NULL, NULL,
2314 2314 "%s: nintrs = %d for type: %d", __func__, count, type);
2315 2315
2316 2316 rval = ddi_intr_get_navail(pwp->dip, type, &avail);
2317 2317 if ((rval != DDI_SUCCESS) || (avail < min)) {
2318 2318 pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, NULL, NULL,
2319 2319 "%s: get_navail failed; type: %d rc: %d avail: %d min: %d",
2320 2320 __func__, type, rval, avail, min);
2321 2321 return;
2322 2322 }
2323 2323
2324 2324 pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, NULL, NULL,
2325 2325 "%s: navail = %d for type: %d", __func__, avail, type);
2326 2326
2327 2327 pwp->ih_table_size = avail * sizeof (ddi_intr_handle_t);
2328 2328 pwp->ih_table = kmem_alloc(pwp->ih_table_size, KM_SLEEP);
2329 2329
2330 2330 switch (type) {
2331 2331 case DDI_INTR_TYPE_MSIX:
2332 2332 pwp->int_type = PMCS_INT_MSIX;
2333 2333 max = PMCS_MAX_MSIX;
2334 2334 break;
2335 2335 case DDI_INTR_TYPE_MSI:
2336 2336 pwp->int_type = PMCS_INT_MSI;
2337 2337 max = PMCS_MAX_MSI;
2338 2338 break;
2339 2339 case DDI_INTR_TYPE_FIXED:
2340 2340 default:
2341 2341 pwp->int_type = PMCS_INT_FIXED;
2342 2342 max = PMCS_MAX_FIXED;
2343 2343 break;
2344 2344 }
2345 2345
2346 2346 rval = ddi_intr_alloc(pwp->dip, pwp->ih_table, type, 0, max, &actual,
2347 2347 DDI_INTR_ALLOC_NORMAL);
2348 2348 if (rval != DDI_SUCCESS) {
2349 2349 pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, NULL, NULL,
2350 2350 "%s: ddi_intr_alloc failed; type: %d rc: %d",
2351 2351 __func__, type, rval);
2352 2352 kmem_free(pwp->ih_table, pwp->ih_table_size);
2353 2353 pwp->ih_table = NULL;
2354 2354 pwp->ih_table_size = 0;
2355 2355 pwp->intr_cnt = 0;
2356 2356 pwp->int_type = PMCS_INT_NONE;
2357 2357 return;
2358 2358 }
2359 2359
2360 2360 pwp->intr_cnt = actual;
2361 2361 }
2362 2362
2363 2363 /*
2364 2364 * Set up interrupts.
2365 2365 * We return one of three values:
2366 2366 *
2367 2367 * 0 - success
2368 2368 * EAGAIN - failure to set up interrupts
2369 2369 * EIO - "" + we're now stuck partly enabled
2370 2370 *
2371 2371 * If EIO is returned, we can't unload the driver.
2372 2372 */
2373 2373 static int
2374 2374 pmcs_setup_intr(pmcs_hw_t *pwp)
2375 2375 {
2376 2376 int i, r, itypes, oqv_count;
2377 2377 ddi_intr_handler_t **iv_table;
2378 2378 size_t iv_table_size;
2379 2379 uint_t pri;
2380 2380
2381 2381 if (ddi_intr_get_supported_types(pwp->dip, &itypes) != DDI_SUCCESS) {
2382 2382 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
2383 2383 "cannot get interrupt types");
2384 2384 return (EAGAIN);
2385 2385 }
2386 2386
2387 2387 if (disable_msix) {
2388 2388 itypes &= ~DDI_INTR_TYPE_MSIX;
2389 2389 }
2390 2390 if (disable_msi) {
2391 2391 itypes &= ~DDI_INTR_TYPE_MSI;
2392 2392 }
2393 2393
2394 2394 /*
2395 2395 * We won't know what firmware we're running until we call pmcs_setup,
2396 2396 * and we can't call pmcs_setup until we establish interrupts.
2397 2397 */
2398 2398
2399 2399 pwp->int_type = PMCS_INT_NONE;
2400 2400
2401 2401 /*
2402 2402 * We want PMCS_MAX_MSIX vectors for MSI-X. Anything less would be
2403 2403 * uncivilized.
2404 2404 */
2405 2405 if (itypes & DDI_INTR_TYPE_MSIX) {
2406 2406 pmcs_setup_intr_impl(pwp, DDI_INTR_TYPE_MSIX, PMCS_MAX_MSIX);
2407 2407 if (pwp->int_type == PMCS_INT_MSIX) {
2408 2408 itypes = 0;
2409 2409 }
2410 2410 }
2411 2411
2412 2412 if (itypes & DDI_INTR_TYPE_MSI) {
2413 2413 pmcs_setup_intr_impl(pwp, DDI_INTR_TYPE_MSI, 1);
2414 2414 if (pwp->int_type == PMCS_INT_MSI) {
2415 2415 itypes = 0;
2416 2416 }
2417 2417 }
2418 2418
2419 2419 if (itypes & DDI_INTR_TYPE_FIXED) {
2420 2420 pmcs_setup_intr_impl(pwp, DDI_INTR_TYPE_FIXED, 1);
2421 2421 if (pwp->int_type == PMCS_INT_FIXED) {
2422 2422 itypes = 0;
2423 2423 }
2424 2424 }
2425 2425
2426 2426 if (pwp->intr_cnt == 0) {
2427 2427 pmcs_prt(pwp, PMCS_PRT_ERR, NULL, NULL,
2428 2428 "No interrupts available");
2429 2429 return (EAGAIN);
2430 2430 }
2431 2431
2432 2432 iv_table_size = sizeof (ddi_intr_handler_t *) * pwp->intr_cnt;
2433 2433 iv_table = kmem_alloc(iv_table_size, KM_SLEEP);
2434 2434
2435 2435 /*
2436 2436 * Get iblock cookie and add handlers.
2437 2437 */
2438 2438 switch (pwp->intr_cnt) {
2439 2439 case 1:
2440 2440 iv_table[0] = pmcs_all_intr;
2441 2441 break;
2442 2442 case 2:
2443 2443 iv_table[0] = pmcs_iodone_ix;
2444 2444 iv_table[1] = pmcs_nonio_ix;
2445 2445 break;
2446 2446 case 4:
2447 2447 iv_table[PMCS_MSIX_GENERAL] = pmcs_general_ix;
2448 2448 iv_table[PMCS_MSIX_IODONE] = pmcs_iodone_ix;
2449 2449 iv_table[PMCS_MSIX_EVENTS] = pmcs_event_ix;
2450 2450 iv_table[PMCS_MSIX_FATAL] = pmcs_fatal_ix;
2451 2451 break;
2452 2452 default:
2453 2453 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
2454 2454 "%s: intr_cnt = %d - unexpected", __func__, pwp->intr_cnt);
2455 2455 kmem_free(iv_table, iv_table_size);
2456 2456 return (EAGAIN);
2457 2457 }
2458 2458
2459 2459 for (i = 0; i < pwp->intr_cnt; i++) {
2460 2460 r = ddi_intr_add_handler(pwp->ih_table[i], iv_table[i],
2461 2461 (caddr_t)pwp, NULL);
2462 2462 if (r != DDI_SUCCESS) {
2463 2463 kmem_free(iv_table, iv_table_size);
2464 2464 if (pmcs_remove_ihandlers(pwp, i)) {
2465 2465 return (EIO);
2466 2466 }
2467 2467 if (pmcs_free_intrs(pwp, i)) {
2468 2468 return (EIO);
2469 2469 }
2470 2470 pwp->intr_cnt = 0;
2471 2471 return (EAGAIN);
2472 2472 }
2473 2473 }
2474 2474
2475 2475 kmem_free(iv_table, iv_table_size);
2476 2476
2477 2477 if (ddi_intr_get_cap(pwp->ih_table[0], &pwp->intr_cap) != DDI_SUCCESS) {
2478 2478 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
2479 2479 "unable to get int capabilities");
2480 2480 if (pmcs_remove_ihandlers(pwp, pwp->intr_cnt)) {
2481 2481 return (EIO);
2482 2482 }
2483 2483 if (pmcs_free_intrs(pwp, pwp->intr_cnt)) {
2484 2484 return (EIO);
2485 2485 }
2486 2486 pwp->intr_cnt = 0;
2487 2487 return (EAGAIN);
2488 2488 }
2489 2489
2490 2490 if (pwp->intr_cap & DDI_INTR_FLAG_BLOCK) {
2491 2491 r = ddi_intr_block_enable(&pwp->ih_table[0], pwp->intr_cnt);
2492 2492 if (r != DDI_SUCCESS) {
2493 2493 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
2494 2494 "intr blk enable failed");
2495 2495 if (pmcs_remove_ihandlers(pwp, pwp->intr_cnt)) {
2496 2496 return (EIO);
2497 2497 }
2498 2498 if (pmcs_free_intrs(pwp, pwp->intr_cnt)) {
2499 2499 return (EIO);
2500 2500 }
2501 2501 pwp->intr_cnt = 0;
2502 2502 return (EFAULT);
2503 2503 }
2504 2504 } else {
2505 2505 for (i = 0; i < pwp->intr_cnt; i++) {
2506 2506 r = ddi_intr_enable(pwp->ih_table[i]);
2507 2507 if (r == DDI_SUCCESS) {
2508 2508 continue;
2509 2509 }
2510 2510 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
2511 2511 "unable to enable interrupt %d", i);
2512 2512 if (pmcs_disable_intrs(pwp, i)) {
2513 2513 return (EIO);
2514 2514 }
2515 2515 if (pmcs_remove_ihandlers(pwp, pwp->intr_cnt)) {
2516 2516 return (EIO);
2517 2517 }
2518 2518 if (pmcs_free_intrs(pwp, pwp->intr_cnt)) {
2519 2519 return (EIO);
2520 2520 }
2521 2521 pwp->intr_cnt = 0;
2522 2522 return (EAGAIN);
2523 2523 }
2524 2524 }
2525 2525
2526 2526 /*
2527 2527 * Set up locks.
2528 2528 */
2529 2529 if (ddi_intr_get_pri(pwp->ih_table[0], &pri) != DDI_SUCCESS) {
2530 2530 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
2531 2531 "unable to get interrupt priority");
2532 2532 if (pmcs_disable_intrs(pwp, pwp->intr_cnt)) {
2533 2533 return (EIO);
2534 2534 }
2535 2535 if (pmcs_remove_ihandlers(pwp, pwp->intr_cnt)) {
2536 2536 return (EIO);
2537 2537 }
2538 2538 if (pmcs_free_intrs(pwp, pwp->intr_cnt)) {
2539 2539 return (EIO);
2540 2540 }
2541 2541 pwp->intr_cnt = 0;
2542 2542 return (EAGAIN);
2543 2543 }
2544 2544
2545 2545 pwp->locks_initted = 1;
2546 2546 pwp->intr_pri = pri;
2547 2547 mutex_init(&pwp->lock, NULL, MUTEX_DRIVER, DDI_INTR_PRI(pri));
2548 2548 mutex_init(&pwp->dma_lock, NULL, MUTEX_DRIVER, DDI_INTR_PRI(pri));
2549 2549 mutex_init(&pwp->axil_lock, NULL, MUTEX_DRIVER, DDI_INTR_PRI(pri));
2550 2550 mutex_init(&pwp->cq_lock, NULL, MUTEX_DRIVER, DDI_INTR_PRI(pri));
2551 2551 mutex_init(&pwp->ict_lock, NULL, MUTEX_DRIVER, DDI_INTR_PRI(pri));
2552 2552 mutex_init(&pwp->config_lock, NULL, MUTEX_DRIVER, DDI_INTR_PRI(pri));
2553 2553 mutex_init(&pwp->wfree_lock, NULL, MUTEX_DRIVER, DDI_INTR_PRI(pri));
2554 2554 mutex_init(&pwp->pfree_lock, NULL, MUTEX_DRIVER, DDI_INTR_PRI(pri));
2555 2555 mutex_init(&pwp->dead_phylist_lock, NULL, MUTEX_DRIVER,
2556 2556 DDI_INTR_PRI(pri));
2557 2557 #ifdef DEBUG
2558 2558 mutex_init(&pwp->dbglock, NULL, MUTEX_DRIVER, DDI_INTR_PRI(pri));
2559 2559 #endif
2560 2560 cv_init(&pwp->ict_cv, NULL, CV_DRIVER, NULL);
2561 2561 cv_init(&pwp->drain_cv, NULL, CV_DRIVER, NULL);
2562 2562 cv_init(&pwp->config_cv, NULL, CV_DRIVER, NULL);
2563 2563 for (i = 0; i < PMCS_NIQ; i++) {
2564 2564 mutex_init(&pwp->iqp_lock[i], NULL,
2565 2565 MUTEX_DRIVER, DDI_INTR_PRI(pwp->intr_pri));
2566 2566 }
2567 2567 for (i = 0; i < pwp->cq_info.cq_threads; i++) {
2568 2568 mutex_init(&pwp->cq_info.cq_thr_info[i].cq_thr_lock, NULL,
2569 2569 MUTEX_DRIVER, DDI_INTR_PRI(pwp->intr_pri));
2570 2570 cv_init(&pwp->cq_info.cq_thr_info[i].cq_cv, NULL,
2571 2571 CV_DRIVER, NULL);
2572 2572 }
2573 2573
2574 2574 pmcs_prt(pwp, PMCS_PRT_INFO, NULL, NULL, "%d %s interrup%s configured",
2575 2575 pwp->intr_cnt, (pwp->int_type == PMCS_INT_MSIX)? "MSI-X" :
2576 2576 ((pwp->int_type == PMCS_INT_MSI)? "MSI" : "INT-X"),
2577 2577 pwp->intr_cnt == 1? "t" : "ts");
2578 2578
2579 2579
2580 2580 /*
2581 2581 * Enable Interrupts
2582 2582 */
2583 2583 if (pwp->intr_cnt > PMCS_NOQ) {
2584 2584 oqv_count = pwp->intr_cnt;
2585 2585 } else {
2586 2586 oqv_count = PMCS_NOQ;
2587 2587 }
2588 2588 for (pri = 0xffffffff, i = 0; i < oqv_count; i++) {
2589 2589 pri ^= (1 << i);
2590 2590 }
2591 2591
2592 2592 mutex_enter(&pwp->lock);
2593 2593 pwp->intr_mask = pri;
2594 2594 pmcs_wr_msgunit(pwp, PMCS_MSGU_OBDB_MASK, pwp->intr_mask);
2595 2595 pmcs_wr_msgunit(pwp, PMCS_MSGU_OBDB_CLEAR, 0xffffffff);
2596 2596 mutex_exit(&pwp->lock);
2597 2597
2598 2598 return (0);
2599 2599 }
2600 2600
2601 2601 static int
2602 2602 pmcs_teardown_intr(pmcs_hw_t *pwp)
2603 2603 {
2604 2604 if (pwp->intr_cnt) {
2605 2605 if (pmcs_disable_intrs(pwp, pwp->intr_cnt)) {
2606 2606 return (EIO);
2607 2607 }
2608 2608 if (pmcs_remove_ihandlers(pwp, pwp->intr_cnt)) {
2609 2609 return (EIO);
2610 2610 }
2611 2611 if (pmcs_free_intrs(pwp, pwp->intr_cnt)) {
2612 2612 return (EIO);
2613 2613 }
2614 2614 pwp->intr_cnt = 0;
2615 2615 }
2616 2616 return (0);
2617 2617 }
2618 2618
2619 2619 static uint_t
2620 2620 pmcs_general_ix(caddr_t arg1, caddr_t arg2)
2621 2621 {
2622 2622 pmcs_hw_t *pwp = (pmcs_hw_t *)((void *)arg1);
2623 2623 _NOTE(ARGUNUSED(arg2));
2624 2624 pmcs_general_intr(pwp);
2625 2625 return (DDI_INTR_CLAIMED);
2626 2626 }
2627 2627
2628 2628 static uint_t
2629 2629 pmcs_event_ix(caddr_t arg1, caddr_t arg2)
2630 2630 {
2631 2631 pmcs_hw_t *pwp = (pmcs_hw_t *)((void *)arg1);
2632 2632 _NOTE(ARGUNUSED(arg2));
2633 2633 pmcs_event_intr(pwp);
2634 2634 return (DDI_INTR_CLAIMED);
2635 2635 }
2636 2636
2637 2637 static uint_t
2638 2638 pmcs_iodone_ix(caddr_t arg1, caddr_t arg2)
2639 2639 {
2640 2640 _NOTE(ARGUNUSED(arg2));
2641 2641 pmcs_hw_t *pwp = (pmcs_hw_t *)((void *)arg1);
2642 2642
2643 2643 /*
2644 2644 * It's possible that if we just turned interrupt coalescing off
2645 2645 * (and thus, re-enabled auto clear for interrupts on the I/O outbound
2646 2646 * queue) that there was an interrupt already pending. We use
2647 2647 * io_intr_coal.int_cleared to ensure that we still drop in here and
2648 2648 * clear the appropriate interrupt bit one last time.
2649 2649 */
2650 2650 mutex_enter(&pwp->ict_lock);
2651 2651 if (pwp->io_intr_coal.timer_on ||
2652 2652 (pwp->io_intr_coal.int_cleared == B_FALSE)) {
2653 2653 pmcs_wr_msgunit(pwp, PMCS_MSGU_OBDB_CLEAR,
2654 2654 (1 << PMCS_OQ_IODONE));
2655 2655 pwp->io_intr_coal.int_cleared = B_TRUE;
2656 2656 }
2657 2657 mutex_exit(&pwp->ict_lock);
2658 2658
2659 2659 pmcs_iodone_intr(pwp);
2660 2660
2661 2661 return (DDI_INTR_CLAIMED);
2662 2662 }
2663 2663
2664 2664 static uint_t
2665 2665 pmcs_fatal_ix(caddr_t arg1, caddr_t arg2)
2666 2666 {
2667 2667 pmcs_hw_t *pwp = (pmcs_hw_t *)((void *)arg1);
2668 2668 _NOTE(ARGUNUSED(arg2));
2669 2669 pmcs_fatal_handler(pwp);
2670 2670 return (DDI_INTR_CLAIMED);
2671 2671 }
2672 2672
2673 2673 static uint_t
2674 2674 pmcs_nonio_ix(caddr_t arg1, caddr_t arg2)
2675 2675 {
2676 2676 _NOTE(ARGUNUSED(arg2));
2677 2677 pmcs_hw_t *pwp = (void *)arg1;
2678 2678 uint32_t obdb = pmcs_rd_msgunit(pwp, PMCS_MSGU_OBDB);
2679 2679
2680 2680 /*
2681 2681 * Check for Fatal Interrupts
2682 2682 */
2683 2683 if (obdb & (1 << PMCS_FATAL_INTERRUPT)) {
2684 2684 pmcs_fatal_handler(pwp);
2685 2685 return (DDI_INTR_CLAIMED);
2686 2686 }
2687 2687
2688 2688 if (obdb & (1 << PMCS_OQ_GENERAL)) {
2689 2689 pmcs_wr_msgunit(pwp, PMCS_MSGU_OBDB_CLEAR,
2690 2690 (1 << PMCS_OQ_GENERAL));
2691 2691 pmcs_general_intr(pwp);
2692 2692 pmcs_event_intr(pwp);
2693 2693 }
2694 2694
2695 2695 return (DDI_INTR_CLAIMED);
2696 2696 }
2697 2697
2698 2698 static uint_t
2699 2699 pmcs_all_intr(caddr_t arg1, caddr_t arg2)
2700 2700 {
2701 2701 _NOTE(ARGUNUSED(arg2));
2702 2702 pmcs_hw_t *pwp = (void *) arg1;
2703 2703 uint32_t obdb;
2704 2704 int handled = 0;
2705 2705
2706 2706 obdb = pmcs_rd_msgunit(pwp, PMCS_MSGU_OBDB);
2707 2707
2708 2708 /*
2709 2709 * Check for Fatal Interrupts
2710 2710 */
2711 2711 if (obdb & (1 << PMCS_FATAL_INTERRUPT)) {
2712 2712 pmcs_fatal_handler(pwp);
2713 2713 return (DDI_INTR_CLAIMED);
2714 2714 }
2715 2715
2716 2716 /*
2717 2717 * Check for Outbound Queue service needed
2718 2718 */
2719 2719 if (obdb & (1 << PMCS_OQ_IODONE)) {
2720 2720 pmcs_wr_msgunit(pwp, PMCS_MSGU_OBDB_CLEAR,
2721 2721 (1 << PMCS_OQ_IODONE));
2722 2722 obdb ^= (1 << PMCS_OQ_IODONE);
2723 2723 handled++;
2724 2724 pmcs_iodone_intr(pwp);
2725 2725 }
2726 2726 if (obdb & (1 << PMCS_OQ_GENERAL)) {
2727 2727 pmcs_wr_msgunit(pwp, PMCS_MSGU_OBDB_CLEAR,
2728 2728 (1 << PMCS_OQ_GENERAL));
2729 2729 obdb ^= (1 << PMCS_OQ_GENERAL);
2730 2730 handled++;
2731 2731 pmcs_general_intr(pwp);
2732 2732 }
2733 2733 if (obdb & (1 << PMCS_OQ_EVENTS)) {
2734 2734 pmcs_wr_msgunit(pwp, PMCS_MSGU_OBDB_CLEAR,
2735 2735 (1 << PMCS_OQ_EVENTS));
2736 2736 obdb ^= (1 << PMCS_OQ_EVENTS);
2737 2737 handled++;
2738 2738 pmcs_event_intr(pwp);
2739 2739 }
2740 2740 if (obdb) {
2741 2741 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
2742 2742 "interrupt bits not handled (0x%x)", obdb);
2743 2743 pmcs_wr_msgunit(pwp, PMCS_MSGU_OBDB_CLEAR, obdb);
2744 2744 handled++;
2745 2745 }
2746 2746 if (pwp->int_type == PMCS_INT_MSI) {
2747 2747 handled++;
2748 2748 }
2749 2749 return (handled? DDI_INTR_CLAIMED : DDI_INTR_UNCLAIMED);
2750 2750 }
2751 2751
2752 2752 void
2753 2753 pmcs_fatal_handler(pmcs_hw_t *pwp)
2754 2754 {
2755 2755 pmcs_prt(pwp, PMCS_PRT_ERR, NULL, NULL, "Fatal Interrupt caught");
2756 2756
2757 2757 mutex_enter(&pwp->lock);
2758 2758 pwp->state = STATE_DEAD;
2759 2759
2760 2760 /*
2761 2761 * Attempt a hot reset. In case of failure, pmcs_hot_reset() will
2762 2762 * handle the failure and issue any required FM notifications.
2763 2763 * See pmcs_subr.c for more details.
2764 2764 */
2765 2765 if (pmcs_hot_reset(pwp)) {
2766 2766 pmcs_prt(pwp, PMCS_PRT_ERR, NULL, NULL,
2767 2767 "%s: hot reset failure", __func__);
2768 2768 } else {
2769 2769 pmcs_prt(pwp, PMCS_PRT_ERR, NULL, NULL,
2770 2770 "%s: hot reset complete", __func__);
2771 2771 pwp->last_reset_reason = PMCS_LAST_RST_FATAL_ERROR;
2772 2772 }
2773 2773 mutex_exit(&pwp->lock);
2774 2774 }
2775 2775
2776 2776 /*
2777 2777 * Called with PHY lock and target statlock held and scratch acquired.
2778 2778 */
2779 2779 boolean_t
2780 2780 pmcs_assign_device(pmcs_hw_t *pwp, pmcs_xscsi_t *tgt)
2781 2781 {
2782 2782 pmcs_phy_t *pptr = tgt->phy;
2783 2783
2784 2784 switch (pptr->dtype) {
2785 2785 case SAS:
2786 2786 case EXPANDER:
2787 2787 break;
2788 2788 case SATA:
2789 2789 tgt->ca = 1;
2790 2790 break;
2791 2791 default:
2792 2792 pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, tgt,
2793 2793 "%s: Target %p has PHY %p with invalid dtype",
2794 2794 __func__, (void *)tgt, (void *)pptr);
2795 2795 return (B_FALSE);
2796 2796 }
2797 2797
2798 2798 tgt->new = 1;
2799 2799 tgt->dev_gone = 0;
2800 2800 tgt->recover_wait = 0;
2801 2801
2802 2802 pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, tgt,
2803 2803 "%s: config %s vtgt %u for " SAS_ADDR_FMT, __func__,
2804 2804 pptr->path, tgt->target_num, SAS_ADDR_PRT(pptr->sas_address));
2805 2805
2806 2806 if (pmcs_add_new_device(pwp, tgt) != B_TRUE) {
2807 2807 pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, tgt,
2808 2808 "%s: Failed for vtgt %u / WWN " SAS_ADDR_FMT, __func__,
2809 2809 tgt->target_num, SAS_ADDR_PRT(pptr->sas_address));
2810 2810 mutex_destroy(&tgt->statlock);
2811 2811 mutex_destroy(&tgt->wqlock);
2812 2812 mutex_destroy(&tgt->aqlock);
2813 2813 return (B_FALSE);
2814 2814 }
2815 2815
2816 2816 return (B_TRUE);
2817 2817 }
2818 2818
2819 2819 /*
2820 2820 * Called with softstate lock held
2821 2821 */
2822 2822 void
2823 2823 pmcs_remove_device(pmcs_hw_t *pwp, pmcs_phy_t *pptr)
2824 2824 {
2825 2825 pmcs_xscsi_t *xp;
2826 2826 unsigned int vtgt;
2827 2827
2828 2828 ASSERT(mutex_owned(&pwp->lock));
2829 2829
2830 2830 for (vtgt = 0; vtgt < pwp->max_dev; vtgt++) {
2831 2831 xp = pwp->targets[vtgt];
2832 2832 if (xp == NULL) {
2833 2833 continue;
2834 2834 }
2835 2835
2836 2836 mutex_enter(&xp->statlock);
2837 2837 if (xp->phy == pptr) {
2838 2838 if (xp->new) {
2839 2839 xp->new = 0;
2840 2840 pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, xp,
2841 2841 "cancel config of vtgt %u", vtgt);
2842 2842 } else {
2843 2843 pmcs_clear_xp(pwp, xp);
2844 2844 pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, xp,
2845 2845 "Removed tgt 0x%p vtgt %u",
2846 2846 (void *)xp, vtgt);
2847 2847 }
2848 2848 mutex_exit(&xp->statlock);
2849 2849 break;
2850 2850 }
2851 2851 mutex_exit(&xp->statlock);
2852 2852 }
2853 2853 }
2854 2854
2855 2855 void
2856 2856 pmcs_prt_impl(pmcs_hw_t *pwp, pmcs_prt_level_t level,
2857 2857 pmcs_phy_t *phyp, pmcs_xscsi_t *target, const char *fmt, ...)
2858 2858 {
2859 2859 va_list ap;
2860 2860 int written = 0;
2861 2861 char *ptr;
2862 2862 uint32_t elem_size = PMCS_TBUF_ELEM_SIZE - 1;
2863 2863 boolean_t system_log;
2864 2864 int system_log_level;
2865 2865 hrtime_t hrtimestamp;
2866 2866
2867 2867 switch (level) {
2868 2868 case PMCS_PRT_DEBUG_DEVEL:
2869 2869 case PMCS_PRT_DEBUG_DEV_STATE:
2870 2870 case PMCS_PRT_DEBUG_PHY_LOCKING:
2871 2871 case PMCS_PRT_DEBUG_SCSI_STATUS:
2872 2872 case PMCS_PRT_DEBUG_UNDERFLOW:
2873 2873 case PMCS_PRT_DEBUG_CONFIG:
2874 2874 case PMCS_PRT_DEBUG_IPORT:
2875 2875 case PMCS_PRT_DEBUG_MAP:
2876 2876 case PMCS_PRT_DEBUG3:
2877 2877 case PMCS_PRT_DEBUG2:
2878 2878 case PMCS_PRT_DEBUG1:
2879 2879 case PMCS_PRT_DEBUG:
2880 2880 system_log = B_FALSE;
2881 2881 break;
2882 2882 case PMCS_PRT_INFO:
2883 2883 system_log = B_TRUE;
2884 2884 system_log_level = CE_CONT;
2885 2885 break;
2886 2886 case PMCS_PRT_WARN:
2887 2887 system_log = B_TRUE;
2888 2888 system_log_level = CE_NOTE;
2889 2889 break;
2890 2890 case PMCS_PRT_ERR:
2891 2891 system_log = B_TRUE;
2892 2892 system_log_level = CE_WARN;
2893 2893 break;
2894 2894 default:
2895 2895 return;
2896 2896 }
2897 2897
2898 2898 mutex_enter(&pmcs_trace_lock);
2899 2899 hrtimestamp = gethrtime();
2900 2900 gethrestime(&pmcs_tbuf_ptr->timestamp);
2901 2901
2902 2902 if (pwp->fw_timestamp != 0) {
2903 2903 /* Calculate the approximate firmware time stamp... */
2904 2904 pmcs_tbuf_ptr->fw_timestamp = pwp->fw_timestamp +
2905 2905 ((hrtimestamp - pwp->hrtimestamp) / PMCS_FWLOG_TIMER_DIV);
2906 2906 } else {
2907 2907 pmcs_tbuf_ptr->fw_timestamp = 0;
2908 2908 }
2909 2909
2910 2910 ptr = pmcs_tbuf_ptr->buf;
2911 2911
2912 2912 /*
2913 2913 * Store the pertinent PHY and target information if there is any
2914 2914 */
2915 2915 if (target == NULL) {
2916 2916 pmcs_tbuf_ptr->target_num = PMCS_INVALID_TARGET_NUM;
2917 2917 pmcs_tbuf_ptr->target_ua[0] = '\0';
2918 2918 } else {
2919 2919 pmcs_tbuf_ptr->target_num = target->target_num;
2920 2920 (void) strncpy(pmcs_tbuf_ptr->target_ua, target->ua,
2921 2921 PMCS_TBUF_UA_MAX_SIZE);
2922 2922 }
2923 2923
2924 2924 if (phyp == NULL) {
2925 2925 (void) memset(pmcs_tbuf_ptr->phy_sas_address, 0, 8);
2926 2926 pmcs_tbuf_ptr->phy_path[0] = '\0';
2927 2927 pmcs_tbuf_ptr->phy_dtype = NOTHING;
2928 2928 } else {
2929 2929 (void) memcpy(pmcs_tbuf_ptr->phy_sas_address,
2930 2930 phyp->sas_address, 8);
2931 2931 (void) strncpy(pmcs_tbuf_ptr->phy_path, phyp->path, 32);
2932 2932 pmcs_tbuf_ptr->phy_dtype = phyp->dtype;
2933 2933 }
2934 2934
2935 2935 written += snprintf(ptr, elem_size, "pmcs%d:%d: ",
2936 2936 ddi_get_instance(pwp->dip), level);
2937 2937 ptr += strlen(ptr);
2938 2938 va_start(ap, fmt);
2939 2939 written += vsnprintf(ptr, elem_size - written, fmt, ap);
2940 2940 va_end(ap);
2941 2941 if (written > elem_size - 1) {
2942 2942 /* Indicate truncation */
2943 2943 pmcs_tbuf_ptr->buf[elem_size - 1] = '+';
2944 2944 }
2945 2945 if (++pmcs_tbuf_idx == pmcs_tbuf_num_elems) {
2946 2946 pmcs_tbuf_ptr = pmcs_tbuf;
2947 2947 pmcs_tbuf_wrap = B_TRUE;
2948 2948 pmcs_tbuf_idx = 0;
2949 2949 } else {
2950 2950 ++pmcs_tbuf_ptr;
2951 2951 }
2952 2952 mutex_exit(&pmcs_trace_lock);
2953 2953
2954 2954 /*
2955 2955 	 * When pmcs_force_syslog is non-zero, everything also goes
2956 2956 * to syslog, at CE_CONT level.
2957 2957 */
2958 2958 if (pmcs_force_syslog) {
2959 2959 system_log = B_TRUE;
2960 2960 system_log_level = CE_CONT;
2961 2961 }
2962 2962
2963 2963 /*
2964 2964 * Anything that comes in with PMCS_PRT_INFO, WARN, or ERR also
2965 2965 * goes to syslog.
2966 2966 */
2967 2967 if (system_log) {
2968 2968 char local[196];
2969 2969
2970 2970 switch (system_log_level) {
2971 2971 case CE_CONT:
2972 2972 (void) snprintf(local, sizeof (local), "%sINFO: ",
2973 2973 pmcs_console ? "" : "?");
2974 2974 break;
2975 2975 case CE_NOTE:
2976 2976 case CE_WARN:
2977 2977 local[0] = 0;
2978 2978 break;
2979 2979 default:
2980 2980 return;
2981 2981 }
2982 2982
2983 2983 ptr = local;
2984 2984 ptr += strlen(local);
2985 2985 (void) snprintf(ptr, (sizeof (local)) -
2986 2986 ((size_t)ptr - (size_t)local), "pmcs%d: ",
2987 2987 ddi_get_instance(pwp->dip));
2988 2988 ptr += strlen(ptr);
2989 2989 va_start(ap, fmt);
2990 2990 (void) vsnprintf(ptr,
2991 2991 (sizeof (local)) - ((size_t)ptr - (size_t)local), fmt, ap);
2992 2992 va_end(ap);
2993 2993 if (level == CE_CONT) {
2994 2994 (void) strlcat(local, "\n", sizeof (local));
2995 2995 }
2996 2996 cmn_err(system_log_level, local);
2997 2997 }
2998 2998
2999 2999 }
3000 3000
3001 3001 /*
3002 3002 * pmcs_acquire_scratch
3003 3003 *
3004 3004 * If "wait" is true, the caller will wait until it can acquire the scratch.
3005 3005 * This implies the caller needs to be in a context where spinning for an
3006 3006 * indeterminate amount of time is acceptable.
3007 3007 */
3008 3008 int
3009 3009 pmcs_acquire_scratch(pmcs_hw_t *pwp, boolean_t wait)
3010 3010 {
3011 3011 int rval;
3012 3012
3013 3013 if (!wait) {
3014 3014 return (atomic_swap_8(&pwp->scratch_locked, 1));
3015 3015 }
3016 3016
3017 3017 /*
3018 3018 * Caller will wait for scratch.
3019 3019 */
3020 3020 while ((rval = atomic_swap_8(&pwp->scratch_locked, 1)) != 0) {
3021 3021 drv_usecwait(100);
3022 3022 }
3023 3023
3024 3024 return (rval);
3025 3025 }
3026 3026
3027 3027 void
3028 3028 pmcs_release_scratch(pmcs_hw_t *pwp)
3029 3029 {
3030 3030 pwp->scratch_locked = 0;
3031 3031 }
3032 3032
3033 3033 /* Called with iport_lock and phy lock held */
3034 3034 void
3035 3035 pmcs_create_one_phy_stats(pmcs_iport_t *iport, pmcs_phy_t *phyp)
3036 3036 {
3037 3037 sas_phy_stats_t *ps;
3038 3038 pmcs_hw_t *pwp;
3039 3039 int ndata;
3040 3040 char ks_name[KSTAT_STRLEN];
3041 3041
3042 3042 ASSERT(mutex_owned(&iport->lock));
3043 3043 pwp = iport->pwp;
3044 3044 ASSERT(pwp != NULL);
3045 3045 ASSERT(mutex_owned(&phyp->phy_lock));
3046 3046
3047 3047 if (phyp->phy_stats != NULL) {
3048 3048 /*
3049 3049 * Delete existing kstats with name containing
3050 3050 * old iport instance# and allow creation of
3051 3051 * new kstats with new iport instance# in the name.
3052 3052 */
3053 3053 kstat_delete(phyp->phy_stats);
3054 3054 }
3055 3055
3056 3056 ndata = (sizeof (sas_phy_stats_t)/sizeof (kstat_named_t));
3057 3057
3058 3058 (void) snprintf(ks_name, sizeof (ks_name),
3059 3059 "%s.%llx.%d.%d", ddi_driver_name(iport->dip),
3060 3060 (longlong_t)pwp->sas_wwns[0],
3061 3061 ddi_get_instance(iport->dip), phyp->phynum);
3062 3062
3063 3063 phyp->phy_stats = kstat_create("pmcs",
3064 3064 ddi_get_instance(iport->dip), ks_name, KSTAT_SAS_PHY_CLASS,
3065 3065 KSTAT_TYPE_NAMED, ndata, 0);
3066 3066
3067 3067 if (phyp->phy_stats == NULL) {
3068 3068 pmcs_prt(pwp, PMCS_PRT_DEBUG, phyp, NULL,
3069 3069 "%s: Failed to create %s kstats for PHY(0x%p) at %s",
3070 3070 __func__, ks_name, (void *)phyp, phyp->path);
3071 3071 return;
3072 3072 }
3073 3073
3074 3074 ps = (sas_phy_stats_t *)phyp->phy_stats->ks_data;
3075 3075
3076 3076 kstat_named_init(&ps->seconds_since_last_reset,
3077 3077 "SecondsSinceLastReset", KSTAT_DATA_ULONGLONG);
3078 3078 kstat_named_init(&ps->tx_frames,
3079 3079 "TxFrames", KSTAT_DATA_ULONGLONG);
3080 3080 kstat_named_init(&ps->rx_frames,
3081 3081 "RxFrames", KSTAT_DATA_ULONGLONG);
3082 3082 kstat_named_init(&ps->tx_words,
3083 3083 "TxWords", KSTAT_DATA_ULONGLONG);
3084 3084 kstat_named_init(&ps->rx_words,
3085 3085 "RxWords", KSTAT_DATA_ULONGLONG);
3086 3086 kstat_named_init(&ps->invalid_dword_count,
3087 3087 "InvalidDwordCount", KSTAT_DATA_ULONGLONG);
3088 3088 kstat_named_init(&ps->running_disparity_error_count,
3089 3089 "RunningDisparityErrorCount", KSTAT_DATA_ULONGLONG);
3090 3090 kstat_named_init(&ps->loss_of_dword_sync_count,
3091 3091 "LossofDwordSyncCount", KSTAT_DATA_ULONGLONG);
3092 3092 kstat_named_init(&ps->phy_reset_problem_count,
3093 3093 "PhyResetProblemCount", KSTAT_DATA_ULONGLONG);
3094 3094
3095 3095 phyp->phy_stats->ks_private = phyp;
3096 3096 phyp->phy_stats->ks_update = pmcs_update_phy_stats;
3097 3097 kstat_install(phyp->phy_stats);
3098 3098 }
3099 3099
3100 3100 static void
3101 3101 pmcs_create_all_phy_stats(pmcs_iport_t *iport)
3102 3102 {
3103 3103 pmcs_hw_t *pwp;
3104 3104 pmcs_phy_t *phyp;
3105 3105
3106 3106 ASSERT(iport != NULL);
3107 3107 pwp = iport->pwp;
3108 3108 ASSERT(pwp != NULL);
3109 3109
3110 3110 mutex_enter(&iport->lock);
3111 3111
3112 3112 for (phyp = list_head(&iport->phys);
3113 3113 phyp != NULL;
3114 3114 phyp = list_next(&iport->phys, phyp)) {
3115 3115
3116 3116 mutex_enter(&phyp->phy_lock);
3117 3117 pmcs_create_one_phy_stats(iport, phyp);
3118 3118 mutex_exit(&phyp->phy_lock);
3119 3119 }
3120 3120
3121 3121 mutex_exit(&iport->lock);
3122 3122 }
3123 3123
3124 3124 int
3125 3125 pmcs_update_phy_stats(kstat_t *ks, int rw)
3126 3126 {
3127 3127 int val, ret = DDI_FAILURE;
3128 3128 pmcs_phy_t *pptr = (pmcs_phy_t *)ks->ks_private;
3129 3129 pmcs_hw_t *pwp = pptr->pwp;
3130 3130 sas_phy_stats_t *ps = ks->ks_data;
3131 3131
3132 3132 _NOTE(ARGUNUSED(rw));
3133 3133 ASSERT((pptr != NULL) && (pwp != NULL));
3134 3134
3135 3135 /*
3136 3136 * We just want to lock against other invocations of kstat;
3137 3137 * we don't need to pmcs_lock_phy() for this.
3138 3138 */
3139 3139 mutex_enter(&pptr->phy_lock);
3140 3140
3141 3141 /* Get Stats from Chip */
3142 3142 val = pmcs_get_diag_report(pwp, PMCS_INVALID_DWORD_CNT, pptr->phynum);
3143 3143 if (val == DDI_FAILURE)
3144 3144 goto fail;
3145 3145 ps->invalid_dword_count.value.ull = (unsigned long long)val;
3146 3146
3147 3147 val = pmcs_get_diag_report(pwp, PMCS_DISPARITY_ERR_CNT, pptr->phynum);
3148 3148 if (val == DDI_FAILURE)
3149 3149 goto fail;
3150 3150 ps->running_disparity_error_count.value.ull = (unsigned long long)val;
3151 3151
3152 3152 val = pmcs_get_diag_report(pwp, PMCS_LOST_DWORD_SYNC_CNT, pptr->phynum);
3153 3153 if (val == DDI_FAILURE)
3154 3154 goto fail;
3155 3155 ps->loss_of_dword_sync_count.value.ull = (unsigned long long)val;
3156 3156
3157 3157 val = pmcs_get_diag_report(pwp, PMCS_RESET_FAILED_CNT, pptr->phynum);
3158 3158 if (val == DDI_FAILURE)
3159 3159 goto fail;
3160 3160 ps->phy_reset_problem_count.value.ull = (unsigned long long)val;
3161 3161
3162 3162 ret = DDI_SUCCESS;
3163 3163 fail:
3164 3164 mutex_exit(&pptr->phy_lock);
3165 3165 return (ret);
3166 3166 }
3167 3167
3168 3168 /*ARGSUSED*/
3169 3169 static int
3170 3170 pmcs_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err, const void *impl_data)
3171 3171 {
3172 3172 /*
3173 3173 * as the driver can always deal with an error in any dma or
3174 3174 * access handle, we can just return the fme_status value.
3175 3175 */
3176 3176 pci_ereport_post(dip, err, NULL);
3177 3177 return (err->fme_status);
3178 3178 }
3179 3179
3180 3180 static void
3181 3181 pmcs_fm_init(pmcs_hw_t *pwp)
3182 3182 {
3183 3183 ddi_iblock_cookie_t fm_ibc;
3184 3184
3185 3185 /* Only register with IO Fault Services if we have some capability */
3186 3186 if (pwp->fm_capabilities) {
3187 3187 /* Adjust access and dma attributes for FMA */
3188 3188 pwp->reg_acc_attr.devacc_attr_access = DDI_FLAGERR_ACC;
3189 3189 pwp->iqp_dma_attr.dma_attr_flags |= DDI_DMA_FLAGERR;
3190 3190 pwp->oqp_dma_attr.dma_attr_flags |= DDI_DMA_FLAGERR;
3191 3191 pwp->cip_dma_attr.dma_attr_flags |= DDI_DMA_FLAGERR;
3192 3192 pwp->fwlog_dma_attr.dma_attr_flags |= DDI_DMA_FLAGERR;
3193 3193
3194 3194 /*
3195 3195 * Register capabilities with IO Fault Services.
3196 3196 */
3197 3197 ddi_fm_init(pwp->dip, &pwp->fm_capabilities, &fm_ibc);
3198 3198
3199 3199 /*
3200 3200 * Initialize pci ereport capabilities if ereport
3201 3201 * capable (should always be.)
3202 3202 */
3203 3203 if (DDI_FM_EREPORT_CAP(pwp->fm_capabilities) ||
3204 3204 DDI_FM_ERRCB_CAP(pwp->fm_capabilities)) {
3205 3205 pci_ereport_setup(pwp->dip);
3206 3206 }
3207 3207
3208 3208 /*
3209 3209 * Register error callback if error callback capable.
3210 3210 */
3211 3211 if (DDI_FM_ERRCB_CAP(pwp->fm_capabilities)) {
3212 3212 ddi_fm_handler_register(pwp->dip,
3213 3213 pmcs_fm_error_cb, (void *) pwp);
3214 3214 }
3215 3215 }
3216 3216 }
3217 3217
3218 3218 static void
3219 3219 pmcs_fm_fini(pmcs_hw_t *pwp)
3220 3220 {
3221 3221 /* Only unregister FMA capabilities if registered */
3222 3222 if (pwp->fm_capabilities) {
3223 3223 /*
3224 3224 * Un-register error callback if error callback capable.
3225 3225 */
3226 3226 if (DDI_FM_ERRCB_CAP(pwp->fm_capabilities)) {
3227 3227 ddi_fm_handler_unregister(pwp->dip);
3228 3228 }
3229 3229
3230 3230 /*
3231 3231 * Release any resources allocated by pci_ereport_setup()
3232 3232 */
3233 3233 if (DDI_FM_EREPORT_CAP(pwp->fm_capabilities) ||
3234 3234 DDI_FM_ERRCB_CAP(pwp->fm_capabilities)) {
3235 3235 pci_ereport_teardown(pwp->dip);
3236 3236 }
3237 3237
3238 3238 /* Unregister from IO Fault Services */
3239 3239 ddi_fm_fini(pwp->dip);
3240 3240
3241 3241 /* Adjust access and dma attributes for FMA */
3242 3242 pwp->reg_acc_attr.devacc_attr_access = DDI_DEFAULT_ACC;
3243 3243 pwp->iqp_dma_attr.dma_attr_flags &= ~DDI_DMA_FLAGERR;
3244 3244 pwp->oqp_dma_attr.dma_attr_flags &= ~DDI_DMA_FLAGERR;
3245 3245 pwp->cip_dma_attr.dma_attr_flags &= ~DDI_DMA_FLAGERR;
3246 3246 pwp->fwlog_dma_attr.dma_attr_flags &= ~DDI_DMA_FLAGERR;
3247 3247 }
3248 3248 }
3249 3249
3250 3250 static boolean_t
3251 3251 pmcs_fabricate_wwid(pmcs_hw_t *pwp)
3252 3252 {
3253 3253 char *cp, c;
3254 3254 uint64_t adr;
3255 3255 int i;
3256 3256
3257 3257 cp = &c;
3258 3258 (void) ddi_strtoul(hw_serial, &cp, 10, (unsigned long *)&adr);
3259 3259
3260 3260 if (adr == 0) {
3261 3261 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
3262 3262 "%s: No serial number available to fabricate WWN",
3263 3263 __func__);
3264 3264
3265 3265 adr = (uint64_t)gethrtime();
3266 3266 }
3267 3267
3268 3268 adr <<= 8;
3269 3269 adr |= ((uint64_t)ddi_get_instance(pwp->dip) << 52);
3270 3270 adr |= (5ULL << 60);
3271 3271
3272 3272 for (i = 0; i < PMCS_MAX_PORTS; i++) {
3273 3273 pwp->sas_wwns[i] = adr + i;
3274 3274 }
3275 3275
3276 3276 return (B_TRUE);
3277 3277 }
3110 lines elided