7127 remove -Wno-missing-braces from Makefile.uts
--- old/usr/src/uts/common/io/ib/adapters/hermon/hermon.c
+++ new/usr/src/uts/common/io/ib/adapters/hermon/hermon.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21
22 22 /*
23 23 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
24 24 */
25 25
26 26 /*
27 27 * hermon.c
28 28 * Hermon (InfiniBand) HCA Driver attach/detach Routines
29 29 *
30 30 * Implements all the routines necessary for the attach, setup,
31 31 * initialization (and subsequent possible teardown and detach) of the
32 32 * Hermon InfiniBand HCA driver.
33 33 */
34 34
35 35 #include <sys/types.h>
36 36 #include <sys/file.h>
37 37 #include <sys/open.h>
38 38 #include <sys/conf.h>
39 39 #include <sys/ddi.h>
40 40 #include <sys/sunddi.h>
41 41 #include <sys/modctl.h>
42 42 #include <sys/stat.h>
43 43 #include <sys/pci.h>
44 44 #include <sys/pci_cap.h>
45 45 #include <sys/bitmap.h>
46 46 #include <sys/policy.h>
47 47
48 48 #include <sys/ib/adapters/hermon/hermon.h>
49 49
50 50 /* /etc/system can tune this down, if that is desirable. */
51 51 int hermon_msix_max = HERMON_MSIX_MAX;
52 52
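The comment above refers to the standard /etc/system tuning mechanism. A minimal sketch of the line that would lower this limit, using the usual module:variable syntax (the value 4 is only an illustration; the default comes from HERMON_MSIX_MAX):

    set hermon:hermon_msix_max = 4
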
53 53 /* The following works around a problem in pre-2_7_000 firmware. */
54 54 #define HERMON_FW_WORKAROUND
55 55
56 56 int hermon_verbose = 0;
57 57
58 58 /* Hermon HCA State Pointer */
59 59 void *hermon_statep;
60 60
61 61 int debug_vpd = 0;
62 62
63 63 /* Disable the internal error-check polling thread */
64 64 int hermon_no_inter_err_chk = 0;
65 65
66 66 /*
67 67 * The Hermon "userland resource database" is common to instances of the
68 68 * Hermon HCA driver. This structure "hermon_userland_rsrc_db" contains all
69 69 * the necessary information to maintain it.
70 70 */
71 71 hermon_umap_db_t hermon_userland_rsrc_db;
72 72
73 73 static int hermon_attach(dev_info_t *, ddi_attach_cmd_t);
74 74 static int hermon_detach(dev_info_t *, ddi_detach_cmd_t);
75 75 static int hermon_open(dev_t *, int, int, cred_t *);
76 76 static int hermon_close(dev_t, int, int, cred_t *);
77 77 static int hermon_getinfo(dev_info_t *, ddi_info_cmd_t, void *, void **);
78 78
79 79 static int hermon_drv_init(hermon_state_t *state, dev_info_t *dip,
80 80 int instance);
81 81 static void hermon_drv_fini(hermon_state_t *state);
82 82 static void hermon_drv_fini2(hermon_state_t *state);
83 83 static int hermon_isr_init(hermon_state_t *state);
84 84 static void hermon_isr_fini(hermon_state_t *state);
85 85
86 86 static int hermon_hw_init(hermon_state_t *state);
87 87
88 88 static void hermon_hw_fini(hermon_state_t *state,
89 89 hermon_drv_cleanup_level_t cleanup);
90 90 static int hermon_soft_state_init(hermon_state_t *state);
91 91 static void hermon_soft_state_fini(hermon_state_t *state);
92 92 static int hermon_icm_config_setup(hermon_state_t *state,
93 93 hermon_hw_initqueryhca_t *inithca);
94 94 static void hermon_icm_tables_init(hermon_state_t *state);
95 95 static void hermon_icm_tables_fini(hermon_state_t *state);
96 96 static int hermon_icm_dma_init(hermon_state_t *state);
97 97 static void hermon_icm_dma_fini(hermon_state_t *state);
98 98 static void hermon_inithca_set(hermon_state_t *state,
99 99 hermon_hw_initqueryhca_t *inithca);
100 100 static int hermon_hca_port_init(hermon_state_t *state);
101 101 static int hermon_hca_ports_shutdown(hermon_state_t *state, uint_t num_init);
102 102 static int hermon_internal_uarpg_init(hermon_state_t *state);
103 103 static void hermon_internal_uarpg_fini(hermon_state_t *state);
104 104 static int hermon_special_qp_contexts_reserve(hermon_state_t *state);
105 105 static void hermon_special_qp_contexts_unreserve(hermon_state_t *state);
106 106 static int hermon_sw_reset(hermon_state_t *state);
107 107 static int hermon_mcg_init(hermon_state_t *state);
108 108 static void hermon_mcg_fini(hermon_state_t *state);
109 109 static int hermon_fw_version_check(hermon_state_t *state);
110 110 static void hermon_device_info_report(hermon_state_t *state);
111 111 static int hermon_pci_capability_list(hermon_state_t *state,
112 112 ddi_acc_handle_t hdl);
113 113 static void hermon_pci_capability_vpd(hermon_state_t *state,
114 114 ddi_acc_handle_t hdl, uint_t offset);
115 115 static int hermon_pci_read_vpd(ddi_acc_handle_t hdl, uint_t offset,
116 116 uint32_t addr, uint32_t *data);
117 117 static int hermon_intr_or_msi_init(hermon_state_t *state);
118 118 static int hermon_add_intrs(hermon_state_t *state, int intr_type);
119 119 static int hermon_intr_or_msi_fini(hermon_state_t *state);
120 120 void hermon_pci_capability_msix(hermon_state_t *state, ddi_acc_handle_t hdl,
121 121 uint_t offset);
122 122
123 123 static uint64_t hermon_size_icm(hermon_state_t *state);
124 124
125 125 /* X86 fastreboot support */
126 126 static ushort_t get_msix_ctrl(dev_info_t *);
127 127 static size_t get_msix_tbl_size(dev_info_t *);
128 128 static size_t get_msix_pba_size(dev_info_t *);
129 129 static void hermon_set_msix_info(hermon_state_t *);
130 130 static int hermon_intr_disable(hermon_state_t *);
131 131 static int hermon_quiesce(dev_info_t *);
132 132
133 133
134 134 /* Character/Block Operations */
135 135 static struct cb_ops hermon_cb_ops = {
136 136 hermon_open, /* open */
137 137 hermon_close, /* close */
138 138 nodev, /* strategy (block) */
139 139 nodev, /* print (block) */
140 140 nodev, /* dump (block) */
141 141 nodev, /* read */
142 142 nodev, /* write */
143 143 hermon_ioctl, /* ioctl */
144 144 hermon_devmap, /* devmap */
145 145 NULL, /* mmap */
146 146 nodev, /* segmap */
147 147 nochpoll, /* chpoll */
148 148 ddi_prop_op, /* prop_op */
149 149 NULL, /* streams */
150 150 D_NEW | D_MP |
151 151 D_64BIT | D_HOTPLUG |
152 152 D_DEVMAP, /* flags */
153 153 CB_REV /* rev */
154 154 };
155 155
156 156 /* Driver Operations */
157 157 static struct dev_ops hermon_ops = {
158 158 DEVO_REV, /* struct rev */
159 159 0, /* refcnt */
160 160 hermon_getinfo, /* getinfo */
161 161 nulldev, /* identify */
162 162 nulldev, /* probe */
163 163 hermon_attach, /* attach */
164 164 hermon_detach, /* detach */
165 165 nodev, /* reset */
166 166 &hermon_cb_ops, /* cb_ops */
167 167 NULL, /* bus_ops */
168 168 nodev, /* power */
169 169 hermon_quiesce, /* devo_quiesce */
170 170 };
171 171
[ 171 lines elided ]
172 172 /* Module Driver Info */
173 173 static struct modldrv hermon_modldrv = {
174 174 &mod_driverops,
175 175 "ConnectX IB Driver",
176 176 &hermon_ops
177 177 };
178 178
179 179 /* Module Linkage */
180 180 static struct modlinkage hermon_modlinkage = {
181 181 MODREV_1,
182 - &hermon_modldrv,
183 - NULL
182 + { &hermon_modldrv, NULL }
184 183 };
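
This brace change is the substance of the webrev for this file: with -Wno-missing-braces removed from Makefile.uts (per the synopsis above), the compiler now warns when the pointer array inside struct modlinkage is initialized without its own braces. A minimal sketch of the pattern, using an illustrative struct rather than the real modlinkage definition:

    /* stand-in types, for illustration only */
    static struct modldrv_stub { int dummy; } some_drv;

    struct linkage_like {
            int      rev;
            void    *linkage[4];    /* array member, like ml_linkage */
    };

    /* draws -Wmissing-braces: the array sub-object is not braced */
    static struct linkage_like warned = { 1, &some_drv, NULL };

    /* clean: the array initializer carries its own braces */
    static struct linkage_like clean  = { 1, { &some_drv, NULL } };

Both initializers are legal C and produce the same object; the fully braced form simply keeps the warning-enabled build clean.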
185 184
186 185 /*
187 186 * This extern refers to the ibc_operations_t function vector that is defined
188 187 * in the hermon_ci.c file.
189 188 */
190 189 extern ibc_operations_t hermon_ibc_ops;
191 190
192 191 /*
193 192 * _init()
194 193 */
195 194 int
196 195 _init()
197 196 {
198 197 int status;
199 198
200 199 status = ddi_soft_state_init(&hermon_statep, sizeof (hermon_state_t),
201 200 (size_t)HERMON_INITIAL_STATES);
202 201 if (status != 0) {
203 202 return (status);
204 203 }
205 204
206 205 status = ibc_init(&hermon_modlinkage);
207 206 if (status != 0) {
208 207 ddi_soft_state_fini(&hermon_statep);
209 208 return (status);
210 209 }
211 210
212 211 status = mod_install(&hermon_modlinkage);
213 212 if (status != 0) {
214 213 ibc_fini(&hermon_modlinkage);
215 214 ddi_soft_state_fini(&hermon_statep);
216 215 return (status);
217 216 }
218 217
219 218 /* Initialize the Hermon "userland resources database" */
220 219 hermon_umap_db_init();
221 220
222 221 return (status);
223 222 }
224 223
225 224
226 225 /*
227 226 * _info()
228 227 */
229 228 int
230 229 _info(struct modinfo *modinfop)
231 230 {
232 231 int status;
233 232
234 233 status = mod_info(&hermon_modlinkage, modinfop);
235 234 return (status);
236 235 }
237 236
238 237
239 238 /*
240 239 * _fini()
241 240 */
242 241 int
243 242 _fini()
244 243 {
245 244 int status;
246 245
247 246 status = mod_remove(&hermon_modlinkage);
248 247 if (status != 0) {
249 248 return (status);
250 249 }
251 250
252 251 /* Destroy the Hermon "userland resources database" */
253 252 hermon_umap_db_fini();
254 253
255 254 ibc_fini(&hermon_modlinkage);
256 255 ddi_soft_state_fini(&hermon_statep);
257 256
258 257 return (status);
259 258 }
260 259
261 260
262 261 /*
263 262 * hermon_getinfo()
264 263 */
265 264 /* ARGSUSED */
266 265 static int
267 266 hermon_getinfo(dev_info_t *dip, ddi_info_cmd_t cmd, void *arg, void **result)
268 267 {
269 268 dev_t dev;
270 269 hermon_state_t *state;
271 270 minor_t instance;
272 271
273 272 switch (cmd) {
274 273 case DDI_INFO_DEVT2DEVINFO:
275 274 dev = (dev_t)arg;
276 275 instance = HERMON_DEV_INSTANCE(dev);
277 276 state = ddi_get_soft_state(hermon_statep, instance);
278 277 if (state == NULL) {
279 278 return (DDI_FAILURE);
280 279 }
281 280 *result = (void *)state->hs_dip;
282 281 return (DDI_SUCCESS);
283 282
284 283 case DDI_INFO_DEVT2INSTANCE:
285 284 dev = (dev_t)arg;
286 285 instance = HERMON_DEV_INSTANCE(dev);
287 286 *result = (void *)(uintptr_t)instance;
288 287 return (DDI_SUCCESS);
289 288
290 289 default:
291 290 break;
292 291 }
293 292
294 293 return (DDI_FAILURE);
295 294 }
296 295
297 296
298 297 /*
299 298 * hermon_open()
300 299 */
301 300 /* ARGSUSED */
302 301 static int
303 302 hermon_open(dev_t *devp, int flag, int otyp, cred_t *credp)
304 303 {
305 304 hermon_state_t *state;
306 305 hermon_rsrc_t *rsrcp;
307 306 hermon_umap_db_entry_t *umapdb, *umapdb2;
308 307 minor_t instance;
309 308 uint64_t key, value;
310 309 uint_t hr_indx;
311 310 dev_t dev;
312 311 int status;
313 312
314 313 instance = HERMON_DEV_INSTANCE(*devp);
315 314 state = ddi_get_soft_state(hermon_statep, instance);
316 315 if (state == NULL) {
317 316 return (ENXIO);
318 317 }
319 318
320 319 /*
321 320 * Only allow driver to be opened for character access, and verify
322 321 * whether exclusive access is allowed.
323 322 */
324 323 if ((otyp != OTYP_CHR) || ((flag & FEXCL) &&
325 324 secpolicy_excl_open(credp) != 0)) {
326 325 return (EINVAL);
327 326 }
328 327
329 328 /*
330 329 * Search for the current process PID in the "userland resources
331 330 * database". If it is not found, then attempt to allocate a UAR
332 331 * page and add the ("key", "value") pair to the database.
333 332 * Note: As a last step we always return a devp appropriate for
334 333 * the open. Either we return a new minor number (based on the
335 334 * instance and the UAR page index) or we return the current minor
336 335 * number for the given client process.
337 336 *
338 337 * We also add an entry to the database to allow for lookup from
339 338 * "dev_t" to the current process PID. This is necessary because,
340 339 * under certain circumstance, the process PID that calls the Hermon
341 340 * close() entry point may not be the same as the one who called
342 341 * open(). Specifically, this can happen if a child process calls
343 342 * the Hermon's open() entry point, gets a UAR page, maps it out (using
344 343 * mmap()), and then exits without calling munmap(). Because mmap()
345 344 * adds a reference to the file descriptor, at the exit of the child
346 345 * process the file descriptor is "inherited" by the parent (and will
347 346 * be close()'d by the parent's PID only when it exits).
348 347 *
349 348 * Note: We use the hermon_umap_db_find_nolock() and
350 349 * hermon_umap_db_add_nolock() database access routines below (with
351 350 * an explicit mutex_enter of the database lock - "hdl_umapdb_lock")
352 351 * to ensure that the multiple accesses (in this case searching for,
353 352 * and then adding _two_ database entries) can be done atomically.
354 353 */
355 354 key = ddi_get_pid();
356 355 mutex_enter(&hermon_userland_rsrc_db.hdl_umapdb_lock);
357 356 status = hermon_umap_db_find_nolock(instance, key,
358 357 MLNX_UMAP_UARPG_RSRC, &value, 0, NULL);
359 358 if (status != DDI_SUCCESS) {
360 359 /*
361 360 * If we are in 'maintenance mode', we cannot alloc a UAR page.
362 361 * But we still need some rsrcp value, and a mostly unique
363 362 * hr_indx value. So we set rsrcp to NULL for maintenance
364 363 * mode, and use a rolling count for hr_indx. The field
365 364 * 'hs_open_hr_indx' is used only in this maintenance mode
366 365 * condition.
367 366 *
368 367 * Otherwise, if we are in operational mode then we allocate
369 368 * the UAR page as normal, and use the rsrcp value and tr_indx
370 369 * value from that allocation.
371 370 */
372 371 if (!HERMON_IS_OPERATIONAL(state->hs_operational_mode)) {
373 372 rsrcp = NULL;
374 373 hr_indx = state->hs_open_ar_indx++;
375 374 } else {
376 375 /* Allocate a new UAR page for this process */
377 376 status = hermon_rsrc_alloc(state, HERMON_UARPG, 1,
378 377 HERMON_NOSLEEP, &rsrcp);
379 378 if (status != DDI_SUCCESS) {
380 379 mutex_exit(
381 380 &hermon_userland_rsrc_db.hdl_umapdb_lock);
382 381 return (EAGAIN);
383 382 }
384 383
385 384 hr_indx = rsrcp->hr_indx;
386 385 }
387 386
388 387 /*
389 388 * Allocate an entry to track the UAR page resource in the
390 389 * "userland resources database".
391 390 */
392 391 umapdb = hermon_umap_db_alloc(instance, key,
393 392 MLNX_UMAP_UARPG_RSRC, (uint64_t)(uintptr_t)rsrcp);
394 393 if (umapdb == NULL) {
395 394 mutex_exit(&hermon_userland_rsrc_db.hdl_umapdb_lock);
396 395 /* If in "maintenance mode", don't free the rsrc */
397 396 if (HERMON_IS_OPERATIONAL(state->hs_operational_mode)) {
398 397 hermon_rsrc_free(state, &rsrcp);
399 398 }
400 399 return (EAGAIN);
401 400 }
402 401
403 402 /*
404 403 * Create a new device number. Minor number is a function of
405 404 * the UAR page index (15 bits) and the device instance number
406 405 * (3 bits).
407 406 */
408 407 dev = makedevice(getmajor(*devp), (hr_indx <<
409 408 HERMON_MINORNUM_SHIFT) | instance);
410 409
411 410 /*
412 411 * Allocate another entry in the "userland resources database"
413 412 * to track the association of the device number (above) to
414 413 * the current process ID (in "key").
415 414 */
416 415 umapdb2 = hermon_umap_db_alloc(instance, dev,
417 416 MLNX_UMAP_PID_RSRC, (uint64_t)key);
418 417 if (umapdb2 == NULL) {
419 418 mutex_exit(&hermon_userland_rsrc_db.hdl_umapdb_lock);
420 419 hermon_umap_db_free(umapdb);
421 420 /* If in "maintenance mode", don't free the rsrc */
422 421 if (HERMON_IS_OPERATIONAL(state->hs_operational_mode)) {
423 422 hermon_rsrc_free(state, &rsrcp);
424 423 }
425 424 return (EAGAIN);
426 425 }
427 426
428 427 /* Add the entries to the database */
429 428 hermon_umap_db_add_nolock(umapdb);
430 429 hermon_umap_db_add_nolock(umapdb2);
431 430
432 431 } else {
433 432 /*
434 433 * Return the same device number as on the original open()
435 434 * call. This was calculated as a function of the UAR page
436 435 * index (top 16 bits) and the device instance number
437 436 */
438 437 rsrcp = (hermon_rsrc_t *)(uintptr_t)value;
439 438 dev = makedevice(getmajor(*devp), (rsrcp->hr_indx <<
440 439 HERMON_MINORNUM_SHIFT) | instance);
441 440 }
442 441 mutex_exit(&hermon_userland_rsrc_db.hdl_umapdb_lock);
443 442
444 443 *devp = dev;
445 444
446 445 return (0);
447 446 }
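
As the comments above describe, the minor number packs the UAR page index above HERMON_MINORNUM_SHIFT with the instance number in the low bits, and HERMON_DEV_INSTANCE() recovers only the instance. A minimal sketch of that round trip, assuming a shift of 3 to match the "3 bits" of instance mentioned in the comment (the real value of HERMON_MINORNUM_SHIFT lives in the driver headers and is not shown here):

    #include <sys/types.h>

    #define SHIFT   3       /* hypothetical stand-in for HERMON_MINORNUM_SHIFT */

    static minor_t
    encode_minor(uint_t uar_idx, uint_t instance)
    {
            return ((uar_idx << SHIFT) | instance);         /* as in open() */
    }

    static uint_t
    decode_instance(minor_t minor)
    {
            return (minor & ((1 << SHIFT) - 1));    /* what HERMON_DEV_INSTANCE does */
    }

    static uint_t
    decode_uar_index(minor_t minor)
    {
            return (minor >> SHIFT);                /* back to hr_indx */
    }
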
448 447
449 448
450 449 /*
451 450 * hermon_close()
452 451 */
453 452 /* ARGSUSED */
454 453 static int
455 454 hermon_close(dev_t dev, int flag, int otyp, cred_t *credp)
456 455 {
457 456 hermon_state_t *state;
458 457 hermon_rsrc_t *rsrcp;
459 458 hermon_umap_db_entry_t *umapdb;
460 459 hermon_umap_db_priv_t *priv;
461 460 minor_t instance;
462 461 uint64_t key, value;
463 462 int status, reset_status = 0;
464 463
465 464 instance = HERMON_DEV_INSTANCE(dev);
466 465 state = ddi_get_soft_state(hermon_statep, instance);
467 466 if (state == NULL) {
468 467 return (ENXIO);
469 468 }
470 469
471 470 /*
472 471 * Search for "dev_t" in the "userland resources database". As
473 472 * explained above in hermon_open(), we can't depend on using the
474 473 * current process ID here to do the lookup because the process
475 474 * that ultimately closes may not be the same one who opened
476 475 * (because of inheritance).
477 476 * So we lookup the "dev_t" (which points to the PID of the process
478 477 * that opened), and we remove the entry from the database (and free
479 478 * it up). Then we do another query based on the PID value. And when
480 479 * we find that database entry, we free it up too and then free the
481 480 * Hermon UAR page resource.
482 481 *
483 482 * Note: We use the hermon_umap_db_find_nolock() database access
484 483 * routine below (with an explicit mutex_enter of the database lock)
485 484 * to ensure that the multiple accesses (which attempt to remove the
486 485 * two database entries) can be done atomically.
487 486 *
488 487 * This works the same in both maintenance mode and HCA mode, except
489 488 * for the call to hermon_rsrc_free(). In the case of maintenance mode,
490 489 * this call is not needed, as it was not allocated in hermon_open()
491 490 * above.
492 491 */
493 492 key = dev;
494 493 mutex_enter(&hermon_userland_rsrc_db.hdl_umapdb_lock);
495 494 status = hermon_umap_db_find_nolock(instance, key, MLNX_UMAP_PID_RSRC,
496 495 &value, HERMON_UMAP_DB_REMOVE, &umapdb);
497 496 if (status == DDI_SUCCESS) {
498 497 /*
499 498 * If the "hdb_priv" field is non-NULL, it indicates that
500 499 * some "on close" handling is still necessary. Call
501 500 * hermon_umap_db_handle_onclose_cb() to do the handling (i.e.
502 501 * to invoke all the registered callbacks). Then free up
503 502 * the resources associated with "hdb_priv" and continue
504 503 * closing.
505 504 */
506 505 priv = (hermon_umap_db_priv_t *)umapdb->hdbe_common.hdb_priv;
507 506 if (priv != NULL) {
508 507 reset_status = hermon_umap_db_handle_onclose_cb(priv);
509 508 kmem_free(priv, sizeof (hermon_umap_db_priv_t));
510 509 umapdb->hdbe_common.hdb_priv = (void *)NULL;
511 510 }
512 511
513 512 hermon_umap_db_free(umapdb);
514 513
515 514 /*
516 515 * Now do another lookup using PID as the key (copy it from
517 516 * "value"). When this lookup is complete, the "value" field
518 517 * will contain the hermon_rsrc_t pointer for the UAR page
519 518 * resource.
520 519 */
521 520 key = value;
522 521 status = hermon_umap_db_find_nolock(instance, key,
523 522 MLNX_UMAP_UARPG_RSRC, &value, HERMON_UMAP_DB_REMOVE,
524 523 &umapdb);
525 524 if (status == DDI_SUCCESS) {
526 525 hermon_umap_db_free(umapdb);
527 526 /* If in "maintenance mode", don't free the rsrc */
528 527 if (HERMON_IS_OPERATIONAL(state->hs_operational_mode)) {
529 528 rsrcp = (hermon_rsrc_t *)(uintptr_t)value;
530 529 hermon_rsrc_free(state, &rsrcp);
531 530 }
532 531 }
533 532 }
534 533 mutex_exit(&hermon_userland_rsrc_db.hdl_umapdb_lock);
535 534 return (reset_status);
536 535 }
537 536
538 537
539 538 /*
540 539 * hermon_attach()
541 540 * Context: Only called from attach() path context
542 541 */
543 542 static int
544 543 hermon_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
545 544 {
546 545 hermon_state_t *state;
547 546 ibc_clnt_hdl_t tmp_ibtfpriv;
548 547 ibc_status_t ibc_status;
549 548 int instance;
550 549 int status;
551 550
552 551 #ifdef __lock_lint
553 552 (void) hermon_quiesce(dip);
554 553 #endif
555 554
556 555 switch (cmd) {
557 556 case DDI_ATTACH:
558 557 instance = ddi_get_instance(dip);
559 558 status = ddi_soft_state_zalloc(hermon_statep, instance);
560 559 if (status != DDI_SUCCESS) {
561 560 cmn_err(CE_NOTE, "hermon%d: driver failed to attach: "
562 561 "attach_ssz_fail", instance);
563 562 goto fail_attach_nomsg;
564 563
565 564 }
566 565 state = ddi_get_soft_state(hermon_statep, instance);
567 566 if (state == NULL) {
568 567 ddi_soft_state_free(hermon_statep, instance);
569 568 cmn_err(CE_NOTE, "hermon%d: driver failed to attach: "
570 569 "attach_gss_fail", instance);
571 570 goto fail_attach_nomsg;
572 571 }
573 572
574 573 /* clear the attach error buffer */
575 574 HERMON_ATTACH_MSG_INIT(state->hs_attach_buf);
576 575
577 576 /* Save away devinfo and instance before hermon_fm_init() */
578 577 state->hs_dip = dip;
579 578 state->hs_instance = instance;
580 579
581 580 hermon_fm_init(state);
582 581
583 582 /*
584 583 * Initialize Hermon driver and hardware.
585 584 *
586 585 * Note: If this initialization fails we may still wish to
587 586 * create a device node and remain operational so that Hermon
588 587 * firmware can be updated/flashed (i.e. "maintenance mode").
589 588 * If this is the case, then "hs_operational_mode" will be
590 589 * equal to HERMON_MAINTENANCE_MODE. We will not attempt to
591 590 * attach to the IBTF or register with the IBMF (i.e. no
592 591 * InfiniBand interfaces will be enabled).
593 592 */
594 593 status = hermon_drv_init(state, dip, instance);
595 594 if ((status != DDI_SUCCESS) &&
596 595 (HERMON_IS_OPERATIONAL(state->hs_operational_mode))) {
597 596 goto fail_attach;
598 597 }
599 598
600 599 /*
601 600 * Change the Hermon FM mode
602 601 */
603 602 if ((hermon_get_state(state) & HCA_PIO_FM) &&
604 603 HERMON_IS_OPERATIONAL(state->hs_operational_mode)) {
605 604 /*
606 605 * Now we wait for 50ms to give an opportunity
607 606 * to Solaris FMA so that HW errors can be notified.
608 607 * Then check if there are HW errors or not. If
609 608 * a HW error is detected, the Hermon attachment
610 609 * must be failed.
611 610 */
612 611 delay(drv_usectohz(50000));
613 612 if (hermon_init_failure(state)) {
614 613 hermon_drv_fini(state);
615 614 HERMON_WARNING(state, "unable to "
616 615 "attach Hermon due to a HW error");
617 616 HERMON_ATTACH_MSG(state->hs_attach_buf,
618 617 "hermon_attach_failure");
619 618 goto fail_attach;
620 619 }
621 620
622 621 /*
623 622 * There seems no HW errors during the attachment,
624 623 * so let's change the Hermon FM state to the
625 624 * ereport only mode.
626 625 */
627 626 if (hermon_fm_ereport_init(state) != DDI_SUCCESS) {
628 627 /* unwind the resources */
629 628 hermon_drv_fini(state);
630 629 HERMON_ATTACH_MSG(state->hs_attach_buf,
631 630 "hermon_attach_failure");
632 631 goto fail_attach;
633 632 }
634 633 }
635 634
636 635 /* Create the minor node for device */
637 636 status = ddi_create_minor_node(dip, "devctl", S_IFCHR, instance,
638 637 DDI_PSEUDO, 0);
639 638 if (status != DDI_SUCCESS) {
640 639 hermon_drv_fini(state);
641 640 HERMON_ATTACH_MSG(state->hs_attach_buf,
642 641 "attach_create_mn_fail");
643 642 goto fail_attach;
644 643 }
645 644
646 645 /*
647 646 * If we are in "maintenance mode", then we don't want to
648 647 * register with the IBTF. All InfiniBand interfaces are
649 648 * uninitialized, and the device is only capable of handling
650 649 * requests to update/flash firmware (or test/debug requests).
651 650 */
652 651 if (HERMON_IS_OPERATIONAL(state->hs_operational_mode)) {
653 652 cmn_err(CE_NOTE, "!Hermon is operational\n");
654 653
655 654 /* Attach to InfiniBand Transport Framework (IBTF) */
656 655 ibc_status = ibc_attach(&tmp_ibtfpriv,
657 656 &state->hs_ibtfinfo);
658 657 if (ibc_status != IBC_SUCCESS) {
659 658 cmn_err(CE_CONT, "hermon_attach: ibc_attach "
660 659 "failed\n");
661 660 ddi_remove_minor_node(dip, "devctl");
662 661 hermon_drv_fini(state);
663 662 HERMON_ATTACH_MSG(state->hs_attach_buf,
664 663 "attach_ibcattach_fail");
665 664 goto fail_attach;
666 665 }
667 666
668 667 /*
669 668 * Now that we've successfully attached to the IBTF,
670 669 * we enable all appropriate asynch and CQ events to
671 670 * be forwarded to the IBTF.
672 671 */
673 672 HERMON_ENABLE_IBTF_CALLB(state, tmp_ibtfpriv);
674 673
675 674 ibc_post_attach(state->hs_ibtfpriv);
676 675
677 676 /* Register agents with IB Mgmt Framework (IBMF) */
678 677 status = hermon_agent_handlers_init(state);
679 678 if (status != DDI_SUCCESS) {
680 679 (void) ibc_pre_detach(tmp_ibtfpriv, DDI_DETACH);
681 680 HERMON_QUIESCE_IBTF_CALLB(state);
682 681 if (state->hs_in_evcallb != 0) {
683 682 HERMON_WARNING(state, "unable to "
684 683 "quiesce Hermon IBTF callbacks");
685 684 }
686 685 ibc_detach(tmp_ibtfpriv);
687 686 ddi_remove_minor_node(dip, "devctl");
688 687 hermon_drv_fini(state);
689 688 HERMON_ATTACH_MSG(state->hs_attach_buf,
690 689 "attach_agentinit_fail");
691 690 goto fail_attach;
692 691 }
693 692 }
694 693
695 694 /* Report attach in maintenance mode, if appropriate */
696 695 if (!(HERMON_IS_OPERATIONAL(state->hs_operational_mode))) {
697 696 cmn_err(CE_NOTE, "hermon%d: driver attached "
698 697 "(for maintenance mode only)", state->hs_instance);
699 698 hermon_fm_ereport(state, HCA_IBA_ERR, HCA_ERR_DEGRADED);
700 699 }
701 700
702 701 /* Report that driver was loaded */
703 702 ddi_report_dev(dip);
704 703
705 704 /* Send device information to log file */
706 705 hermon_device_info_report(state);
707 706
708 707 /* DEBUG PRINT */
709 708 cmn_err(CE_CONT, "!Hermon attach complete\n");
710 709 return (DDI_SUCCESS);
711 710
712 711 case DDI_RESUME:
713 712 /* Add code here for DDI_RESUME XXX */
714 713 return (DDI_FAILURE);
715 714
716 715 default:
717 716 cmn_err(CE_WARN, "hermon_attach: unknown cmd (0x%x)\n", cmd);
718 717 break;
719 718 }
720 719
721 720 fail_attach:
722 721 cmn_err(CE_NOTE, "hermon%d: driver failed to attach: %s", instance,
723 722 state->hs_attach_buf);
724 723 if (hermon_get_state(state) & HCA_EREPORT_FM) {
725 724 hermon_fm_ereport(state, HCA_SYS_ERR, HCA_ERR_SRV_LOST);
726 725 }
727 726 hermon_drv_fini2(state);
728 727 hermon_fm_fini(state);
729 728 ddi_soft_state_free(hermon_statep, instance);
730 729
731 730 fail_attach_nomsg:
732 731 return (DDI_FAILURE);
733 732 }
734 733
735 734
736 735 /*
737 736 * hermon_detach()
738 737 * Context: Only called from detach() path context
739 738 */
740 739 static int
741 740 hermon_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
742 741 {
743 742 hermon_state_t *state;
744 743 ibc_clnt_hdl_t tmp_ibtfpriv;
745 744 ibc_status_t ibc_status;
746 745 int instance, status;
747 746
748 747 instance = ddi_get_instance(dip);
749 748 state = ddi_get_soft_state(hermon_statep, instance);
750 749 if (state == NULL) {
751 750 return (DDI_FAILURE);
752 751 }
753 752
754 753 switch (cmd) {
755 754 case DDI_DETACH:
756 755 /*
757 756 * If we are in "maintenance mode", then we do not want to
758 757 * do teardown for any of the InfiniBand interfaces.
759 758 * Specifically, this means not detaching from IBTF (we never
760 759 * attached to begin with) and not deregistering from IBMF.
761 760 */
762 761 if (HERMON_IS_OPERATIONAL(state->hs_operational_mode)) {
763 762 /* Unregister agents from IB Mgmt Framework (IBMF) */
764 763 status = hermon_agent_handlers_fini(state);
765 764 if (status != DDI_SUCCESS) {
766 765 return (DDI_FAILURE);
767 766 }
768 767
769 768 /*
770 769 * Attempt the "pre-detach" from InfiniBand Transport
771 770 * Framework (IBTF). At this point the IBTF is still
772 771 * capable of handling incoming asynch and completion
773 772 * events. This "pre-detach" is primarily a mechanism
774 773 * to notify the appropriate IBTF clients that the
775 774 * HCA is being removed/offlined.
776 775 */
777 776 ibc_status = ibc_pre_detach(state->hs_ibtfpriv, cmd);
778 777 if (ibc_status != IBC_SUCCESS) {
779 778 status = hermon_agent_handlers_init(state);
780 779 if (status != DDI_SUCCESS) {
781 780 HERMON_WARNING(state, "failed to "
782 781 "restart Hermon agents");
783 782 }
784 783 return (DDI_FAILURE);
785 784 }
786 785
787 786 /*
788 787 * Before we can fully detach from the IBTF we need to
789 788 * ensure that we have handled all outstanding event
790 789 * callbacks. This is accomplished by quiescing the
791 790 * event callback mechanism. Note: if we are unable
792 791 * to successfully quiesce the callbacks, then this is
793 792 * an indication that something has probably gone
794 793 * seriously wrong. We print out a warning, but
795 794 * continue.
796 795 */
797 796 tmp_ibtfpriv = state->hs_ibtfpriv;
798 797 HERMON_QUIESCE_IBTF_CALLB(state);
799 798 if (state->hs_in_evcallb != 0) {
800 799 HERMON_WARNING(state, "unable to quiesce "
801 800 "Hermon IBTF callbacks");
802 801 }
803 802
804 803 /* Complete the detach from the IBTF */
805 804 ibc_detach(tmp_ibtfpriv);
806 805 }
807 806
808 807 /* Remove the minor node for device */
809 808 ddi_remove_minor_node(dip, "devctl");
810 809
811 810 /*
812 811 * Only call hermon_drv_fini() if we are in Hermon HCA mode.
813 812 * (Because if we are in "maintenance mode", then we never
814 813 * successfully finished init.) Only report successful
815 814 * detach for normal HCA mode.
816 815 */
817 816 if (HERMON_IS_OPERATIONAL(state->hs_operational_mode)) {
818 817 /* Cleanup driver resources and shutdown hardware */
819 818 hermon_drv_fini(state);
820 819 cmn_err(CE_CONT, "!Hermon driver successfully "
821 820 "detached\n");
822 821 }
823 822
824 823 hermon_drv_fini2(state);
825 824 hermon_fm_fini(state);
826 825 ddi_soft_state_free(hermon_statep, instance);
827 826
828 827 return (DDI_SUCCESS);
829 828
830 829 case DDI_SUSPEND:
831 830 /* Add code here for DDI_SUSPEND XXX */
832 831 return (DDI_FAILURE);
833 832
834 833 default:
835 834 cmn_err(CE_WARN, "hermon_detach: unknown cmd (0x%x)\n", cmd);
836 835 break;
837 836 }
838 837
839 838 return (DDI_FAILURE);
840 839 }
841 840
842 841 /*
843 842 * hermon_dma_attr_init()
844 843 * Context: Can be called from interrupt or base context.
845 844 */
846 845
847 846 /* ARGSUSED */
848 847 void
849 848 hermon_dma_attr_init(hermon_state_t *state, ddi_dma_attr_t *dma_attr)
850 849 {
851 850 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*dma_attr))
852 851
853 852 dma_attr->dma_attr_version = DMA_ATTR_V0;
854 853 dma_attr->dma_attr_addr_lo = 0;
855 854 dma_attr->dma_attr_addr_hi = 0xFFFFFFFFFFFFFFFFull;
856 855 dma_attr->dma_attr_count_max = 0xFFFFFFFFFFFFFFFFull;
857 856 dma_attr->dma_attr_align = HERMON_PAGESIZE; /* default 4K */
858 857 dma_attr->dma_attr_burstsizes = 0x3FF;
859 858 dma_attr->dma_attr_minxfer = 1;
860 859 dma_attr->dma_attr_maxxfer = 0xFFFFFFFFFFFFFFFFull;
861 860 dma_attr->dma_attr_seg = 0xFFFFFFFFFFFFFFFFull;
862 861 dma_attr->dma_attr_sgllen = 0x7FFFFFFF;
863 862 dma_attr->dma_attr_granular = 1;
864 863 dma_attr->dma_attr_flags = 0;
865 864 }
866 865
867 866 /*
868 867 * hermon_dma_alloc()
869 868 * Context: Can be called from base context.
870 869 */
871 870 int
872 871 hermon_dma_alloc(hermon_state_t *state, hermon_dma_info_t *dma_info,
873 872 uint16_t opcode)
874 873 {
875 874 ddi_dma_handle_t dma_hdl;
876 875 ddi_dma_attr_t dma_attr;
877 876 ddi_acc_handle_t acc_hdl;
878 877 ddi_dma_cookie_t cookie;
879 878 uint64_t kaddr;
880 879 uint64_t real_len;
881 880 uint_t ccount;
882 881 int status;
883 882
884 883 hermon_dma_attr_init(state, &dma_attr);
885 884 #ifdef __sparc
886 885 if (state->hs_cfg_profile->cp_iommu_bypass == HERMON_BINDMEM_BYPASS)
887 886 dma_attr.dma_attr_flags = DDI_DMA_FORCE_PHYSICAL;
888 887 #endif
889 888
890 889 /* Allocate a DMA handle */
891 890 status = ddi_dma_alloc_handle(state->hs_dip, &dma_attr, DDI_DMA_SLEEP,
892 891 NULL, &dma_hdl);
893 892 if (status != DDI_SUCCESS) {
894 893 IBTF_DPRINTF_L2("DMA", "alloc handle failed: %d", status);
895 894 cmn_err(CE_CONT, "DMA alloc handle failed(status %d)", status);
896 895 return (DDI_FAILURE);
897 896 }
898 897
899 898 /* Allocate DMA memory */
900 899 status = ddi_dma_mem_alloc(dma_hdl, dma_info->length,
901 900 &state->hs_reg_accattr, DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL,
902 901 (caddr_t *)&kaddr, (size_t *)&real_len, &acc_hdl);
903 902 if (status != DDI_SUCCESS) {
904 903 ddi_dma_free_handle(&dma_hdl);
905 904 IBTF_DPRINTF_L2("DMA", "memory alloc failed: %d", status);
906 905 cmn_err(CE_CONT, "DMA memory alloc failed(status %d)", status);
907 906 return (DDI_FAILURE);
908 907 }
909 908 bzero((caddr_t)(uintptr_t)kaddr, real_len);
910 909
911 910 /* Bind the memory to the handle */
912 911 status = ddi_dma_addr_bind_handle(dma_hdl, NULL,
913 912 (caddr_t)(uintptr_t)kaddr, (size_t)real_len, DDI_DMA_RDWR |
914 913 DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL, &cookie, &ccount);
915 914 if (status != DDI_SUCCESS) {
916 915 ddi_dma_mem_free(&acc_hdl);
917 916 ddi_dma_free_handle(&dma_hdl);
918 917 IBTF_DPRINTF_L2("DMA", "bind handle failed: %d", status);
919 918 cmn_err(CE_CONT, "DMA bind handle failed(status %d)", status);
920 919 return (DDI_FAILURE);
921 920 }
922 921
923 922 /* Package the hermon_dma_info contents and return */
924 923 dma_info->vaddr = kaddr;
925 924 dma_info->dma_hdl = dma_hdl;
926 925 dma_info->acc_hdl = acc_hdl;
927 926
928 927 /* Pass the mapping information to the firmware */
929 928 status = hermon_map_cmd_post(state, dma_info, opcode, cookie, ccount);
930 929 if (status != DDI_SUCCESS) {
931 930 char *s;
932 931 hermon_dma_free(dma_info);
933 932 switch (opcode) {
934 933 case MAP_ICM:
935 934 s = "MAP_ICM";
936 935 break;
937 936 case MAP_FA:
938 937 s = "MAP_FA";
939 938 break;
940 939 case MAP_ICM_AUX:
941 940 s = "MAP_ICM_AUX";
942 941 break;
943 942 default:
944 943 s = "UNKNOWN";
945 944 }
946 945 cmn_err(CE_NOTE, "Map cmd '%s' failed, status %08x\n",
947 946 s, status);
948 947 return (DDI_FAILURE);
949 948 }
950 949
951 950 return (DDI_SUCCESS);
952 951 }
953 952
954 953 /*
955 954 * hermon_dma_free()
956 955 * Context: Can be called from base context.
957 956 */
958 957 void
959 958 hermon_dma_free(hermon_dma_info_t *info)
960 959 {
961 960 /* Unbind the handles and free the memory */
962 961 (void) ddi_dma_unbind_handle(info->dma_hdl);
963 962 ddi_dma_mem_free(&info->acc_hdl);
964 963 ddi_dma_free_handle(&info->dma_hdl);
965 964 }
966 965
967 966 /* These macros are valid for use only in hermon_icm_alloc/hermon_icm_free. */
968 967 #define HERMON_ICM_ALLOC(rsrc) \
969 968 hermon_icm_alloc(state, rsrc, index1, index2)
970 969 #define HERMON_ICM_FREE(rsrc) \
971 970 hermon_icm_free(state, rsrc, index1, index2)
972 971
973 972 /*
974 973 * hermon_icm_alloc()
975 974 * Context: Can be called from base context.
976 975 *
977 976 * Only one thread can be here for a given hermon_rsrc_type_t "type".
978 977 *
979 978 * "num_to_hdl" is set if there is a need for lookups from resource
980 979 * number/index to resource handle. This is needed for QPs/CQs/SRQs
981 980 * for the various affiliated events/errors.
982 981 */
983 982 int
984 983 hermon_icm_alloc(hermon_state_t *state, hermon_rsrc_type_t type,
985 984 uint32_t index1, uint32_t index2)
986 985 {
987 986 hermon_icm_table_t *icm;
988 987 hermon_dma_info_t *dma_info;
989 988 uint8_t *bitmap;
990 989 int status;
991 990 int num_to_hdl = 0;
992 991
993 992 if (hermon_verbose) {
994 993 IBTF_DPRINTF_L2("hermon", "hermon_icm_alloc: rsrc_type (0x%x) "
995 994 "index1/2 (0x%x/0x%x)", type, index1, index2);
996 995 }
997 996
998 997 icm = &state->hs_icm[type];
999 998
1000 999 switch (type) {
1001 1000 case HERMON_QPC:
1002 1001 status = HERMON_ICM_ALLOC(HERMON_CMPT_QPC);
1003 1002 if (status != DDI_SUCCESS) {
1004 1003 return (status);
1005 1004 }
1006 1005 status = HERMON_ICM_ALLOC(HERMON_RDB);
1007 1006 if (status != DDI_SUCCESS) { /* undo icm_alloc's */
1008 1007 HERMON_ICM_FREE(HERMON_CMPT_QPC);
1009 1008 return (status);
1010 1009 }
1011 1010 status = HERMON_ICM_ALLOC(HERMON_ALTC);
1012 1011 if (status != DDI_SUCCESS) { /* undo icm_alloc's */
1013 1012 HERMON_ICM_FREE(HERMON_RDB);
1014 1013 HERMON_ICM_FREE(HERMON_CMPT_QPC);
1015 1014 return (status);
1016 1015 }
1017 1016 status = HERMON_ICM_ALLOC(HERMON_AUXC);
1018 1017 if (status != DDI_SUCCESS) { /* undo icm_alloc's */
1019 1018 HERMON_ICM_FREE(HERMON_ALTC);
1020 1019 HERMON_ICM_FREE(HERMON_RDB);
1021 1020 HERMON_ICM_FREE(HERMON_CMPT_QPC);
1022 1021 return (status);
1023 1022 }
1024 1023 num_to_hdl = 1;
1025 1024 break;
1026 1025 case HERMON_SRQC:
1027 1026 status = HERMON_ICM_ALLOC(HERMON_CMPT_SRQC);
1028 1027 if (status != DDI_SUCCESS) {
1029 1028 return (status);
1030 1029 }
1031 1030 num_to_hdl = 1;
1032 1031 break;
1033 1032 case HERMON_CQC:
1034 1033 status = HERMON_ICM_ALLOC(HERMON_CMPT_CQC);
1035 1034 if (status != DDI_SUCCESS) {
1036 1035 return (status);
1037 1036 }
1038 1037 num_to_hdl = 1;
1039 1038 break;
1040 1039 case HERMON_EQC:
1041 1040 status = HERMON_ICM_ALLOC(HERMON_CMPT_EQC);
1042 1041 if (status != DDI_SUCCESS) { /* undo icm_alloc's */
1043 1042 return (status);
1044 1043 }
1045 1044 break;
1046 1045 }
1047 1046
1048 1047 /* ensure existence of bitmap and dmainfo, sets "dma_info" */
1049 1048 hermon_bitmap(bitmap, dma_info, icm, index1, num_to_hdl);
1050 1049
1051 1050 /* Set up the DMA handle for allocation and mapping */
1052 1051 dma_info += index2;
1053 1052 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*dma_info))
1054 1053 dma_info->length = icm->span << icm->log_object_size;
1055 1054 dma_info->icmaddr = icm->icm_baseaddr +
1056 1055 (((index1 << icm->split_shift) +
1057 1056 (index2 << icm->span_shift)) << icm->log_object_size);
1058 1057
1059 1058 /* Allocate memory for the num_to_qp/cq/srq pointers */
1060 1059 if (num_to_hdl)
1061 1060 icm->num_to_hdl[index1][index2] =
1062 1061 kmem_zalloc(HERMON_ICM_SPAN * sizeof (void *), KM_SLEEP);
1063 1062
1064 1063 if (hermon_verbose) {
1065 1064 IBTF_DPRINTF_L2("hermon", "alloc DMA: "
1066 1065 "rsrc (0x%x) index (%x, %x) "
1067 1066 "icm_addr/len (%llx/%x) bitmap %p", type, index1, index2,
1068 1067 (longlong_t)dma_info->icmaddr, dma_info->length, bitmap);
1069 1068 }
1070 1069
1071 1070 /* Allocate and map memory for this span */
1072 1071 status = hermon_dma_alloc(state, dma_info, MAP_ICM);
1073 1072 if (status != DDI_SUCCESS) {
1074 1073 IBTF_DPRINTF_L2("hermon", "hermon_icm_alloc: DMA "
1075 1074 "allocation failed, status 0x%x", status);
1076 1075 switch (type) {
1077 1076 case HERMON_QPC:
1078 1077 HERMON_ICM_FREE(HERMON_AUXC);
1079 1078 HERMON_ICM_FREE(HERMON_ALTC);
1080 1079 HERMON_ICM_FREE(HERMON_RDB);
1081 1080 HERMON_ICM_FREE(HERMON_CMPT_QPC);
1082 1081 break;
1083 1082 case HERMON_SRQC:
1084 1083 HERMON_ICM_FREE(HERMON_CMPT_SRQC);
1085 1084 break;
1086 1085 case HERMON_CQC:
1087 1086 HERMON_ICM_FREE(HERMON_CMPT_CQC);
1088 1087 break;
1089 1088 case HERMON_EQC:
1090 1089 HERMON_ICM_FREE(HERMON_CMPT_EQC);
1091 1090 break;
1092 1091 }
1093 1092
1094 1093 return (DDI_FAILURE);
1095 1094 }
1096 1095 if (hermon_verbose) {
1097 1096 IBTF_DPRINTF_L2("hermon", "hermon_icm_alloc: mapping ICM: "
1098 1097 "rsrc_type (0x%x) index (0x%x, 0x%x) alloc length (0x%x) "
1099 1098 "icm_addr (0x%lx)", type, index1, index2, dma_info->length,
1100 1099 dma_info->icmaddr);
1101 1100 }
1102 1101
1103 1102 /* Set the bit for this slot in the table bitmap */
1104 1103 HERMON_BMAP_BIT_SET(icm->icm_bitmap[index1], index2);
1105 1104
1106 1105 return (DDI_SUCCESS);
1107 1106 }
1108 1107
1109 1108 /*
1110 1109 * hermon_icm_free()
1111 1110 * Context: Can be called from base context.
1112 1111 *
1113 1112 * ICM resources have been successfully returned from hermon_icm_alloc().
1114 1113 * Associated dma_info is no longer in use. Free the ICM backing memory.
1115 1114 */
1116 1115 void
1117 1116 hermon_icm_free(hermon_state_t *state, hermon_rsrc_type_t type,
1118 1117 uint32_t index1, uint32_t index2)
1119 1118 {
1120 1119 hermon_icm_table_t *icm;
1121 1120 hermon_dma_info_t *dma_info;
1122 1121 int status;
1123 1122
1124 1123 icm = &state->hs_icm[type];
1125 1124 ASSERT(icm->icm_dma[index1][index2].icm_refcnt == 0);
1126 1125
1127 1126 if (hermon_verbose) {
1128 1127 IBTF_DPRINTF_L2("hermon", "hermon_icm_free: rsrc_type (0x%x) "
1129 1128 "index (0x%x, 0x%x)", type, index1, index2);
1130 1129 }
1131 1130
1132 1131 dma_info = icm->icm_dma[index1] + index2;
1133 1132
1134 1133 /* The following only happens if attach() is failing. */
1135 1134 if (dma_info == NULL)
1136 1135 return;
1137 1136
1138 1137 /* Unmap the ICM allocation, then free the backing DMA memory */
1139 1138 status = hermon_unmap_icm_cmd_post(state, dma_info);
1140 1139 if (status != DDI_SUCCESS) {
1141 1140 HERMON_WARNING(state, "UNMAP_ICM failure");
1142 1141 }
1143 1142 hermon_dma_free(dma_info);
1144 1143
1145 1144 /* Clear the bit in the ICM table bitmap */
1146 1145 HERMON_BMAP_BIT_CLR(icm->icm_bitmap[index1], index2);
1147 1146
1148 1147 switch (type) {
1149 1148 case HERMON_QPC:
1150 1149 HERMON_ICM_FREE(HERMON_AUXC);
1151 1150 HERMON_ICM_FREE(HERMON_ALTC);
1152 1151 HERMON_ICM_FREE(HERMON_RDB);
1153 1152 HERMON_ICM_FREE(HERMON_CMPT_QPC);
1154 1153 break;
1155 1154 case HERMON_SRQC:
1156 1155 HERMON_ICM_FREE(HERMON_CMPT_SRQC);
1157 1156 break;
1158 1157 case HERMON_CQC:
1159 1158 HERMON_ICM_FREE(HERMON_CMPT_CQC);
1160 1159 break;
1161 1160 case HERMON_EQC:
1162 1161 HERMON_ICM_FREE(HERMON_CMPT_EQC);
1163 1162 break;
1164 1163
1165 1164 }
1166 1165 }
1167 1166
1168 1167
1169 1168 /*
1170 1169 * hermon_icm_num_to_hdl()
1171 1170 * Context: Can be called from base or interrupt context.
1172 1171 *
1173 1172 * Given an index of a resource, index through the sparsely allocated
1174 1173 * arrays to find the pointer to its software handle. Return NULL if
1175 1174 * any of the arrays of pointers has been freed (should never happen).
1176 1175 */
1177 1176 void *
1178 1177 hermon_icm_num_to_hdl(hermon_state_t *state, hermon_rsrc_type_t type,
1179 1178 uint32_t idx)
1180 1179 {
1181 1180 hermon_icm_table_t *icm;
1182 1181 uint32_t span_offset;
1183 1182 uint32_t index1, index2;
1184 1183 void ***p1, **p2;
1185 1184
1186 1185 icm = &state->hs_icm[type];
1187 1186 hermon_index(index1, index2, idx, icm, span_offset);
1188 1187 p1 = icm->num_to_hdl[index1];
1189 1188 if (p1 == NULL) {
1190 1189 IBTF_DPRINTF_L2("hermon", "icm_num_to_hdl failed at level 1"
1191 1190 ": rsrc_type %d, index 0x%x", type, idx);
1192 1191 return (NULL);
1193 1192 }
1194 1193 p2 = p1[index2];
1195 1194 if (p2 == NULL) {
1196 1195 IBTF_DPRINTF_L2("hermon", "icm_num_to_hdl failed at level 2"
1197 1196 ": rsrc_type %d, index 0x%x", type, idx);
1198 1197 return (NULL);
1199 1198 }
1200 1199 return (p2[span_offset]);
1201 1200 }
1202 1201
1203 1202 /*
1204 1203 * hermon_icm_set_num_to_hdl()
1205 1204 * Context: Can be called from base or interrupt context.
1206 1205 *
1207 1206 * Given an index of a resource, we index through the sparsely allocated
1208 1207 * arrays to store the software handle, used by hermon_icm_num_to_hdl().
1209 1208 * This function is used to both set and reset (set to NULL) the handle.
1210 1209 * This table is allocated during ICM allocation for the given resource,
1211 1210 * so its existence is a given, and the store location does not conflict
1212 1211 * with any other stores to the table (no locking needed).
1213 1212 */
1214 1213 void
1215 1214 hermon_icm_set_num_to_hdl(hermon_state_t *state, hermon_rsrc_type_t type,
1216 1215 uint32_t idx, void *hdl)
1217 1216 {
1218 1217 hermon_icm_table_t *icm;
1219 1218 uint32_t span_offset;
1220 1219 uint32_t index1, index2;
1221 1220
1222 1221 icm = &state->hs_icm[type];
1223 1222 hermon_index(index1, index2, idx, icm, span_offset);
1224 1223 ASSERT((hdl == NULL) ^
1225 1224 (icm->num_to_hdl[index1][index2][span_offset] == NULL));
1226 1225 icm->num_to_hdl[index1][index2][span_offset] = hdl;
1227 1226 }
1228 1227
1229 1228 /*
1230 1229 * hermon_device_mode()
1231 1230 * Context: Can be called from base or interrupt context.
1232 1231 *
1233 1232 * Return HERMON_HCA_MODE for operational mode
1234 1233 * Return HERMON_MAINTENANCE_MODE for maintenance mode
1235 1234 * Return 0 otherwise
1236 1235 *
1237 1236 * A non-zero return for either operational or maintenance mode simplifies
1238 1237 * one of the 2 uses of this function.
1239 1238 */
1240 1239 int
1241 1240 hermon_device_mode(hermon_state_t *state)
1242 1241 {
1243 1242 if (state->hs_vendor_id != PCI_VENID_MLX)
1244 1243 return (0);
1245 1244
1246 1245 switch (state->hs_device_id) {
1247 1246 case PCI_DEVID_HERMON_SDR:
1248 1247 case PCI_DEVID_HERMON_DDR:
1249 1248 case PCI_DEVID_HERMON_DDRG2:
1250 1249 case PCI_DEVID_HERMON_QDRG2:
1251 1250 case PCI_DEVID_HERMON_QDRG2V:
1252 1251 return (HERMON_HCA_MODE);
1253 1252 case PCI_DEVID_HERMON_MAINT:
1254 1253 return (HERMON_MAINTENANCE_MODE);
1255 1254 default:
1256 1255 return (0);
1257 1256 }
1258 1257 }
1259 1258
1260 1259 /*
1261 1260 * hermon_drv_init()
1262 1261 * Context: Only called from attach() path context
1263 1262 */
1264 1263 /* ARGSUSED */
1265 1264 static int
1266 1265 hermon_drv_init(hermon_state_t *state, dev_info_t *dip, int instance)
1267 1266 {
1268 1267 int status;
1269 1268
1270 1269 /* Retrieve PCI device, vendor and rev IDs */
1271 1270 state->hs_vendor_id = HERMON_GET_VENDOR_ID(state->hs_dip);
1272 1271 state->hs_device_id = HERMON_GET_DEVICE_ID(state->hs_dip);
1273 1272 state->hs_revision_id = HERMON_GET_REVISION_ID(state->hs_dip);
1274 1273
1275 1274 /*
1276 1275 * Check and set the operational mode of the device. If the driver is
1277 1276 * bound to the Hermon device in "maintenance mode", then this generally
1278 1277 * means that either the device has been specifically jumpered to
1279 1278 * start in this mode or the firmware boot process has failed to
1280 1279 * successfully load either the primary or the secondary firmware
1281 1280 * image.
1282 1281 */
1283 1282 state->hs_operational_mode = hermon_device_mode(state);
1284 1283 switch (state->hs_operational_mode) {
1285 1284 case HERMON_HCA_MODE:
1286 1285 state->hs_cfg_profile_setting = HERMON_CFG_MEMFREE;
1287 1286 break;
1288 1287 case HERMON_MAINTENANCE_MODE:
1289 1288 HERMON_FMANOTE(state, HERMON_FMA_MAINT);
1290 1289 state->hs_fm_degraded_reason = HCA_FW_MISC; /* not fw reason */
1291 1290 return (DDI_FAILURE);
1292 1291 default:
1293 1292 HERMON_FMANOTE(state, HERMON_FMA_PCIID);
1294 1293 HERMON_WARNING(state, "unexpected device type detected");
1295 1294 return (DDI_FAILURE);
1296 1295 }
1297 1296
1298 1297 /*
1299 1298 * Initialize the Hermon hardware.
1300 1299 *
1301 1300 * Note: If this routine returns an error, it is often a reasonably
1302 1301 * good indication that something Hermon firmware-related has caused
1303 1302 * the failure or some HW related errors have caused the failure.
1304 1303 * (also there are few possibilities that SW (e.g. SW resource
1305 1304 * shortage) can cause the failure, but the majority case is due to
1306 1305 * either a firmware related error or a HW related one) In order to
1307 1306 * give the user an opportunity (if desired) to update or reflash
1308 1307 * the Hermon firmware image, we set "hs_operational_mode" flag
1309 1308 * (described above) to indicate that we wish to enter maintenance
1310 1309 * mode in case of the firmware-related issue.
1311 1310 */
1312 1311 status = hermon_hw_init(state);
1313 1312 if (status != DDI_SUCCESS) {
1314 1313 cmn_err(CE_NOTE, "hermon%d: error during attach: %s", instance,
1315 1314 state->hs_attach_buf);
1316 1315 return (DDI_FAILURE);
1317 1316 }
1318 1317
1319 1318 /*
1320 1319 * Now that the ISR has been setup, arm all the EQs for event
1321 1320 * generation.
1322 1321 */
1323 1322
1324 1323 status = hermon_eq_arm_all(state);
1325 1324 if (status != DDI_SUCCESS) {
1326 1325 cmn_err(CE_NOTE, "EQ Arm All failed\n");
1327 1326 hermon_hw_fini(state, HERMON_DRV_CLEANUP_ALL);
1328 1327 return (DDI_FAILURE);
1329 1328 }
1330 1329
1331 1330 /* test interrupts and event queues */
1332 1331 status = hermon_nop_post(state, 0x0, 0x0);
1333 1332 if (status != DDI_SUCCESS) {
1334 1333 cmn_err(CE_NOTE, "Interrupts/EQs failed\n");
1335 1334 hermon_hw_fini(state, HERMON_DRV_CLEANUP_ALL);
1336 1335 return (DDI_FAILURE);
1337 1336 }
1338 1337
1339 1338 /* Initialize Hermon softstate */
1340 1339 status = hermon_soft_state_init(state);
1341 1340 if (status != DDI_SUCCESS) {
1342 1341 cmn_err(CE_NOTE, "Failed to init soft state\n");
1343 1342 hermon_hw_fini(state, HERMON_DRV_CLEANUP_ALL);
1344 1343 return (DDI_FAILURE);
1345 1344 }
1346 1345
1347 1346 return (DDI_SUCCESS);
1348 1347 }
1349 1348
1350 1349
1351 1350 /*
1352 1351 * hermon_drv_fini()
1353 1352 * Context: Only called from attach() and/or detach() path contexts
1354 1353 */
1355 1354 static void
1356 1355 hermon_drv_fini(hermon_state_t *state)
1357 1356 {
1358 1357 /* Cleanup Hermon softstate */
1359 1358 hermon_soft_state_fini(state);
1360 1359
1361 1360 /* Cleanup Hermon resources and shutdown hardware */
1362 1361 hermon_hw_fini(state, HERMON_DRV_CLEANUP_ALL);
1363 1362 }
1364 1363
1365 1364
1366 1365 /*
1367 1366 * hermon_drv_fini2()
1368 1367 * Context: Only called from attach() and/or detach() path contexts
1369 1368 */
1370 1369 static void
1371 1370 hermon_drv_fini2(hermon_state_t *state)
1372 1371 {
1373 1372 if (state->hs_fm_poll_thread) {
1374 1373 ddi_periodic_delete(state->hs_fm_poll_thread);
1375 1374 state->hs_fm_poll_thread = NULL;
1376 1375 }
1377 1376
1378 1377 /* HERMON_DRV_CLEANUP_LEVEL1 */
1379 1378 if (state->hs_fm_cmdhdl) {
1380 1379 hermon_regs_map_free(state, &state->hs_fm_cmdhdl);
1381 1380 state->hs_fm_cmdhdl = NULL;
1382 1381 }
1383 1382
1384 1383 if (state->hs_reg_cmdhdl) {
1385 1384 ddi_regs_map_free(&state->hs_reg_cmdhdl);
1386 1385 state->hs_reg_cmdhdl = NULL;
1387 1386 }
1388 1387
1389 1388 /* HERMON_DRV_CLEANUP_LEVEL0 */
1390 1389 if (state->hs_msix_tbl_entries) {
1391 1390 kmem_free(state->hs_msix_tbl_entries,
1392 1391 state->hs_msix_tbl_size);
1393 1392 state->hs_msix_tbl_entries = NULL;
1394 1393 }
1395 1394
1396 1395 if (state->hs_msix_pba_entries) {
1397 1396 kmem_free(state->hs_msix_pba_entries,
1398 1397 state->hs_msix_pba_size);
1399 1398 state->hs_msix_pba_entries = NULL;
1400 1399 }
1401 1400
1402 1401 if (state->hs_fm_msix_tblhdl) {
1403 1402 hermon_regs_map_free(state, &state->hs_fm_msix_tblhdl);
1404 1403 state->hs_fm_msix_tblhdl = NULL;
1405 1404 }
1406 1405
1407 1406 if (state->hs_reg_msix_tblhdl) {
1408 1407 ddi_regs_map_free(&state->hs_reg_msix_tblhdl);
1409 1408 state->hs_reg_msix_tblhdl = NULL;
1410 1409 }
1411 1410
1412 1411 if (state->hs_fm_msix_pbahdl) {
1413 1412 hermon_regs_map_free(state, &state->hs_fm_msix_pbahdl);
1414 1413 state->hs_fm_msix_pbahdl = NULL;
1415 1414 }
1416 1415
1417 1416 if (state->hs_reg_msix_pbahdl) {
1418 1417 ddi_regs_map_free(&state->hs_reg_msix_pbahdl);
1419 1418 state->hs_reg_msix_pbahdl = NULL;
1420 1419 }
1421 1420
1422 1421 if (state->hs_fm_pcihdl) {
1423 1422 hermon_pci_config_teardown(state, &state->hs_fm_pcihdl);
1424 1423 state->hs_fm_pcihdl = NULL;
1425 1424 }
1426 1425
1427 1426 if (state->hs_reg_pcihdl) {
1428 1427 pci_config_teardown(&state->hs_reg_pcihdl);
1429 1428 state->hs_reg_pcihdl = NULL;
1430 1429 }
1431 1430 }
1432 1431
1433 1432
1434 1433 /*
1435 1434 * hermon_isr_init()
1436 1435 * Context: Only called from attach() path context
1437 1436 */
1438 1437 static int
1439 1438 hermon_isr_init(hermon_state_t *state)
1440 1439 {
1441 1440 int status;
1442 1441 int intr;
1443 1442
1444 1443 for (intr = 0; intr < state->hs_intrmsi_allocd; intr++) {
1445 1444
1446 1445 /*
1447 1446 * Add a handler for the interrupt or MSI
1448 1447 */
1449 1448 status = ddi_intr_add_handler(state->hs_intrmsi_hdl[intr],
1450 1449 hermon_isr, (caddr_t)state, (void *)(uintptr_t)intr);
1451 1450 if (status != DDI_SUCCESS) {
1452 1451 return (DDI_FAILURE);
1453 1452 }
1454 1453
1455 1454 /*
1456 1455 * Enable the software interrupt. Note: depending on the value
1457 1456 * returned in the capability flag, we have to call either
1458 1457 * ddi_intr_block_enable() or ddi_intr_enable().
1459 1458 */
1460 1459 if (state->hs_intrmsi_cap & DDI_INTR_FLAG_BLOCK) {
1461 1460 status = ddi_intr_block_enable(
1462 1461 &state->hs_intrmsi_hdl[intr], 1);
1463 1462 if (status != DDI_SUCCESS) {
1464 1463 return (DDI_FAILURE);
1465 1464 }
1466 1465 } else {
1467 1466 status = ddi_intr_enable(state->hs_intrmsi_hdl[intr]);
1468 1467 if (status != DDI_SUCCESS) {
1469 1468 return (DDI_FAILURE);
1470 1469 }
1471 1470 }
1472 1471 }
1473 1472
1474 1473 /*
1475 1474 * Now that the ISR has been enabled, defer arm_all EQs for event
1476 1475 * generation until later, in case MSIX is enabled
1477 1476 */
1478 1477 return (DDI_SUCCESS);
1479 1478 }
1480 1479
1481 1480
1482 1481 /*
1483 1482 * hermon_isr_fini()
1484 1483 * Context: Only called from attach() and/or detach() path contexts
1485 1484 */
1486 1485 static void
1487 1486 hermon_isr_fini(hermon_state_t *state)
1488 1487 {
1489 1488 int intr;
1490 1489
1491 1490 for (intr = 0; intr < state->hs_intrmsi_allocd; intr++) {
1492 1491 /* Disable the software interrupt */
1493 1492 if (state->hs_intrmsi_cap & DDI_INTR_FLAG_BLOCK) {
1494 1493 (void) ddi_intr_block_disable(
1495 1494 &state->hs_intrmsi_hdl[intr], 1);
1496 1495 } else {
1497 1496 (void) ddi_intr_disable(state->hs_intrmsi_hdl[intr]);
1498 1497 }
1499 1498
1500 1499 /*
1501 1500 * Remove the software handler for the interrupt or MSI
1502 1501 */
1503 1502 (void) ddi_intr_remove_handler(state->hs_intrmsi_hdl[intr]);
1504 1503 }
1505 1504 }
1506 1505
1507 1506
1508 1507 /*
1509 1508 * Sum of ICM configured values:
1510 1509 * cMPT, dMPT, MTT, QPC, SRQC, RDB, CQC, ALTC, AUXC, EQC, MCG
1511 1510 *
1512 1511 */
1513 1512 static uint64_t
1514 1513 hermon_size_icm(hermon_state_t *state)
1515 1514 {
1516 1515 hermon_hw_querydevlim_t *devlim;
1517 1516 hermon_cfg_profile_t *cfg;
1518 1517 uint64_t num_cmpts, num_dmpts, num_mtts;
1519 1518 uint64_t num_qpcs, num_srqc, num_rdbs;
1520 1519 #ifndef HERMON_FW_WORKAROUND
1521 1520 uint64_t num_auxc;
1522 1521 #endif
1523 1522 uint64_t num_cqcs, num_altc;
1524 1523 uint64_t num_eqcs, num_mcgs;
1525 1524 uint64_t size;
1526 1525
1527 1526 devlim = &state->hs_devlim;
1528 1527 cfg = state->hs_cfg_profile;
1529 1528 /* number of respective entries */
1530 1529 num_cmpts = (uint64_t)0x1 << cfg->cp_log_num_cmpt;
1531 1530 num_mtts = (uint64_t)0x1 << cfg->cp_log_num_mtt;
1532 1531 num_dmpts = (uint64_t)0x1 << cfg->cp_log_num_dmpt;
1533 1532 num_qpcs = (uint64_t)0x1 << cfg->cp_log_num_qp;
1534 1533 num_srqc = (uint64_t)0x1 << cfg->cp_log_num_srq;
1535 1534 num_rdbs = (uint64_t)0x1 << cfg->cp_log_num_rdb;
1536 1535 num_cqcs = (uint64_t)0x1 << cfg->cp_log_num_cq;
1537 1536 num_altc = (uint64_t)0x1 << cfg->cp_log_num_qp;
1538 1537 #ifndef HERMON_FW_WORKAROUND
1539 1538 num_auxc = (uint64_t)0x1 << cfg->cp_log_num_qp;
1540 1539 #endif
1541 1540 num_eqcs = (uint64_t)0x1 << cfg->cp_log_num_eq;
1542 1541 num_mcgs = (uint64_t)0x1 << cfg->cp_log_num_mcg;
1543 1542
1544 1543 size =
1545 1544 num_cmpts * devlim->cmpt_entry_sz +
1546 1545 num_dmpts * devlim->dmpt_entry_sz +
1547 1546 num_mtts * devlim->mtt_entry_sz +
1548 1547 num_qpcs * devlim->qpc_entry_sz +
1549 1548 num_srqc * devlim->srq_entry_sz +
1550 1549 num_rdbs * devlim->rdmardc_entry_sz +
1551 1550 num_cqcs * devlim->cqc_entry_sz +
1552 1551 num_altc * devlim->altc_entry_sz +
1553 1552 #ifdef HERMON_FW_WORKAROUND
1554 1553 0x80000000ull +
1555 1554 #else
1556 1555 num_auxc * devlim->aux_entry_sz +
1557 1556 #endif
1558 1557 num_eqcs * devlim->eqc_entry_sz +
1559 1558 num_mcgs * HERMON_MCGMEM_SZ(state);
1560 1559 return (size);
1561 1560 }
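
Each term in the sum above is just (1 << cp_log_num_X) scaled by the matching entry size reported by the device. A worked example with hypothetical profile values (these numbers are illustrative, not driver defaults):

    /* e.g. cp_log_num_qp = 17 and a 256-byte qpc_entry_sz */
    uint64_t qpc_icm = ((uint64_t)1 << 17) * 256;   /* 33554432 bytes = 32 MB */

With HERMON_FW_WORKAROUND defined, the AUXC term is replaced by the fixed 0x80000000 (2 GB) constant seen above.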
1562 1561
1563 1562
1564 1563 /*
1565 1564 * hermon_hw_init()
1566 1565 * Context: Only called from attach() path context
1567 1566 */
1568 1567 static int
1569 1568 hermon_hw_init(hermon_state_t *state)
1570 1569 {
1571 1570 hermon_drv_cleanup_level_t cleanup;
1572 1571 sm_nodeinfo_t nodeinfo;
1573 1572 uint64_t clr_intr_offset;
1574 1573 int status;
1575 1574 uint32_t fw_size; /* in page */
1576 1575 uint64_t offset;
1577 1576
1578 1577 /* This is where driver initialization begins */
1579 1578 cleanup = HERMON_DRV_CLEANUP_LEVEL0;
1580 1579
1581 1580 /* Setup device access attributes */
1582 1581 state->hs_reg_accattr.devacc_attr_version = DDI_DEVICE_ATTR_V1;
1583 1582 state->hs_reg_accattr.devacc_attr_endian_flags = DDI_STRUCTURE_BE_ACC;
1584 1583 state->hs_reg_accattr.devacc_attr_dataorder = DDI_STRICTORDER_ACC;
1585 1584 state->hs_reg_accattr.devacc_attr_access = DDI_DEFAULT_ACC;
1586 1585
1587 1586 /* Setup fma-protected access attributes */
1588 1587 state->hs_fm_accattr.devacc_attr_version =
1589 1588 hermon_devacc_attr_version(state);
1590 1589 state->hs_fm_accattr.devacc_attr_endian_flags = DDI_STRUCTURE_BE_ACC;
1591 1590 state->hs_fm_accattr.devacc_attr_dataorder = DDI_STRICTORDER_ACC;
1592 1591 /* set acc err protection type */
1593 1592 state->hs_fm_accattr.devacc_attr_access =
1594 1593 hermon_devacc_attr_access(state);
1595 1594
1596 1595 /* Setup for PCI config read/write of HCA device */
1597 1596 status = hermon_pci_config_setup(state, &state->hs_fm_pcihdl);
1598 1597 if (status != DDI_SUCCESS) {
1599 1598 hermon_hw_fini(state, cleanup);
1600 1599 HERMON_ATTACH_MSG(state->hs_attach_buf,
1601 1600 "hw_init_PCI_config_space_regmap_fail");
1602 1601 /* This case is not the degraded one */
1603 1602 return (DDI_FAILURE);
1604 1603 }
1605 1604
1606 1605 /* Map PCI config space and MSI-X tables/pba */
1607 1606 hermon_set_msix_info(state);
1608 1607
1609 1608 /* Map in Hermon registers (CMD, UAR, MSIX) and setup offsets */
1610 1609 status = hermon_regs_map_setup(state, HERMON_CMD_BAR,
1611 1610 &state->hs_reg_cmd_baseaddr, 0, 0, &state->hs_fm_accattr,
1612 1611 &state->hs_fm_cmdhdl);
1613 1612 if (status != DDI_SUCCESS) {
1614 1613 hermon_hw_fini(state, cleanup);
1615 1614 HERMON_ATTACH_MSG(state->hs_attach_buf,
1616 1615 "hw_init_CMD_BAR_regmap_fail");
1617 1616 /* This case is not the degraded one */
1618 1617 return (DDI_FAILURE);
1619 1618 }
1620 1619
1621 1620 cleanup = HERMON_DRV_CLEANUP_LEVEL1;
1622 1621 /*
1623 1622 * We defer UAR-BAR mapping until later. Need to know if
1624 1623 * blueflame mapping is to be done, and don't know that until after
1625 1624 * we get the dev_caps, so do it right after that
1626 1625 */
1627 1626
1628 1627 /*
1629 1628 * There is a third BAR defined for Hermon - it is for MSIX
1630 1629 *
1631 1630 	 * Will need to explore its possible need/use w/ Mellanox
1632 1631 * [es] Temporary mapping maybe
1633 1632 */
1634 1633
1635 1634 #ifdef HERMON_SUPPORTS_MSIX_BAR
1636 1635 status = ddi_regs_map_setup(state->hs_dip, HERMON_MSIX_BAR,
1637 1636 &state->hs_reg_msi_baseaddr, 0, 0, &state->hs_reg_accattr,
1638 1637 &state->hs_reg_msihdl);
1639 1638 if (status != DDI_SUCCESS) {
1640 1639 hermon_hw_fini(state, cleanup);
1641 1640 HERMON_ATTACH_MSG(state->hs_attach_buf,
1642 1641 "hw_init_MSIX_BAR_regmap_fail");
1643 1642 /* This case is not the degraded one */
1644 1643 return (DDI_FAILURE);
1645 1644 }
1646 1645 #endif
1647 1646
1648 1647 cleanup = HERMON_DRV_CLEANUP_LEVEL2;
1649 1648
1650 1649 /*
1651 1650 * Save interesting registers away. The offsets of the first two
1652 1651 * here (HCR and sw_reset) are detailed in the PRM, the others are
1653 1652 * derived from values in the QUERY_FW output, so we'll save them
1654 1653 * off later.
1655 1654 */
1656 1655 /* Host Command Register (HCR) */
1657 1656 state->hs_cmd_regs.hcr = (hermon_hw_hcr_t *)
1658 1657 ((uintptr_t)state->hs_reg_cmd_baseaddr + HERMON_CMD_HCR_OFFSET);
1659 1658 state->hs_cmd_toggle = 0; /* initialize it for use */
1660 1659
1661 1660 /* Software Reset register (sw_reset) and semaphore */
1662 1661 state->hs_cmd_regs.sw_reset = (uint32_t *)
1663 1662 ((uintptr_t)state->hs_reg_cmd_baseaddr +
1664 1663 HERMON_CMD_SW_RESET_OFFSET);
1665 1664 state->hs_cmd_regs.sw_semaphore = (uint32_t *)
1666 1665 ((uintptr_t)state->hs_reg_cmd_baseaddr +
1667 1666 HERMON_CMD_SW_SEMAPHORE_OFFSET);
1668 1667
1669 1668 /* make sure init'd before we start filling things in */
1670 1669 bzero(&state->hs_hcaparams, sizeof (struct hermon_hw_initqueryhca_s));
1671 1670
1672 1671 /* Initialize the Phase1 configuration profile */
1673 1672 status = hermon_cfg_profile_init_phase1(state);
1674 1673 if (status != DDI_SUCCESS) {
1675 1674 hermon_hw_fini(state, cleanup);
1676 1675 HERMON_ATTACH_MSG(state->hs_attach_buf,
1677 1676 "hw_init_cfginit1_fail");
1678 1677 /* This case is not the degraded one */
1679 1678 return (DDI_FAILURE);
1680 1679 }
1681 1680 cleanup = HERMON_DRV_CLEANUP_LEVEL3;
1682 1681
1683 1682 /* Do a software reset of the adapter to ensure proper state */
1684 1683 status = hermon_sw_reset(state);
1685 1684 if (status != HERMON_CMD_SUCCESS) {
1686 1685 hermon_hw_fini(state, cleanup);
1687 1686 HERMON_ATTACH_MSG(state->hs_attach_buf,
1688 1687 "hw_init_sw_reset_fail");
1689 1688 /* This case is not the degraded one */
1690 1689 return (DDI_FAILURE);
1691 1690 }
1692 1691
1693 1692 /* Initialize mailboxes */
1694 1693 status = hermon_rsrc_init_phase1(state);
1695 1694 if (status != DDI_SUCCESS) {
1696 1695 hermon_hw_fini(state, cleanup);
1697 1696 HERMON_ATTACH_MSG(state->hs_attach_buf,
1698 1697 "hw_init_rsrcinit1_fail");
1699 1698 /* This case is not the degraded one */
1700 1699 return (DDI_FAILURE);
1701 1700 }
1702 1701 cleanup = HERMON_DRV_CLEANUP_LEVEL4;
1703 1702
1704 1703 /* Post QUERY_FW */
1705 1704 status = hermon_cmn_query_cmd_post(state, QUERY_FW, 0, 0, &state->hs_fw,
1706 1705 sizeof (hermon_hw_queryfw_t), HERMON_CMD_NOSLEEP_SPIN);
1707 1706 if (status != HERMON_CMD_SUCCESS) {
1708 1707 cmn_err(CE_NOTE, "QUERY_FW command failed: %08x\n", status);
1709 1708 hermon_hw_fini(state, cleanup);
1710 1709 HERMON_ATTACH_MSG(state->hs_attach_buf,
1711 1710 "hw_init_query_fw_cmd_fail");
1712 1711 /* This case is not the degraded one */
1713 1712 return (DDI_FAILURE);
1714 1713 }
1715 1714
1716 1715 	/* Validate that the HERMON FW version is appropriate */
1717 1716
1718 1717 status = hermon_fw_version_check(state);
1719 1718 if (status != DDI_SUCCESS) {
1720 1719 HERMON_FMANOTE(state, HERMON_FMA_FWVER);
1721 1720 if (state->hs_operational_mode == HERMON_HCA_MODE) {
1722 1721 cmn_err(CE_CONT, "Unsupported Hermon FW version: "
1723 1722 "expected: %04d.%04d.%04d, "
1724 1723 "actual: %04d.%04d.%04d\n",
1725 1724 HERMON_FW_VER_MAJOR,
1726 1725 HERMON_FW_VER_MINOR,
1727 1726 HERMON_FW_VER_SUBMINOR,
1728 1727 state->hs_fw.fw_rev_major,
1729 1728 state->hs_fw.fw_rev_minor,
1730 1729 state->hs_fw.fw_rev_subminor);
1731 1730 } else {
1732 1731 cmn_err(CE_CONT, "Unsupported FW version: "
1733 1732 "%04d.%04d.%04d\n",
1734 1733 state->hs_fw.fw_rev_major,
1735 1734 state->hs_fw.fw_rev_minor,
1736 1735 state->hs_fw.fw_rev_subminor);
1737 1736 }
1738 1737 state->hs_operational_mode = HERMON_MAINTENANCE_MODE;
1739 1738 state->hs_fm_degraded_reason = HCA_FW_MISMATCH;
1740 1739 hermon_hw_fini(state, cleanup);
1741 1740 HERMON_ATTACH_MSG(state->hs_attach_buf,
1742 1741 "hw_init_checkfwver_fail");
1743 1742 /* This case is the degraded one */
1744 1743 return (HERMON_CMD_BAD_NVMEM);
1745 1744 }
1746 1745
1747 1746 /*
1748 1747 * Save off the rest of the interesting registers that we'll be using.
1749 1748 * Setup the offsets for the other registers.
1750 1749 */
1751 1750
1752 1751 /*
1753 1752 	 * Hermon reports the clr_intr offset relative to a BAR.  Technically we
1754 1753 	 * should take the BAR number from the QUERY_FW response, but the PRM says
1755 1754 	 * it is relative to BAR0-1, which for us is the CMD BAR.
1756 1755 */
1757 1756
1758 1757 clr_intr_offset = state->hs_fw.clr_intr_offs & HERMON_CMD_OFFSET_MASK;
1759 1758
1760 1759 /* Save Clear Interrupt address */
1761 1760 state->hs_cmd_regs.clr_intr = (uint64_t *)
1762 1761 (uintptr_t)(state->hs_reg_cmd_baseaddr + clr_intr_offset);
1763 1762
1764 1763 /*
1765 1764 * Set the error buffer also into the structure - used in hermon_event.c
1766 1765 * to check for internal error on the HCA, not reported in eqe or
1767 1766 * (necessarily) by interrupt
1768 1767 */
1769 1768 state->hs_cmd_regs.fw_err_buf = (uint32_t *)(uintptr_t)
1770 1769 (state->hs_reg_cmd_baseaddr + state->hs_fw.error_buf_addr);
1771 1770
1772 1771 /*
1773 1772 * Invoke a polling thread to check the error buffer periodically.
1774 1773 */
1775 1774 if (!hermon_no_inter_err_chk) {
1776 1775 state->hs_fm_poll_thread = ddi_periodic_add(
1777 1776 hermon_inter_err_chk, (void *)state, FM_POLL_INTERVAL,
1778 1777 DDI_IPL_0);
1779 1778 }
1780 1779
1781 1780 cleanup = HERMON_DRV_CLEANUP_LEVEL5;
1782 1781
1783 1782 /*
1784 1783 * Allocate, map, and run the HCA Firmware.
1785 1784 */
1786 1785
1787 1786 /* Allocate memory for the firmware to load into and map it */
1788 1787
1789 1788 /* get next higher power of 2 */
1790 1789 fw_size = 1 << highbit(state->hs_fw.fw_pages);
1791 1790 state->hs_fw_dma.length = fw_size << HERMON_PAGESHIFT;
1792 1791 status = hermon_dma_alloc(state, &state->hs_fw_dma, MAP_FA);
1793 1792 if (status != DDI_SUCCESS) {
1794 1793 cmn_err(CE_NOTE, "FW alloc failed\n");
1795 1794 hermon_hw_fini(state, cleanup);
1796 1795 HERMON_ATTACH_MSG(state->hs_attach_buf,
1797 1796 "hw_init_dma_alloc_fw_fail");
1798 1797 /* This case is not the degraded one */
1799 1798 return (DDI_FAILURE);
1800 1799 }
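The "next higher power of 2" rounding above is easiest to see with a concrete number; the page count below is purely illustrative (QUERY_FW supplies the real fw_pages value), and 4 KB HCA pages (a HERMON_PAGESHIFT of 12) are assumed. highbit() is the same DDI routine used above: it returns the 1-based position of the most significant set bit, so 1 << highbit(n) rounds n up to a power of two, doubling n when it already is one.

	uint32_t pages = 1300;				/* example value only */
	uint32_t rounded = 1 << highbit(pages);		/* highbit(1300) == 11, so 2048 */
	uint64_t bytes = (uint64_t)rounded << 12;	/* 2048 * 4 KB = 8 MB mapped for FW */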
1801 1800
1802 1801 cleanup = HERMON_DRV_CLEANUP_LEVEL6;
1803 1802
1804 1803 /* Invoke the RUN_FW cmd to run the firmware */
1805 1804 status = hermon_run_fw_cmd_post(state);
1806 1805 if (status != DDI_SUCCESS) {
1807 1806 cmn_err(CE_NOTE, "RUN_FW command failed: 0x%08x\n", status);
1808 1807 if (status == HERMON_CMD_BAD_NVMEM) {
1809 1808 state->hs_operational_mode = HERMON_MAINTENANCE_MODE;
1810 1809 state->hs_fm_degraded_reason = HCA_FW_CORRUPT;
1811 1810 }
1812 1811 hermon_hw_fini(state, cleanup);
1813 1812 HERMON_ATTACH_MSG(state->hs_attach_buf, "hw_init_run_fw_fail");
1814 1813 /*
1815 1814 * If the status is HERMON_CMD_BAD_NVMEM, it's likely the
1816 1815 * firmware is corrupted, so the mode falls into the
1817 1816 * maintenance mode.
1818 1817 */
1819 1818 return (status == HERMON_CMD_BAD_NVMEM ? HERMON_CMD_BAD_NVMEM :
1820 1819 DDI_FAILURE);
1821 1820 }
1822 1821
1823 1822
1824 1823 /*
1825 1824 * QUERY DEVICE LIMITS/CAPABILITIES
1826 1825 * NOTE - in Hermon, the command is changed to QUERY_DEV_CAP,
1827 1826 * but for familiarity we have kept the structure name the
1828 1827 * same as Tavor/Arbel
1829 1828 */
1830 1829
1831 1830 status = hermon_cmn_query_cmd_post(state, QUERY_DEV_CAP, 0, 0,
1832 1831 &state->hs_devlim, sizeof (hermon_hw_querydevlim_t),
1833 1832 HERMON_CMD_NOSLEEP_SPIN);
1834 1833 if (status != HERMON_CMD_SUCCESS) {
1835 1834 cmn_err(CE_NOTE, "QUERY_DEV_CAP command failed: 0x%08x\n",
1836 1835 status);
1837 1836 hermon_hw_fini(state, cleanup);
1838 1837 HERMON_ATTACH_MSG(state->hs_attach_buf, "hw_init_devcap_fail");
1839 1838 /* This case is not the degraded one */
1840 1839 return (DDI_FAILURE);
1841 1840 }
1842 1841
1843 1842 state->hs_rsvd_eqs = max(state->hs_devlim.num_rsvd_eq,
1844 1843 (4 * state->hs_devlim.num_rsvd_uar));
1845 1844
1846 1845 /* now we have enough info to map in the UAR BAR */
1847 1846 /*
1848 1847 	 * First, we figure out how to map the BAR for UAR - use only half if
1849 1848 	 * BlueFlame is enabled - in that case the mapped length is half the BAR,
1850 1849 	 * i.e. 1 << (log_max_uar_sz + 20) bytes (log_max_uar_sz is in MB units).
1851 1850 */
1852 1851
1853 1852 if (state->hs_devlim.blu_flm) { /* Blue Flame Enabled */
1854 1853 offset = (uint64_t)1 << (state->hs_devlim.log_max_uar_sz + 20);
1855 1854 } else {
1856 1855 offset = 0; /* a zero length means map the whole thing */
1857 1856 }
1858 1857 status = hermon_regs_map_setup(state, HERMON_UAR_BAR,
1859 1858 &state->hs_reg_uar_baseaddr, 0, offset, &state->hs_fm_accattr,
1860 1859 &state->hs_fm_uarhdl);
1861 1860 if (status != DDI_SUCCESS) {
1862 1861 HERMON_ATTACH_MSG(state->hs_attach_buf, "UAR BAR mapping");
1863 1862 /* This case is not the degraded one */
1864 1863 return (DDI_FAILURE);
1865 1864 }
1866 1865
1867 1866 /* and if BlueFlame is enabled, map the other half there */
1868 1867 if (state->hs_devlim.blu_flm) { /* Blue Flame Enabled */
1869 1868 offset = (uint64_t)1 << (state->hs_devlim.log_max_uar_sz + 20);
1870 1869 status = ddi_regs_map_setup(state->hs_dip, HERMON_UAR_BAR,
1871 1870 &state->hs_reg_bf_baseaddr, offset, offset,
1872 1871 &state->hs_reg_accattr, &state->hs_reg_bfhdl);
1873 1872 if (status != DDI_SUCCESS) {
1874 1873 HERMON_ATTACH_MSG(state->hs_attach_buf,
1875 1874 "BlueFlame BAR mapping");
1876 1875 /* This case is not the degraded one */
1877 1876 return (DDI_FAILURE);
1878 1877 }
1879 1878 /* This will be used in hw_fini if we fail to init. */
1880 1879 state->hs_bf_offset = offset;
1881 1880 }
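As a concrete illustration of the split above (the device limit used here is an assumption, not a queried value): if the HCA reported log_max_uar_sz = 10, then offset = 1 << (10 + 20) = 1 GB, so the first 1 GB of the UAR BAR is mapped for UAR access and the second 1 GB, beginning at hs_bf_offset, is mapped separately for BlueFlame doorbells. When blu_flm is clear, the zero length passed to hermon_regs_map_setup() maps the entire BAR as UAR space.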
1882 1881 cleanup = HERMON_DRV_CLEANUP_LEVEL7;
1883 1882
1884 1883 /* Hermon has a couple of things needed for phase 2 in query port */
1885 1884
1886 1885 status = hermon_cmn_query_cmd_post(state, QUERY_PORT, 0, 0x01,
1887 1886 &state->hs_queryport, sizeof (hermon_hw_query_port_t),
1888 1887 HERMON_CMD_NOSLEEP_SPIN);
1889 1888 if (status != HERMON_CMD_SUCCESS) {
1890 1889 cmn_err(CE_NOTE, "QUERY_PORT command failed: 0x%08x\n",
1891 1890 status);
1892 1891 hermon_hw_fini(state, cleanup);
1893 1892 HERMON_ATTACH_MSG(state->hs_attach_buf,
1894 1893 "hw_init_queryport_fail");
1895 1894 /* This case is not the degraded one */
1896 1895 return (DDI_FAILURE);
1897 1896 }
1898 1897
1899 1898 /* Initialize the Phase2 Hermon configuration profile */
1900 1899 status = hermon_cfg_profile_init_phase2(state);
1901 1900 if (status != DDI_SUCCESS) {
1902 1901 cmn_err(CE_NOTE, "CFG phase 2 failed: 0x%08x\n", status);
1903 1902 hermon_hw_fini(state, cleanup);
1904 1903 HERMON_ATTACH_MSG(state->hs_attach_buf,
1905 1904 "hw_init_cfginit2_fail");
1906 1905 /* This case is not the degraded one */
1907 1906 return (DDI_FAILURE);
1908 1907 }
1909 1908
1910 1909 /* Determine and set the ICM size */
1911 1910 state->hs_icm_sz = hermon_size_icm(state);
1912 1911 status = hermon_set_icm_size_cmd_post(state);
1913 1912 if (status != DDI_SUCCESS) {
1914 1913 cmn_err(CE_NOTE, "Hermon: SET_ICM_SIZE cmd failed: 0x%08x\n",
1915 1914 status);
1916 1915 hermon_hw_fini(state, cleanup);
1917 1916 HERMON_ATTACH_MSG(state->hs_attach_buf,
1918 1917 "hw_init_seticmsz_fail");
1919 1918 /* This case is not the degraded one */
1920 1919 return (DDI_FAILURE);
1921 1920 }
1922 1921 /* alloc icm aux physical memory and map it */
1923 1922
1924 1923 state->hs_icma_dma.length = 1 << highbit(state->hs_icma_sz);
1925 1924
1926 1925 status = hermon_dma_alloc(state, &state->hs_icma_dma, MAP_ICM_AUX);
1927 1926 if (status != DDI_SUCCESS) {
1928 1927 cmn_err(CE_NOTE, "failed to alloc (0x%llx) bytes for ICMA\n",
1929 1928 (longlong_t)state->hs_icma_dma.length);
1930 1929 hermon_hw_fini(state, cleanup);
1931 1930 HERMON_ATTACH_MSG(state->hs_attach_buf,
1932 1931 "hw_init_dma_alloc_icm_aux_fail");
1933 1932 /* This case is not the degraded one */
1934 1933 return (DDI_FAILURE);
1935 1934 }
1936 1935 cleanup = HERMON_DRV_CLEANUP_LEVEL8;
1937 1936
1938 1937 cleanup = HERMON_DRV_CLEANUP_LEVEL9;
1939 1938
1940 1939 /* Allocate an array of structures to house the ICM tables */
1941 1940 state->hs_icm = kmem_zalloc(HERMON_NUM_ICM_RESOURCES *
1942 1941 sizeof (hermon_icm_table_t), KM_SLEEP);
1943 1942
1944 1943 /* Set up the ICM address space and the INIT_HCA command input */
1945 1944 status = hermon_icm_config_setup(state, &state->hs_hcaparams);
1946 1945 if (status != HERMON_CMD_SUCCESS) {
1947 1946 cmn_err(CE_NOTE, "ICM configuration failed\n");
1948 1947 hermon_hw_fini(state, cleanup);
1949 1948 HERMON_ATTACH_MSG(state->hs_attach_buf,
1950 1949 "hw_init_icm_config_setup_fail");
1951 1950 /* This case is not the degraded one */
1952 1951 return (DDI_FAILURE);
1953 1952 }
1954 1953 cleanup = HERMON_DRV_CLEANUP_LEVEL10;
1955 1954
1956 1955 /* Initialize the adapter with the INIT_HCA cmd */
1957 1956 status = hermon_init_hca_cmd_post(state, &state->hs_hcaparams,
1958 1957 HERMON_CMD_NOSLEEP_SPIN);
1959 1958 if (status != HERMON_CMD_SUCCESS) {
1960 1959 cmn_err(CE_NOTE, "INIT_HCA command failed: %08x\n", status);
1961 1960 hermon_hw_fini(state, cleanup);
1962 1961 HERMON_ATTACH_MSG(state->hs_attach_buf, "hw_init_hca_fail");
1963 1962 /* This case is not the degraded one */
1964 1963 return (DDI_FAILURE);
1965 1964 }
1966 1965 cleanup = HERMON_DRV_CLEANUP_LEVEL11;
1967 1966
1968 1967 /* Enter the second phase of init for Hermon configuration/resources */
1969 1968 status = hermon_rsrc_init_phase2(state);
1970 1969 if (status != DDI_SUCCESS) {
1971 1970 hermon_hw_fini(state, cleanup);
1972 1971 HERMON_ATTACH_MSG(state->hs_attach_buf,
1973 1972 "hw_init_rsrcinit2_fail");
1974 1973 /* This case is not the degraded one */
1975 1974 return (DDI_FAILURE);
1976 1975 }
1977 1976 cleanup = HERMON_DRV_CLEANUP_LEVEL12;
1978 1977
1979 1978 /* Query the adapter via QUERY_ADAPTER */
1980 1979 status = hermon_cmn_query_cmd_post(state, QUERY_ADAPTER, 0, 0,
1981 1980 &state->hs_adapter, sizeof (hermon_hw_queryadapter_t),
1982 1981 HERMON_CMD_NOSLEEP_SPIN);
1983 1982 if (status != HERMON_CMD_SUCCESS) {
1984 1983 cmn_err(CE_NOTE, "Hermon: QUERY_ADAPTER command failed: %08x\n",
1985 1984 status);
1986 1985 hermon_hw_fini(state, cleanup);
1987 1986 HERMON_ATTACH_MSG(state->hs_attach_buf,
1988 1987 "hw_init_query_adapter_fail");
1989 1988 /* This case is not the degraded one */
1990 1989 return (DDI_FAILURE);
1991 1990 }
1992 1991
1993 1992 /* Allocate protection domain (PD) for Hermon internal use */
1994 1993 status = hermon_pd_alloc(state, &state->hs_pdhdl_internal,
1995 1994 HERMON_SLEEP);
1996 1995 if (status != DDI_SUCCESS) {
1997 1996 cmn_err(CE_NOTE, "failed to alloc internal PD\n");
1998 1997 hermon_hw_fini(state, cleanup);
1999 1998 HERMON_ATTACH_MSG(state->hs_attach_buf,
2000 1999 "hw_init_internal_pd_alloc_fail");
2001 2000 /* This case is not the degraded one */
2002 2001 return (DDI_FAILURE);
2003 2002 }
2004 2003 cleanup = HERMON_DRV_CLEANUP_LEVEL13;
2005 2004
2006 2005 /* Setup UAR page for kernel use */
2007 2006 status = hermon_internal_uarpg_init(state);
2008 2007 if (status != DDI_SUCCESS) {
2009 2008 cmn_err(CE_NOTE, "failed to setup internal UAR\n");
2010 2009 hermon_hw_fini(state, cleanup);
2011 2010 HERMON_ATTACH_MSG(state->hs_attach_buf,
2012 2011 "hw_init_internal_uarpg_alloc_fail");
2013 2012 /* This case is not the degraded one */
2014 2013 return (DDI_FAILURE);
2015 2014 }
2016 2015 cleanup = HERMON_DRV_CLEANUP_LEVEL14;
2017 2016
2018 2017 /* Query and initialize the Hermon interrupt/MSI information */
2019 2018 status = hermon_intr_or_msi_init(state);
2020 2019 if (status != DDI_SUCCESS) {
2021 2020 cmn_err(CE_NOTE, "failed to setup INTR/MSI\n");
2022 2021 hermon_hw_fini(state, cleanup);
2023 2022 HERMON_ATTACH_MSG(state->hs_attach_buf,
2024 2023 "hw_init_intr_or_msi_init_fail");
2025 2024 /* This case is not the degraded one */
2026 2025 return (DDI_FAILURE);
2027 2026 }
2028 2027 cleanup = HERMON_DRV_CLEANUP_LEVEL15;
2029 2028
2030 2029 status = hermon_isr_init(state); /* set up the isr */
2031 2030 if (status != DDI_SUCCESS) {
2032 2031 cmn_err(CE_NOTE, "failed to init isr\n");
2033 2032 hermon_hw_fini(state, cleanup);
2034 2033 HERMON_ATTACH_MSG(state->hs_attach_buf,
2035 2034 "hw_init_isrinit_fail");
2036 2035 /* This case is not the degraded one */
2037 2036 return (DDI_FAILURE);
2038 2037 }
2039 2038 cleanup = HERMON_DRV_CLEANUP_LEVEL16;
2040 2039
2041 2040 /* Setup the event queues */
2042 2041 status = hermon_eq_init_all(state);
2043 2042 if (status != DDI_SUCCESS) {
2044 2043 cmn_err(CE_NOTE, "failed to init EQs\n");
2045 2044 hermon_hw_fini(state, cleanup);
2046 2045 HERMON_ATTACH_MSG(state->hs_attach_buf,
2047 2046 "hw_init_eqinitall_fail");
2048 2047 /* This case is not the degraded one */
2049 2048 return (DDI_FAILURE);
2050 2049 }
2051 2050 cleanup = HERMON_DRV_CLEANUP_LEVEL17;
2052 2051
2053 2052
2054 2053
2055 2054 /* Reserve contexts for QP0 and QP1 */
2056 2055 status = hermon_special_qp_contexts_reserve(state);
2057 2056 if (status != DDI_SUCCESS) {
2058 2057 cmn_err(CE_NOTE, "failed to init special QPs\n");
2059 2058 hermon_hw_fini(state, cleanup);
2060 2059 HERMON_ATTACH_MSG(state->hs_attach_buf,
2061 2060 "hw_init_rsrv_sqp_fail");
2062 2061 /* This case is not the degraded one */
2063 2062 return (DDI_FAILURE);
2064 2063 }
2065 2064 cleanup = HERMON_DRV_CLEANUP_LEVEL18;
2066 2065
2067 2066 /* Initialize for multicast group handling */
2068 2067 status = hermon_mcg_init(state);
2069 2068 if (status != DDI_SUCCESS) {
2070 2069 cmn_err(CE_NOTE, "failed to init multicast\n");
2071 2070 hermon_hw_fini(state, cleanup);
2072 2071 HERMON_ATTACH_MSG(state->hs_attach_buf,
2073 2072 "hw_init_mcg_init_fail");
2074 2073 /* This case is not the degraded one */
2075 2074 return (DDI_FAILURE);
2076 2075 }
2077 2076 cleanup = HERMON_DRV_CLEANUP_LEVEL19;
2078 2077
2079 2078 /* Initialize the Hermon IB port(s) */
2080 2079 status = hermon_hca_port_init(state);
2081 2080 if (status != DDI_SUCCESS) {
2082 2081 cmn_err(CE_NOTE, "failed to init HCA Port\n");
2083 2082 hermon_hw_fini(state, cleanup);
2084 2083 HERMON_ATTACH_MSG(state->hs_attach_buf,
2085 2084 "hw_init_hca_port_init_fail");
2086 2085 /* This case is not the degraded one */
2087 2086 return (DDI_FAILURE);
2088 2087 }
2089 2088
2090 2089 cleanup = HERMON_DRV_CLEANUP_ALL;
2091 2090
2092 2091 /* Determine NodeGUID and SystemImageGUID */
2093 2092 status = hermon_getnodeinfo_cmd_post(state, HERMON_CMD_NOSLEEP_SPIN,
2094 2093 &nodeinfo);
2095 2094 if (status != HERMON_CMD_SUCCESS) {
2096 2095 cmn_err(CE_NOTE, "GetNodeInfo command failed: %08x\n", status);
2097 2096 hermon_hw_fini(state, cleanup);
2098 2097 HERMON_ATTACH_MSG(state->hs_attach_buf,
2099 2098 "hw_init_getnodeinfo_cmd_fail");
2100 2099 /* This case is not the degraded one */
2101 2100 return (DDI_FAILURE);
2102 2101 }
2103 2102
2104 2103 /*
2105 2104 * If the NodeGUID value was set in OBP properties, then we use that
2106 2105 * value. But we still print a message if the value we queried from
2107 2106 * firmware does not match this value.
2108 2107 *
2109 2108 * Otherwise if OBP value is not set then we use the value from
2110 2109 * firmware unconditionally.
2111 2110 */
2112 2111 if (state->hs_cfg_profile->cp_nodeguid) {
2113 2112 state->hs_nodeguid = state->hs_cfg_profile->cp_nodeguid;
2114 2113 } else {
2115 2114 state->hs_nodeguid = nodeinfo.NodeGUID;
2116 2115 }
2117 2116
2118 2117 if (state->hs_nodeguid != nodeinfo.NodeGUID) {
2119 2118 cmn_err(CE_NOTE, "!NodeGUID value queried from firmware "
2120 2119 "does not match value set by device property");
2121 2120 }
2122 2121
2123 2122 /*
2124 2123 * If the SystemImageGUID value was set in OBP properties, then we use
2125 2124 * that value. But we still print a message if the value we queried
2126 2125 * from firmware does not match this value.
2127 2126 *
2128 2127 * Otherwise if OBP value is not set then we use the value from
2129 2128 * firmware unconditionally.
2130 2129 */
2131 2130 if (state->hs_cfg_profile->cp_sysimgguid) {
2132 2131 state->hs_sysimgguid = state->hs_cfg_profile->cp_sysimgguid;
2133 2132 } else {
2134 2133 state->hs_sysimgguid = nodeinfo.SystemImageGUID;
2135 2134 }
2136 2135
2137 2136 if (state->hs_sysimgguid != nodeinfo.SystemImageGUID) {
2138 2137 cmn_err(CE_NOTE, "!SystemImageGUID value queried from firmware "
2139 2138 "does not match value set by device property");
2140 2139 }
2141 2140
2142 2141 /* Get NodeDescription */
2143 2142 status = hermon_getnodedesc_cmd_post(state, HERMON_CMD_NOSLEEP_SPIN,
2144 2143 (sm_nodedesc_t *)&state->hs_nodedesc);
2145 2144 if (status != HERMON_CMD_SUCCESS) {
2146 2145 cmn_err(CE_CONT, "GetNodeDesc command failed: %08x\n", status);
2147 2146 hermon_hw_fini(state, cleanup);
2148 2147 HERMON_ATTACH_MSG(state->hs_attach_buf,
2149 2148 "hw_init_getnodedesc_cmd_fail");
2150 2149 /* This case is not the degraded one */
2151 2150 return (DDI_FAILURE);
2152 2151 }
2153 2152
2154 2153 return (DDI_SUCCESS);
2155 2154 }
2156 2155
2157 2156
2158 2157 /*
2159 2158 * hermon_hw_fini()
2160 2159 * Context: Only called from attach() and/or detach() path contexts
2161 2160 */
2162 2161 static void
2163 2162 hermon_hw_fini(hermon_state_t *state, hermon_drv_cleanup_level_t cleanup)
2164 2163 {
2165 2164 uint_t num_ports;
2166 2165 int i, status;
2167 2166
2168 2167
2169 2168 /*
2170 2169 * JBDB - We might not want to run these returns in all cases of
2171 2170 * Bad News. We should still attempt to free all of the DMA memory
2172 2171 	 * resources... This needs to be worked on last, after all allocations
2173 2172 * are implemented. For now, and possibly for later, this works.
2174 2173 */
2175 2174
2176 2175 switch (cleanup) {
2177 2176 /*
2178 2177 * If we add more driver initialization steps that should be cleaned
2179 2178 * up here, we need to ensure that HERMON_DRV_CLEANUP_ALL is still the
2180 2179 * first entry (i.e. corresponds to the last init step).
2181 2180 */
2182 2181 case HERMON_DRV_CLEANUP_ALL:
2183 2182 /* Shutdown the Hermon IB port(s) */
2184 2183 num_ports = state->hs_cfg_profile->cp_num_ports;
2185 2184 (void) hermon_hca_ports_shutdown(state, num_ports);
2186 2185 /* FALLTHROUGH */
2187 2186
2188 2187 case HERMON_DRV_CLEANUP_LEVEL19:
2189 2188 /* Teardown resources used for multicast group handling */
2190 2189 hermon_mcg_fini(state);
2191 2190 /* FALLTHROUGH */
2192 2191
2193 2192 case HERMON_DRV_CLEANUP_LEVEL18:
2194 2193 /* Unreserve the special QP contexts */
2195 2194 hermon_special_qp_contexts_unreserve(state);
2196 2195 /* FALLTHROUGH */
2197 2196
2198 2197 case HERMON_DRV_CLEANUP_LEVEL17:
2199 2198 /*
2200 2199 * Attempt to teardown all event queues (EQ). If we fail
2201 2200 * here then print a warning message and return. Something
2202 2201 * (either in HW or SW) has gone seriously wrong.
2203 2202 */
2204 2203 status = hermon_eq_fini_all(state);
2205 2204 if (status != DDI_SUCCESS) {
2206 2205 HERMON_WARNING(state, "failed to teardown EQs");
2207 2206 return;
2208 2207 }
2209 2208 /* FALLTHROUGH */
2210 2209 case HERMON_DRV_CLEANUP_LEVEL16:
2211 2210 /* Teardown Hermon interrupts */
2212 2211 hermon_isr_fini(state);
2213 2212 /* FALLTHROUGH */
2214 2213
2215 2214 case HERMON_DRV_CLEANUP_LEVEL15:
2216 2215 status = hermon_intr_or_msi_fini(state);
2217 2216 if (status != DDI_SUCCESS) {
2218 2217 HERMON_WARNING(state, "failed to free intr/MSI");
2219 2218 return;
2220 2219 }
2221 2220 /* FALLTHROUGH */
2222 2221
2223 2222 case HERMON_DRV_CLEANUP_LEVEL14:
2224 2223 /* Free the resources for the Hermon internal UAR pages */
2225 2224 hermon_internal_uarpg_fini(state);
2226 2225 /* FALLTHROUGH */
2227 2226
2228 2227 case HERMON_DRV_CLEANUP_LEVEL13:
2229 2228 /*
2230 2229 * Free the PD that was used internally by Hermon software. If
2231 2230 * we fail here then print a warning and return. Something
2232 2231 * (probably software-related, but perhaps HW) has gone wrong.
2233 2232 */
2234 2233 status = hermon_pd_free(state, &state->hs_pdhdl_internal);
2235 2234 if (status != DDI_SUCCESS) {
2236 2235 HERMON_WARNING(state, "failed to free internal PD");
2237 2236 return;
2238 2237 }
2239 2238 /* FALLTHROUGH */
2240 2239
2241 2240 case HERMON_DRV_CLEANUP_LEVEL12:
2242 2241 /* Cleanup all the phase2 resources first */
2243 2242 hermon_rsrc_fini(state, HERMON_RSRC_CLEANUP_ALL);
2244 2243 /* FALLTHROUGH */
2245 2244
2246 2245 case HERMON_DRV_CLEANUP_LEVEL11:
2247 2246 /* LEVEL11 is after INIT_HCA */
2248 2247 /* FALLTHROUGH */
2249 2248
2250 2249
2251 2250 case HERMON_DRV_CLEANUP_LEVEL10:
2252 2251 /*
2253 2252 * Unmap the ICM memory area with UNMAP_ICM command.
2254 2253 */
2255 2254 status = hermon_unmap_icm_cmd_post(state, NULL);
2256 2255 if (status != DDI_SUCCESS) {
2257 2256 cmn_err(CE_WARN,
2258 2257 "hermon_hw_fini: failed to unmap ICM\n");
2259 2258 }
2260 2259
2261 2260 /* Free the initial ICM DMA handles */
2262 2261 hermon_icm_dma_fini(state);
2263 2262
2264 2263 /* Free the ICM table structures */
2265 2264 hermon_icm_tables_fini(state);
2266 2265
2267 2266 /* Free the ICM table handles */
2268 2267 kmem_free(state->hs_icm, HERMON_NUM_ICM_RESOURCES *
2269 2268 sizeof (hermon_icm_table_t));
2270 2269
2271 2270 /* FALLTHROUGH */
2272 2271
2273 2272 case HERMON_DRV_CLEANUP_LEVEL9:
2274 2273 /*
2275 2274 * Unmap the ICM Aux memory area with UNMAP_ICM_AUX command.
2276 2275 */
2277 2276 status = hermon_unmap_icm_aux_cmd_post(state);
2278 2277 if (status != HERMON_CMD_SUCCESS) {
2279 2278 cmn_err(CE_NOTE,
2280 2279 "hermon_hw_fini: failed to unmap ICMA\n");
2281 2280 }
2282 2281 /* FALLTHROUGH */
2283 2282
2284 2283 case HERMON_DRV_CLEANUP_LEVEL8:
2285 2284 /*
2286 2285 * Deallocate ICM Aux DMA memory.
2287 2286 */
2288 2287 hermon_dma_free(&state->hs_icma_dma);
2289 2288 /* FALLTHROUGH */
2290 2289
2291 2290 case HERMON_DRV_CLEANUP_LEVEL7:
2292 2291 if (state->hs_fm_uarhdl) {
2293 2292 hermon_regs_map_free(state, &state->hs_fm_uarhdl);
2294 2293 state->hs_fm_uarhdl = NULL;
2295 2294 }
2296 2295
2297 2296 if (state->hs_reg_uarhdl) {
2298 2297 ddi_regs_map_free(&state->hs_reg_uarhdl);
2299 2298 state->hs_reg_uarhdl = NULL;
2300 2299 }
2301 2300
2302 2301 if (state->hs_bf_offset != 0 && state->hs_reg_bfhdl) {
2303 2302 ddi_regs_map_free(&state->hs_reg_bfhdl);
2304 2303 state->hs_reg_bfhdl = NULL;
2305 2304 }
2306 2305
2307 2306 for (i = 0; i < HERMON_MAX_PORTS; i++) {
2308 2307 if (state->hs_pkey[i]) {
2309 2308 kmem_free(state->hs_pkey[i], (1 <<
2310 2309 state->hs_cfg_profile->cp_log_max_pkeytbl) *
2311 2310 sizeof (ib_pkey_t));
2312 2311 state->hs_pkey[i] = NULL;
2313 2312 }
2314 2313 if (state->hs_guid[i]) {
2315 2314 kmem_free(state->hs_guid[i], (1 <<
2316 2315 state->hs_cfg_profile->cp_log_max_gidtbl) *
2317 2316 sizeof (ib_guid_t));
2318 2317 state->hs_guid[i] = NULL;
2319 2318 }
2320 2319 }
2321 2320 /* FALLTHROUGH */
2322 2321
2323 2322 case HERMON_DRV_CLEANUP_LEVEL6:
2324 2323 /*
2325 2324 * Unmap the firmware memory area with UNMAP_FA command.
2326 2325 */
2327 2326 status = hermon_unmap_fa_cmd_post(state);
2328 2327
2329 2328 if (status != HERMON_CMD_SUCCESS) {
2330 2329 cmn_err(CE_NOTE,
2331 2330 "hermon_hw_fini: failed to unmap FW\n");
2332 2331 }
2333 2332
2334 2333 /*
2335 2334 * Deallocate firmware DMA memory.
2336 2335 */
2337 2336 hermon_dma_free(&state->hs_fw_dma);
2338 2337 /* FALLTHROUGH */
2339 2338
2340 2339 case HERMON_DRV_CLEANUP_LEVEL5:
2341 2340 /* stop the poll thread */
2342 2341 if (state->hs_fm_poll_thread) {
2343 2342 ddi_periodic_delete(state->hs_fm_poll_thread);
2344 2343 state->hs_fm_poll_thread = NULL;
2345 2344 }
2346 2345 /* FALLTHROUGH */
2347 2346
2348 2347 case HERMON_DRV_CLEANUP_LEVEL4:
2349 2348 /* Then cleanup the phase1 resources */
2350 2349 hermon_rsrc_fini(state, HERMON_RSRC_CLEANUP_PHASE1_COMPLETE);
2351 2350 /* FALLTHROUGH */
2352 2351
2353 2352 case HERMON_DRV_CLEANUP_LEVEL3:
2354 2353 /* Teardown any resources allocated for the config profile */
2355 2354 hermon_cfg_profile_fini(state);
2356 2355 /* FALLTHROUGH */
2357 2356
2358 2357 case HERMON_DRV_CLEANUP_LEVEL2:
2359 2358 #ifdef HERMON_SUPPORTS_MSIX_BAR
2360 2359 /*
2361 2360 * unmap 3rd BAR, MSIX BAR
2362 2361 */
2363 2362 if (state->hs_reg_msihdl) {
2364 2363 ddi_regs_map_free(&state->hs_reg_msihdl);
2365 2364 state->hs_reg_msihdl = NULL;
2366 2365 }
2367 2366 /* FALLTHROUGH */
2368 2367 #endif
2369 2368 case HERMON_DRV_CLEANUP_LEVEL1:
2370 2369 case HERMON_DRV_CLEANUP_LEVEL0:
2371 2370 /*
2372 2371 * LEVEL1 and LEVEL0 resources are freed in
2373 2372 * hermon_drv_fini2().
2374 2373 */
2375 2374 break;
2376 2375
2377 2376 default:
2378 2377 HERMON_WARNING(state, "unexpected driver cleanup level");
2379 2378 return;
2380 2379 }
2381 2380 }
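The switch above depends on deliberate case fall-through: hermon_hw_init() records the highest cleanup level it has reached, and passing that level here unwinds every completed step in reverse order. The sketch below is a minimal standalone illustration of the same pattern; the ex_* type and helper routines are hypothetical placeholders, not names from this driver.

	typedef struct ex_state ex_state_t;

	typedef enum {
		EX_CLEANUP_LEVEL0,	/* nothing to undo */
		EX_CLEANUP_LEVEL1,	/* step 1 completed */
		EX_CLEANUP_LEVEL2,	/* steps 1 and 2 completed */
		EX_CLEANUP_ALL		/* all steps completed */
	} ex_cleanup_t;

	static void
	ex_fini(ex_state_t *sp, ex_cleanup_t level)
	{
		switch (level) {
		case EX_CLEANUP_ALL:
			ex_undo_step3(sp);	/* hypothetical teardown helper */
			/* FALLTHROUGH */
		case EX_CLEANUP_LEVEL2:
			ex_undo_step2(sp);	/* hypothetical teardown helper */
			/* FALLTHROUGH */
		case EX_CLEANUP_LEVEL1:
			ex_undo_step1(sp);	/* hypothetical teardown helper */
			/* FALLTHROUGH */
		case EX_CLEANUP_LEVEL0:
			break;
		}
	}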
2382 2381
2383 2382
2384 2383 /*
2385 2384 * hermon_soft_state_init()
2386 2385 * Context: Only called from attach() path context
2387 2386 */
2388 2387 static int
2389 2388 hermon_soft_state_init(hermon_state_t *state)
2390 2389 {
2391 2390 ibt_hca_attr_t *hca_attr;
2392 2391 uint64_t maxval, val;
2393 2392 ibt_hca_flags_t caps = IBT_HCA_NO_FLAGS;
2394 2393 ibt_hca_flags2_t caps2 = IBT_HCA2_NO_FLAGS;
2395 2394 int status;
2396 2395 int max_send_wqe_bytes;
2397 2396 int max_recv_wqe_bytes;
2398 2397
2399 2398 /*
2400 2399 * The ibc_hca_info_t struct is passed to the IBTF. This is the
2401 2400 * routine where we initialize it. Many of the init values come from
2402 2401 * either configuration variables or successful queries of the Hermon
2403 2402 * hardware abilities
2404 2403 */
2405 2404 state->hs_ibtfinfo.hca_ci_vers = IBCI_V4;
2406 2405 state->hs_ibtfinfo.hca_handle = (ibc_hca_hdl_t)state;
2407 2406 state->hs_ibtfinfo.hca_ops = &hermon_ibc_ops;
2408 2407
2409 2408 hca_attr = kmem_zalloc(sizeof (ibt_hca_attr_t), KM_SLEEP);
2410 2409 state->hs_ibtfinfo.hca_attr = hca_attr;
2411 2410
2412 2411 hca_attr->hca_dip = state->hs_dip;
2413 2412 hca_attr->hca_fw_major_version = state->hs_fw.fw_rev_major;
2414 2413 hca_attr->hca_fw_minor_version = state->hs_fw.fw_rev_minor;
2415 2414 hca_attr->hca_fw_micro_version = state->hs_fw.fw_rev_subminor;
2416 2415
2417 2416 /* CQ interrupt moderation maximums - each limited to 16 bits */
2418 2417 hca_attr->hca_max_cq_mod_count = 0xFFFF;
2419 2418 hca_attr->hca_max_cq_mod_usec = 0xFFFF;
2420 2419 hca_attr->hca_max_cq_handlers = state->hs_intrmsi_allocd;
2421 2420
2422 2421
2423 2422 /*
2424 2423 * Determine HCA capabilities:
2425 2424 * No default support for IBT_HCA_RD, IBT_HCA_RAW_MULTICAST,
2426 2425 * IBT_HCA_ATOMICS_GLOBAL, IBT_HCA_RESIZE_CHAN, IBT_HCA_INIT_TYPE,
2427 2426 * or IBT_HCA_SHUTDOWN_PORT
2428 2427 * But IBT_HCA_AH_PORT_CHECK, IBT_HCA_SQD_RTS_PORT, IBT_HCA_SI_GUID,
2429 2428 * IBT_HCA_RNR_NAK, IBT_HCA_CURRENT_QP_STATE, IBT_HCA_PORT_UP,
2430 2429 * IBT_HCA_SRQ, IBT_HCA_RESIZE_SRQ and IBT_HCA_FMR are always
2431 2430 * supported
2432 2431 * All other features are conditionally supported, depending on the
2433 2432 	 * status returned by the Hermon HCA in QUERY_DEV_LIM.
2434 2433 */
2435 2434 if (state->hs_devlim.ud_multi) {
2436 2435 caps |= IBT_HCA_UD_MULTICAST;
2437 2436 }
2438 2437 if (state->hs_devlim.atomic) {
2439 2438 caps |= IBT_HCA_ATOMICS_HCA;
2440 2439 }
2441 2440 if (state->hs_devlim.apm) {
2442 2441 caps |= IBT_HCA_AUTO_PATH_MIG;
2443 2442 }
2444 2443 if (state->hs_devlim.pkey_v) {
2445 2444 caps |= IBT_HCA_PKEY_CNTR;
2446 2445 }
2447 2446 if (state->hs_devlim.qkey_v) {
2448 2447 caps |= IBT_HCA_QKEY_CNTR;
2449 2448 }
2450 2449 if (state->hs_devlim.ipoib_cksm) {
2451 2450 caps |= IBT_HCA_CKSUM_FULL;
2452 2451 caps2 |= IBT_HCA2_IP_CLASS;
2453 2452 }
2454 2453 if (state->hs_devlim.mod_wr_srq) {
2455 2454 caps |= IBT_HCA_RESIZE_SRQ;
2456 2455 }
2457 2456 if (state->hs_devlim.lif) {
2458 2457 caps |= IBT_HCA_LOCAL_INVAL_FENCE;
2459 2458 }
2460 2459 if (state->hs_devlim.reserved_lkey) {
2461 2460 caps2 |= IBT_HCA2_RES_LKEY;
2462 2461 hca_attr->hca_reserved_lkey = state->hs_devlim.rsv_lkey;
2463 2462 }
2464 2463 if (state->hs_devlim.local_inv && state->hs_devlim.remote_inv &&
2465 2464 state->hs_devlim.fast_reg_wr) { /* fw needs to be >= 2.7.000 */
2466 2465 if ((state->hs_fw.fw_rev_major > 2) ||
2467 2466 ((state->hs_fw.fw_rev_major == 2) &&
2468 2467 (state->hs_fw.fw_rev_minor >= 7)))
2469 2468 caps2 |= IBT_HCA2_MEM_MGT_EXT;
2470 2469 }
2471 2470 if (state->hs_devlim.log_max_rss_tbl_sz) {
2472 2471 hca_attr->hca_rss_max_log2_table =
2473 2472 state->hs_devlim.log_max_rss_tbl_sz;
2474 2473 if (state->hs_devlim.rss_xor)
2475 2474 caps2 |= IBT_HCA2_RSS_XOR_ALG;
2476 2475 if (state->hs_devlim.rss_toep)
2477 2476 caps2 |= IBT_HCA2_RSS_TPL_ALG;
2478 2477 }
2479 2478 if (state->hs_devlim.mps) {
2480 2479 caps |= IBT_HCA_ZERO_BASED_VA;
2481 2480 }
2482 2481 if (state->hs_devlim.zb) {
2483 2482 caps |= IBT_HCA_MULT_PAGE_SZ_MR;
2484 2483 }
2485 2484 caps |= (IBT_HCA_AH_PORT_CHECK | IBT_HCA_SQD_SQD_PORT |
2486 2485 IBT_HCA_SI_GUID | IBT_HCA_RNR_NAK | IBT_HCA_CURRENT_QP_STATE |
2487 2486 IBT_HCA_PORT_UP | IBT_HCA_RC_SRQ | IBT_HCA_UD_SRQ | IBT_HCA_FMR);
2488 2487 caps2 |= IBT_HCA2_DMA_MR;
2489 2488
2490 2489 if (state->hs_devlim.log_max_gso_sz) {
2491 2490 hca_attr->hca_max_lso_size =
2492 2491 (1 << state->hs_devlim.log_max_gso_sz);
2493 2492 /* 64 = ctrl & datagram seg, 4 = LSO seg, 16 = 1 SGL */
2494 2493 hca_attr->hca_max_lso_hdr_size =
2495 2494 state->hs_devlim.max_desc_sz_sq - (64 + 4 + 16);
2496 2495 }
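For example (both device values below are assumptions for illustration only): a log_max_gso_sz of 17 advertises a 128 KB maximum LSO payload, and with a 1008-byte max_desc_sz_sq the largest LSO header that still fits in a send descriptor is 1008 - (64 + 4 + 16) = 924 bytes.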
2497 2496
2498 2497 caps |= IBT_HCA_WQE_SIZE_INFO;
2499 2498 max_send_wqe_bytes = state->hs_devlim.max_desc_sz_sq;
2500 2499 max_recv_wqe_bytes = state->hs_devlim.max_desc_sz_rq;
2501 2500 hca_attr->hca_ud_send_sgl_sz = (max_send_wqe_bytes / 16) - 4;
2502 2501 hca_attr->hca_conn_send_sgl_sz = (max_send_wqe_bytes / 16) - 1;
2503 2502 hca_attr->hca_conn_rdma_sgl_overhead = 1;
2504 2503 hca_attr->hca_conn_rdma_write_sgl_sz = (max_send_wqe_bytes / 16) - 2;
2505 2504 hca_attr->hca_conn_rdma_read_sgl_sz = (512 / 16) - 2; /* see PRM */
2506 2505 hca_attr->hca_recv_sgl_sz = max_recv_wqe_bytes / 16;
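Each scatter-gather entry occupies 16 bytes of WQE space, and the subtracted constants account for the non-SGL segments at the head of each descriptor type. Using the same assumed 1008-byte max_desc_sz_sq, that works out to 1008/16 - 4 = 59 SGEs for a UD send, 1008/16 - 1 = 62 for a connected-mode send, 1008/16 - 2 = 61 for an RDMA write, and a fixed 512/16 - 2 = 30 for an RDMA read.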
2507 2506
2508 2507 /* We choose not to support "inline" unless it improves performance */
2509 2508 hca_attr->hca_max_inline_size = 0;
2510 2509 hca_attr->hca_ud_send_inline_sz = 0;
2511 2510 hca_attr->hca_conn_send_inline_sz = 0;
2512 2511 hca_attr->hca_conn_rdmaw_inline_overhead = 4;
2513 2512
2514 2513 #if defined(_ELF64)
2515 2514 /* 32-bit kernels are too small for Fibre Channel over IB */
2516 2515 if (state->hs_devlim.fcoib && (caps2 & IBT_HCA2_MEM_MGT_EXT)) {
2517 2516 caps2 |= IBT_HCA2_FC;
2518 2517 hca_attr->hca_rfci_max_log2_qp = 7; /* 128 per port */
2519 2518 hca_attr->hca_fexch_max_log2_qp = 16; /* 64K per port */
2520 2519 hca_attr->hca_fexch_max_log2_mem = 20; /* 1MB per MPT */
2521 2520 }
2522 2521 #endif
2523 2522
2524 2523 hca_attr->hca_flags = caps;
2525 2524 hca_attr->hca_flags2 = caps2;
2526 2525
2527 2526 /*
2528 2527 * Set hca_attr's IDs
2529 2528 */
2530 2529 hca_attr->hca_vendor_id = state->hs_vendor_id;
2531 2530 hca_attr->hca_device_id = state->hs_device_id;
2532 2531 hca_attr->hca_version_id = state->hs_revision_id;
2533 2532
2534 2533 /*
2535 2534 * Determine number of available QPs and max QP size. Number of
2536 2535 * available QPs is determined by subtracting the number of
2537 2536 * "reserved QPs" (i.e. reserved for firmware use) from the
2538 2537 * total number configured.
2539 2538 */
2540 2539 val = ((uint64_t)1 << state->hs_cfg_profile->cp_log_num_qp);
2541 2540 hca_attr->hca_max_qp = val - ((uint64_t)1 <<
2542 2541 state->hs_devlim.log_rsvd_qp);
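This "total configured minus firmware-reserved" calculation repeats below for CQs, SRQs, and MPTs. With assumed values of cp_log_num_qp = 17 and log_rsvd_qp = 7, for instance, it reports (1 << 17) - (1 << 7) = 130944 usable QPs.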
2543 2542 maxval = ((uint64_t)1 << state->hs_devlim.log_max_qp_sz);
2544 2543 val = ((uint64_t)1 << state->hs_cfg_profile->cp_log_max_qp_sz);
2545 2544 if (val > maxval) {
2546 2545 kmem_free(hca_attr, sizeof (ibt_hca_attr_t));
2547 2546 HERMON_ATTACH_MSG(state->hs_attach_buf,
2548 2547 "soft_state_init_maxqpsz_toobig_fail");
2549 2548 return (DDI_FAILURE);
2550 2549 }
2551 2550 /* we need to reduce this by the max space needed for headroom */
2552 2551 hca_attr->hca_max_qp_sz = (uint_t)val - (HERMON_QP_OH_SIZE >>
2553 2552 HERMON_QP_WQE_LOG_MINIMUM) - 1;
2554 2553
2555 2554 /*
2556 2555 * Determine max scatter-gather size in WQEs. The HCA has split
2557 2556 * the max sgl into rec'v Q and send Q values. Use the least.
2558 2557 *
2559 2558 * This is mainly useful for legacy clients. Smart clients
2560 2559 * such as IPoIB will use the IBT_HCA_WQE_SIZE_INFO sgl info.
2561 2560 */
2562 2561 if (state->hs_devlim.max_sg_rq <= state->hs_devlim.max_sg_sq) {
2563 2562 maxval = state->hs_devlim.max_sg_rq;
2564 2563 } else {
2565 2564 maxval = state->hs_devlim.max_sg_sq;
2566 2565 }
2567 2566 val = state->hs_cfg_profile->cp_wqe_max_sgl;
2568 2567 if (val > maxval) {
2569 2568 kmem_free(hca_attr, sizeof (ibt_hca_attr_t));
2570 2569 HERMON_ATTACH_MSG(state->hs_attach_buf,
2571 2570 "soft_state_init_toomanysgl_fail");
2572 2571 return (DDI_FAILURE);
2573 2572 }
2574 2573 /* If the rounded value for max SGL is too large, cap it */
2575 2574 if (state->hs_cfg_profile->cp_wqe_real_max_sgl > maxval) {
2576 2575 state->hs_cfg_profile->cp_wqe_real_max_sgl = (uint32_t)maxval;
2577 2576 val = maxval;
2578 2577 } else {
2579 2578 val = state->hs_cfg_profile->cp_wqe_real_max_sgl;
2580 2579 }
2581 2580
2582 2581 hca_attr->hca_max_sgl = (uint_t)val;
2583 2582 hca_attr->hca_max_rd_sgl = 0; /* zero because RD is unsupported */
2584 2583
2585 2584 /*
2586 2585 * Determine number of available CQs and max CQ size. Number of
2587 2586 * available CQs is determined by subtracting the number of
2588 2587 * "reserved CQs" (i.e. reserved for firmware use) from the
2589 2588 * total number configured.
2590 2589 */
2591 2590 val = ((uint64_t)1 << state->hs_cfg_profile->cp_log_num_cq);
2592 2591 hca_attr->hca_max_cq = val - ((uint64_t)1 <<
2593 2592 state->hs_devlim.log_rsvd_cq);
2594 2593 maxval = ((uint64_t)1 << state->hs_devlim.log_max_cq_sz);
2595 2594 val = ((uint64_t)1 << state->hs_cfg_profile->cp_log_max_cq_sz) - 1;
2596 2595 if (val > maxval) {
2597 2596 kmem_free(hca_attr, sizeof (ibt_hca_attr_t));
2598 2597 HERMON_ATTACH_MSG(state->hs_attach_buf,
2599 2598 "soft_state_init_maxcqsz_toobig_fail");
2600 2599 return (DDI_FAILURE);
2601 2600 }
2602 2601 hca_attr->hca_max_cq_sz = (uint_t)val;
2603 2602
2604 2603 /*
2605 2604 * Determine number of available SRQs and max SRQ size. Number of
2606 2605 * available SRQs is determined by subtracting the number of
2607 2606 * "reserved SRQs" (i.e. reserved for firmware use) from the
2608 2607 * total number configured.
2609 2608 */
2610 2609 val = ((uint64_t)1 << state->hs_cfg_profile->cp_log_num_srq);
2611 2610 hca_attr->hca_max_srqs = val - ((uint64_t)1 <<
2612 2611 state->hs_devlim.log_rsvd_srq);
2613 2612 maxval = ((uint64_t)1 << state->hs_devlim.log_max_srq_sz);
2614 2613 val = ((uint64_t)1 << state->hs_cfg_profile->cp_log_max_srq_sz);
2615 2614
2616 2615 if (val > maxval) {
2617 2616 kmem_free(hca_attr, sizeof (ibt_hca_attr_t));
2618 2617 HERMON_ATTACH_MSG(state->hs_attach_buf,
2619 2618 "soft_state_init_maxsrqsz_toobig_fail");
2620 2619 return (DDI_FAILURE);
2621 2620 }
2622 2621 hca_attr->hca_max_srqs_sz = (uint_t)val;
2623 2622
2624 2623 val = hca_attr->hca_recv_sgl_sz - 1; /* SRQ has a list link */
2625 2624 maxval = state->hs_devlim.max_sg_rq - 1;
2626 2625 if (val > maxval) {
2627 2626 kmem_free(hca_attr, sizeof (ibt_hca_attr_t));
2628 2627 HERMON_ATTACH_MSG(state->hs_attach_buf,
2629 2628 "soft_state_init_toomanysrqsgl_fail");
2630 2629 return (DDI_FAILURE);
2631 2630 }
2632 2631 hca_attr->hca_max_srq_sgl = (uint_t)val;
2633 2632
2634 2633 /*
2635 2634 * Determine supported HCA page sizes
2636 2635 * XXX
2637 2636 * For now we simply return the system pagesize as the only supported
2638 2637 * pagesize
2639 2638 */
2640 2639 hca_attr->hca_page_sz = ((PAGESIZE == (1 << 13)) ? IBT_PAGE_8K :
2641 2640 IBT_PAGE_4K);
2642 2641
2643 2642 /*
2644 2643 * Determine number of available MemReg, MemWin, and their max size.
2645 2644 * Number of available MRs and MWs is determined by subtracting
2646 2645 * the number of "reserved MPTs" (i.e. reserved for firmware use)
2647 2646 * from the total number configured for each.
2648 2647 */
2649 2648 val = ((uint64_t)1 << state->hs_cfg_profile->cp_log_num_dmpt);
2650 2649 hca_attr->hca_max_memr = val - ((uint64_t)1 <<
2651 2650 state->hs_devlim.log_rsvd_dmpt);
2652 2651 hca_attr->hca_max_mem_win = state->hs_devlim.mem_win ? (val -
2653 2652 ((uint64_t)1 << state->hs_devlim.log_rsvd_dmpt)) : 0;
2654 2653 maxval = state->hs_devlim.log_max_mrw_sz;
2655 2654 val = state->hs_cfg_profile->cp_log_max_mrw_sz;
2656 2655 if (val > maxval) {
2657 2656 kmem_free(hca_attr, sizeof (ibt_hca_attr_t));
2658 2657 HERMON_ATTACH_MSG(state->hs_attach_buf,
2659 2658 "soft_state_init_maxmrwsz_toobig_fail");
2660 2659 return (DDI_FAILURE);
2661 2660 }
2662 2661 hca_attr->hca_max_memr_len = ((uint64_t)1 << val);
2663 2662
2664 2663 /* Determine RDMA/Atomic properties */
2665 2664 val = ((uint64_t)1 << state->hs_cfg_profile->cp_log_num_rdb);
2666 2665 hca_attr->hca_max_rsc = (uint_t)val;
2667 2666 val = state->hs_cfg_profile->cp_hca_max_rdma_in_qp;
2668 2667 hca_attr->hca_max_rdma_in_qp = (uint8_t)val;
2669 2668 val = state->hs_cfg_profile->cp_hca_max_rdma_out_qp;
2670 2669 hca_attr->hca_max_rdma_out_qp = (uint8_t)val;
2671 2670 hca_attr->hca_max_rdma_in_ee = 0;
2672 2671 hca_attr->hca_max_rdma_out_ee = 0;
2673 2672
2674 2673 /*
2675 2674 * Determine maximum number of raw IPv6 and Ether QPs. Set to 0
2676 2675 * because neither type of raw QP is supported
2677 2676 */
2678 2677 hca_attr->hca_max_ipv6_qp = 0;
2679 2678 hca_attr->hca_max_ether_qp = 0;
2680 2679
2681 2680 /* Determine max number of MCGs and max QP-per-MCG */
2682 2681 val = ((uint64_t)1 << state->hs_cfg_profile->cp_log_num_qp);
2683 2682 hca_attr->hca_max_mcg_qps = (uint_t)val;
2684 2683 val = ((uint64_t)1 << state->hs_cfg_profile->cp_log_num_mcg);
2685 2684 hca_attr->hca_max_mcg = (uint_t)val;
2686 2685 val = state->hs_cfg_profile->cp_num_qp_per_mcg;
2687 2686 hca_attr->hca_max_qp_per_mcg = (uint_t)val;
2688 2687
2689 2688 /* Determine max number partitions (i.e. PKeys) */
2690 2689 maxval = ((uint64_t)state->hs_cfg_profile->cp_num_ports <<
2691 2690 state->hs_queryport.log_max_pkey);
2692 2691 val = ((uint64_t)state->hs_cfg_profile->cp_num_ports <<
2693 2692 state->hs_cfg_profile->cp_log_max_pkeytbl);
2694 2693
2695 2694 if (val > maxval) {
2696 2695 kmem_free(hca_attr, sizeof (ibt_hca_attr_t));
2697 2696 HERMON_ATTACH_MSG(state->hs_attach_buf,
2698 2697 "soft_state_init_toomanypkey_fail");
2699 2698 return (DDI_FAILURE);
2700 2699 }
2701 2700 hca_attr->hca_max_partitions = (uint16_t)val;
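The shifts here are simply a power-of-two multiply: with 2 ports and an assumed cp_log_max_pkeytbl of 7, the configured total is 2 << 7 = 256 P_Key table entries, which must not exceed the 2 << log_max_pkey limit reported by the firmware in QUERY_PORT.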
2702 2701
2703 2702 /* Determine number of ports */
2704 2703 maxval = state->hs_devlim.num_ports;
2705 2704 val = state->hs_cfg_profile->cp_num_ports;
2706 2705 if ((val > maxval) || (val == 0)) {
2707 2706 kmem_free(hca_attr, sizeof (ibt_hca_attr_t));
2708 2707 HERMON_ATTACH_MSG(state->hs_attach_buf,
2709 2708 "soft_state_init_toomanyports_fail");
2710 2709 return (DDI_FAILURE);
2711 2710 }
2712 2711 hca_attr->hca_nports = (uint8_t)val;
2713 2712
2714 2713 /* Copy NodeGUID and SystemImageGUID from softstate */
2715 2714 hca_attr->hca_node_guid = state->hs_nodeguid;
2716 2715 hca_attr->hca_si_guid = state->hs_sysimgguid;
2717 2716
2718 2717 /*
2719 2718 * Determine local ACK delay. Use the value suggested by the Hermon
2720 2719 * hardware (from the QUERY_DEV_CAP command)
2721 2720 */
2722 2721 hca_attr->hca_local_ack_delay = state->hs_devlim.ca_ack_delay;
2723 2722
2724 2723 /* Determine max SGID table and PKey table sizes */
2725 2724 val = ((uint64_t)1 << state->hs_cfg_profile->cp_log_max_gidtbl);
2726 2725 hca_attr->hca_max_port_sgid_tbl_sz = (uint_t)val;
2727 2726 val = ((uint64_t)1 << state->hs_cfg_profile->cp_log_max_pkeytbl);
2728 2727 hca_attr->hca_max_port_pkey_tbl_sz = (uint16_t)val;
2729 2728
2730 2729 /* Determine max number of PDs */
2731 2730 maxval = ((uint64_t)1 << state->hs_devlim.log_max_pd);
2732 2731 val = ((uint64_t)1 << state->hs_cfg_profile->cp_log_num_pd);
2733 2732 if (val > maxval) {
2734 2733 kmem_free(hca_attr, sizeof (ibt_hca_attr_t));
2735 2734 HERMON_ATTACH_MSG(state->hs_attach_buf,
2736 2735 "soft_state_init_toomanypd_fail");
2737 2736 return (DDI_FAILURE);
2738 2737 }
2739 2738 hca_attr->hca_max_pd = (uint_t)val;
2740 2739
2741 2740 /* Determine max number of Address Handles (NOT IN ARBEL or HERMON) */
2742 2741 hca_attr->hca_max_ah = 0;
2743 2742
2744 2743 /* No RDDs or EECs (since Reliable Datagram is not supported) */
2745 2744 hca_attr->hca_max_rdd = 0;
2746 2745 hca_attr->hca_max_eec = 0;
2747 2746
2748 2747 /* Initialize lock for reserved UAR page access */
2749 2748 mutex_init(&state->hs_uar_lock, NULL, MUTEX_DRIVER,
2750 2749 DDI_INTR_PRI(state->hs_intrmsi_pri));
2751 2750
2752 2751 /* Initialize the flash fields */
2753 2752 state->hs_fw_flashstarted = 0;
2754 2753 mutex_init(&state->hs_fw_flashlock, NULL, MUTEX_DRIVER,
2755 2754 DDI_INTR_PRI(state->hs_intrmsi_pri));
2756 2755
2757 2756 /* Initialize the lock for the info ioctl */
2758 2757 mutex_init(&state->hs_info_lock, NULL, MUTEX_DRIVER,
2759 2758 DDI_INTR_PRI(state->hs_intrmsi_pri));
2760 2759
2761 2760 /* Initialize the AVL tree for QP number support */
2762 2761 hermon_qpn_avl_init(state);
2763 2762
2764 2763 /* Initialize the cq_sched info structure */
2765 2764 status = hermon_cq_sched_init(state);
2766 2765 if (status != DDI_SUCCESS) {
2767 2766 hermon_qpn_avl_fini(state);
2768 2767 mutex_destroy(&state->hs_info_lock);
2769 2768 mutex_destroy(&state->hs_fw_flashlock);
2770 2769 mutex_destroy(&state->hs_uar_lock);
2771 2770 kmem_free(hca_attr, sizeof (ibt_hca_attr_t));
2772 2771 HERMON_ATTACH_MSG(state->hs_attach_buf,
2773 2772 "soft_state_init_cqsched_init_fail");
2774 2773 return (DDI_FAILURE);
2775 2774 }
2776 2775
2777 2776 /* Initialize the fcoib info structure */
2778 2777 status = hermon_fcoib_init(state);
2779 2778 if (status != DDI_SUCCESS) {
2780 2779 hermon_cq_sched_fini(state);
2781 2780 hermon_qpn_avl_fini(state);
2782 2781 mutex_destroy(&state->hs_info_lock);
2783 2782 mutex_destroy(&state->hs_fw_flashlock);
2784 2783 mutex_destroy(&state->hs_uar_lock);
2785 2784 kmem_free(hca_attr, sizeof (ibt_hca_attr_t));
2786 2785 HERMON_ATTACH_MSG(state->hs_attach_buf,
2787 2786 "soft_state_init_fcoibinit_fail");
2788 2787 return (DDI_FAILURE);
2789 2788 }
2790 2789
2791 2790 /* Initialize the kstat info structure */
2792 2791 status = hermon_kstat_init(state);
2793 2792 if (status != DDI_SUCCESS) {
2794 2793 hermon_fcoib_fini(state);
2795 2794 hermon_cq_sched_fini(state);
2796 2795 hermon_qpn_avl_fini(state);
2797 2796 mutex_destroy(&state->hs_info_lock);
2798 2797 mutex_destroy(&state->hs_fw_flashlock);
2799 2798 mutex_destroy(&state->hs_uar_lock);
2800 2799 kmem_free(hca_attr, sizeof (ibt_hca_attr_t));
2801 2800 HERMON_ATTACH_MSG(state->hs_attach_buf,
2802 2801 "soft_state_init_kstatinit_fail");
2803 2802 return (DDI_FAILURE);
2804 2803 }
2805 2804
2806 2805 return (DDI_SUCCESS);
2807 2806 }
2808 2807
2809 2808
2810 2809 /*
2811 2810 * hermon_soft_state_fini()
2812 2811 * Context: Called only from detach() path context
2813 2812 */
2814 2813 static void
2815 2814 hermon_soft_state_fini(hermon_state_t *state)
2816 2815 {
2817 2816
2818 2817 /* Teardown the kstat info */
2819 2818 hermon_kstat_fini(state);
2820 2819
2821 2820 /* Teardown the fcoib info */
2822 2821 hermon_fcoib_fini(state);
2823 2822
2824 2823 /* Teardown the cq_sched info */
2825 2824 hermon_cq_sched_fini(state);
2826 2825
2827 2826 /* Teardown the AVL tree for QP number support */
2828 2827 hermon_qpn_avl_fini(state);
2829 2828
2830 2829 /* Free up info ioctl mutex */
2831 2830 mutex_destroy(&state->hs_info_lock);
2832 2831
2833 2832 /* Free up flash mutex */
2834 2833 mutex_destroy(&state->hs_fw_flashlock);
2835 2834
2836 2835 /* Free up the UAR page access mutex */
2837 2836 mutex_destroy(&state->hs_uar_lock);
2838 2837
2839 2838 /* Free up the hca_attr struct */
2840 2839 kmem_free(state->hs_ibtfinfo.hca_attr, sizeof (ibt_hca_attr_t));
2841 2840
2842 2841 }
2843 2842
2844 2843 /*
2845 2844 * hermon_icm_config_setup()
2846 2845 * Context: Only called from attach() path context
2847 2846 */
2848 2847 static int
2849 2848 hermon_icm_config_setup(hermon_state_t *state,
2850 2849 hermon_hw_initqueryhca_t *inithca)
2851 2850 {
2852 2851 hermon_hw_querydevlim_t *devlim;
2853 2852 hermon_cfg_profile_t *cfg;
2854 2853 hermon_icm_table_t *icm_p[HERMON_NUM_ICM_RESOURCES];
2855 2854 hermon_icm_table_t *icm;
2856 2855 hermon_icm_table_t *tmp;
2857 2856 uint64_t icm_addr;
2858 2857 uint64_t icm_size;
2859 2858 int status, i, j;
2860 2859
2861 2860
2862 2861 /* Bring in local devlims, cfg_profile and hs_icm table list */
2863 2862 devlim = &state->hs_devlim;
2864 2863 cfg = state->hs_cfg_profile;
2865 2864 icm = state->hs_icm;
2866 2865
2867 2866 /*
2868 2867 * Assign each ICM table's entry size from data in the devlims,
2869 2868 * except for RDB and MCG sizes, which are not returned in devlims
2870 2869 * but do have a fixed size, and the UAR context entry size, which
2871 2870 * we determine. For this, we use the "cp_num_pgs_per_uce" value
2872 2871 * from our hs_cfg_profile.
2873 2872 */
2874 2873 icm[HERMON_CMPT].object_size = devlim->cmpt_entry_sz;
2875 2874 icm[HERMON_CMPT_QPC].object_size = devlim->cmpt_entry_sz;
2876 2875 icm[HERMON_CMPT_SRQC].object_size = devlim->cmpt_entry_sz;
2877 2876 icm[HERMON_CMPT_CQC].object_size = devlim->cmpt_entry_sz;
2878 2877 icm[HERMON_CMPT_EQC].object_size = devlim->cmpt_entry_sz;
2879 2878 icm[HERMON_MTT].object_size = devlim->mtt_entry_sz;
2880 2879 icm[HERMON_DMPT].object_size = devlim->dmpt_entry_sz;
2881 2880 icm[HERMON_QPC].object_size = devlim->qpc_entry_sz;
2882 2881 icm[HERMON_CQC].object_size = devlim->cqc_entry_sz;
2883 2882 icm[HERMON_SRQC].object_size = devlim->srq_entry_sz;
2884 2883 icm[HERMON_EQC].object_size = devlim->eqc_entry_sz;
2885 2884 icm[HERMON_RDB].object_size = devlim->rdmardc_entry_sz *
2886 2885 cfg->cp_hca_max_rdma_in_qp;
2887 2886 icm[HERMON_MCG].object_size = HERMON_MCGMEM_SZ(state);
2888 2887 icm[HERMON_ALTC].object_size = devlim->altc_entry_sz;
2889 2888 icm[HERMON_AUXC].object_size = devlim->aux_entry_sz;
2890 2889
2891 2890 /* Assign each ICM table's log2 number of entries */
2892 2891 icm[HERMON_CMPT].log_num_entries = cfg->cp_log_num_cmpt;
2893 2892 icm[HERMON_CMPT_QPC].log_num_entries = cfg->cp_log_num_qp;
2894 2893 icm[HERMON_CMPT_SRQC].log_num_entries = cfg->cp_log_num_srq;
2895 2894 icm[HERMON_CMPT_CQC].log_num_entries = cfg->cp_log_num_cq;
2896 2895 icm[HERMON_CMPT_EQC].log_num_entries = HERMON_NUM_EQ_SHIFT;
2897 2896 icm[HERMON_MTT].log_num_entries = cfg->cp_log_num_mtt;
2898 2897 icm[HERMON_DMPT].log_num_entries = cfg->cp_log_num_dmpt;
2899 2898 icm[HERMON_QPC].log_num_entries = cfg->cp_log_num_qp;
2900 2899 icm[HERMON_SRQC].log_num_entries = cfg->cp_log_num_srq;
2901 2900 icm[HERMON_CQC].log_num_entries = cfg->cp_log_num_cq;
2902 2901 icm[HERMON_EQC].log_num_entries = HERMON_NUM_EQ_SHIFT;
2903 2902 icm[HERMON_RDB].log_num_entries = cfg->cp_log_num_qp;
2904 2903 icm[HERMON_MCG].log_num_entries = cfg->cp_log_num_mcg;
2905 2904 icm[HERMON_ALTC].log_num_entries = cfg->cp_log_num_qp;
2906 2905 icm[HERMON_AUXC].log_num_entries = cfg->cp_log_num_qp;
2907 2906
2908 2907 /* Initialize the ICM tables */
2909 2908 hermon_icm_tables_init(state);
2910 2909
2911 2910 /*
2912 2911 * ICM tables must be aligned on their size in the ICM address
2913 2912 * space. So, here we order the tables from largest total table
2914 2913 * size to the smallest. All tables are a power of 2 in size, so
2915 2914 * this will ensure that all tables are aligned on their own size
2916 2915 * without wasting space in the ICM.
2917 2916 *
2918 2917 * In order to easily set the ICM addresses without needing to
2919 2918 	 * worry about the ordering of our table indices as it relates to
2920 2919 * the hermon_rsrc_type_t enum, we will use a list of pointers
2921 2920 * representing the tables for the sort, then assign ICM addresses
2922 2921 * below using it.
2923 2922 */
2924 2923 for (i = 0; i < HERMON_NUM_ICM_RESOURCES; i++) {
2925 2924 icm_p[i] = &icm[i];
2926 2925 }
2927 2926 for (i = HERMON_NUM_ICM_RESOURCES; i > 0; i--) {
2928 2927 switch (i) {
2929 2928 case HERMON_CMPT_QPC:
2930 2929 case HERMON_CMPT_SRQC:
2931 2930 case HERMON_CMPT_CQC:
2932 2931 case HERMON_CMPT_EQC:
2933 2932 continue;
2934 2933 }
2935 2934 for (j = 1; j < i; j++) {
2936 2935 if (icm_p[j]->table_size > icm_p[j - 1]->table_size) {
2937 2936 tmp = icm_p[j];
2938 2937 icm_p[j] = icm_p[j - 1];
2939 2938 icm_p[j - 1] = tmp;
2940 2939 }
2941 2940 }
2942 2941 }
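Because every table_size is a power of two, placing the tables in descending size order keeps each one naturally aligned to its own size with no padding. For example, hypothetical tables of 1 MB, 256 KB, and 64 KB laid out back to back start at ICM offsets 0x0, 0x100000, and 0x140000, each a multiple of its own size; the reverse order would put the 256 KB table at offset 0x10000 and force alignment padding before it.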
2943 2942
2944 2943 /* Initialize the ICM address and ICM size */
2945 2944 icm_addr = icm_size = 0;
2946 2945
2947 2946 /*
2948 2947 * Set the ICM base address of each table, using our sorted
2949 2948 * list of pointers from above.
2950 2949 */
2951 2950 for (i = 0; i < HERMON_NUM_ICM_RESOURCES; i++) {
2952 2951 j = icm_p[i]->icm_type;
2953 2952 switch (j) {
2954 2953 case HERMON_CMPT_QPC:
2955 2954 case HERMON_CMPT_SRQC:
2956 2955 case HERMON_CMPT_CQC:
2957 2956 case HERMON_CMPT_EQC:
2958 2957 continue;
2959 2958 }
2960 2959 if (icm[j].table_size) {
2961 2960 /*
2962 2961 * Set the ICM base address in the table, save the
2963 2962 * ICM offset in the rsrc pool and increment the
2964 2963 * total ICM allocation.
2965 2964 */
2966 2965 icm[j].icm_baseaddr = icm_addr;
2967 2966 if (hermon_verbose) {
2968 2967 IBTF_DPRINTF_L2("ICMADDR", "rsrc %x @ %p"
2969 2968 " size %llx", j, icm[j].icm_baseaddr,
2970 2969 icm[j].table_size);
2971 2970 }
2972 2971 icm_size += icm[j].table_size;
2973 2972 }
2974 2973
2975 2974 /* Verify that we don't exceed maximum ICM size */
2976 2975 if (icm_size > devlim->max_icm_size) {
2977 2976 /* free the ICM table memory resources */
2978 2977 hermon_icm_tables_fini(state);
2979 2978 cmn_err(CE_WARN, "ICM configuration exceeds maximum "
2980 2979 "configuration: max (0x%lx) requested (0x%lx)\n",
2981 2980 (ulong_t)devlim->max_icm_size, (ulong_t)icm_size);
2982 2981 HERMON_ATTACH_MSG(state->hs_attach_buf,
2983 2982 "icm_config_toobig_fail");
2984 2983 return (DDI_FAILURE);
2985 2984 }
2986 2985
2987 2986 /* assign address to the 4 pieces of the CMPT */
2988 2987 if (j == HERMON_CMPT) {
2989 2988 uint64_t cmpt_size = icm[j].table_size >> 2;
2990 2989 #define init_cmpt_icm_baseaddr(rsrc, indx) \
2991 2990 icm[rsrc].icm_baseaddr = icm_addr + (indx * cmpt_size);
2992 2991 init_cmpt_icm_baseaddr(HERMON_CMPT_QPC, 0);
2993 2992 init_cmpt_icm_baseaddr(HERMON_CMPT_SRQC, 1);
2994 2993 init_cmpt_icm_baseaddr(HERMON_CMPT_CQC, 2);
2995 2994 init_cmpt_icm_baseaddr(HERMON_CMPT_EQC, 3);
2996 2995 }
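The cMPT table is carved into four equal quarters, one per owning object type, in the fixed order QPC, SRQC, CQC, EQC. For a hypothetical 64 MB cMPT table, cmpt_size is 16 MB and HERMON_CMPT_CQC (index 2) lands at icm_baseaddr + 32 MB.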
2997 2996
2998 2997 /* Increment the ICM address for the next table */
2999 2998 icm_addr += icm[j].table_size;
3000 2999 }
3001 3000
3002 3001 /* Populate the structure for the INIT_HCA command */
3003 3002 hermon_inithca_set(state, inithca);
3004 3003
3005 3004 /*
3006 3005 * Prior to invoking INIT_HCA, we must have ICM memory in place
3007 3006 * for the reserved objects in each table. We will allocate and map
3008 3007 * this initial ICM memory here. Note that given the assignment
3009 3008 * of span_size above, tables that are smaller or equal in total
3010 3009 * of span_size above, tables that are smaller than or equal in total
3011 3010 */
3012 3011 status = hermon_icm_dma_init(state);
3013 3012 if (status != DDI_SUCCESS) {
3014 3013 /* free the ICM table memory resources */
3015 3014 hermon_icm_tables_fini(state);
3016 3015 HERMON_WARNING(state, "Failed to allocate initial ICM");
3017 3016 HERMON_ATTACH_MSG(state->hs_attach_buf,
3018 3017 "icm_config_dma_init_fail");
3019 3018 return (DDI_FAILURE);
3020 3019 }
3021 3020
3022 3021 return (DDI_SUCCESS);
3023 3022 }
3024 3023
3025 3024 /*
3026 3025 * hermon_inithca_set()
3027 3026 * Context: Only called from attach() path context
3028 3027 */
3029 3028 static void
3030 3029 hermon_inithca_set(hermon_state_t *state, hermon_hw_initqueryhca_t *inithca)
3031 3030 {
3032 3031 hermon_cfg_profile_t *cfg;
3033 3032 hermon_icm_table_t *icm;
3034 3033 int i;
3035 3034
3036 3035
3037 3036 /* Populate the INIT_HCA structure */
3038 3037 icm = state->hs_icm;
3039 3038 cfg = state->hs_cfg_profile;
3040 3039
3041 3040 /* set version */
3042 3041 inithca->version = 0x02; /* PRM 0.36 */
3043 3042 /* set cacheline - log2 in 16-byte chunks */
3044 3043 inithca->log2_cacheline = 0x2; /* optimized for 64 byte cache */
3045 3044
3046 3045 /* we need to update the inithca info with the UAR info too */
3047 3046 inithca->uar.log_max_uars = highbit(cfg->cp_log_num_uar);
3048 3047 inithca->uar.uar_pg_sz = PAGESHIFT - HERMON_PAGESHIFT;
3049 3048
3050 3049 /* Set endianness */
3051 3050 #ifdef _LITTLE_ENDIAN
3052 3051 inithca->big_endian = 0;
3053 3052 #else
3054 3053 inithca->big_endian = 1;
3055 3054 #endif
3056 3055
3057 3056 /* Port Checking is on by default */
3058 3057 inithca->udav_port_chk = HERMON_UDAV_PORTCHK_ENABLED;
3059 3058
3060 3059 /* Enable IPoIB checksum */
3061 3060 if (state->hs_devlim.ipoib_cksm)
3062 3061 inithca->chsum_en = 1;
3063 3062
3064 3063 /* Set each ICM table's attributes */
3065 3064 for (i = 0; i < HERMON_NUM_ICM_RESOURCES; i++) {
3066 3065 switch (icm[i].icm_type) {
3067 3066 case HERMON_CMPT:
3068 3067 inithca->tpt.cmpt_baseaddr = icm[i].icm_baseaddr;
3069 3068 break;
3070 3069
3071 3070 case HERMON_MTT:
3072 3071 inithca->tpt.mtt_baseaddr = icm[i].icm_baseaddr;
3073 3072 break;
3074 3073
3075 3074 case HERMON_DMPT:
3076 3075 inithca->tpt.dmpt_baseaddr = icm[i].icm_baseaddr;
3077 3076 inithca->tpt.log_dmpt_sz = icm[i].log_num_entries;
3078 3077 inithca->tpt.pgfault_rnr_to = 0; /* just in case */
3079 3078 break;
3080 3079
3081 3080 case HERMON_QPC:
3082 3081 inithca->context.log_num_qp = icm[i].log_num_entries;
3083 3082 inithca->context.qpc_baseaddr_h =
3084 3083 icm[i].icm_baseaddr >> 32;
3085 3084 inithca->context.qpc_baseaddr_l =
3086 3085 (icm[i].icm_baseaddr & 0xFFFFFFFF) >> 5;
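/*
 * Note: the low word is stored shifted right by 5 bits, so the
 * context base address is presumably expected by the hardware to
 * be 32-byte aligned.
 */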
3087 3086 break;
3088 3087
3089 3088 case HERMON_CQC:
3090 3089 inithca->context.log_num_cq = icm[i].log_num_entries;
3091 3090 inithca->context.cqc_baseaddr_h =
3092 3091 icm[i].icm_baseaddr >> 32;
3093 3092 inithca->context.cqc_baseaddr_l =
3094 3093 (icm[i].icm_baseaddr & 0xFFFFFFFF) >> 5;
3095 3094 break;
3096 3095
3097 3096 case HERMON_SRQC:
3098 3097 inithca->context.log_num_srq = icm[i].log_num_entries;
3099 3098 inithca->context.srqc_baseaddr_h =
3100 3099 icm[i].icm_baseaddr >> 32;
3101 3100 inithca->context.srqc_baseaddr_l =
3102 3101 (icm[i].icm_baseaddr & 0xFFFFFFFF) >> 5;
3103 3102 break;
3104 3103
3105 3104 case HERMON_EQC:
3106 3105 inithca->context.log_num_eq = icm[i].log_num_entries;
3107 3106 inithca->context.eqc_baseaddr_h =
3108 3107 icm[i].icm_baseaddr >> 32;
3109 3108 inithca->context.eqc_baseaddr_l =
3110 3109 (icm[i].icm_baseaddr & 0xFFFFFFFF) >> 5;
3111 3110 break;
3112 3111
3113 3112 case HERMON_RDB:
3114 3113 inithca->context.rdmardc_baseaddr_h =
3115 3114 icm[i].icm_baseaddr >> 32;
3116 3115 inithca->context.rdmardc_baseaddr_l =
3117 3116 (icm[i].icm_baseaddr & 0xFFFFFFFF) >> 5;
3118 3117 inithca->context.log_num_rdmardc =
3119 3118 cfg->cp_log_num_rdb - cfg->cp_log_num_qp;
3120 3119 break;
3121 3120
3122 3121 case HERMON_MCG:
3123 3122 inithca->multi.mc_baseaddr = icm[i].icm_baseaddr;
3124 3123 inithca->multi.log_mc_tbl_sz = icm[i].log_num_entries;
3125 3124 inithca->multi.log_mc_tbl_ent =
3126 3125 highbit(HERMON_MCGMEM_SZ(state)) - 1;
3127 3126 inithca->multi.log_mc_tbl_hash_sz =
3128 3127 cfg->cp_log_num_mcg_hash;
3129 3128 inithca->multi.mc_hash_fn = HERMON_MCG_DEFAULT_HASH_FN;
3130 3129 break;
3131 3130
3132 3131 case HERMON_ALTC:
3133 3132 inithca->context.altc_baseaddr = icm[i].icm_baseaddr;
3134 3133 break;
3135 3134
3136 3135 case HERMON_AUXC:
3137 3136 inithca->context.auxc_baseaddr = icm[i].icm_baseaddr;
3138 3137 break;
3139 3138
3140 3139 default:
3141 3140 break;
3142 3141
3143 3142 }
3144 3143 }
3145 3144
3146 3145 }
3147 3146
3148 3147 /*
3149 3148 * hermon_icm_tables_init()
3150 3149 * Context: Only called from attach() path context
3151 3150 *
3152 3151 * Dynamic ICM breaks the various ICM tables into "span_size" chunks
3153 3152 * to enable allocation of backing memory on demand. Arbel used a
3154 3153 * fixed size ARBEL_ICM_SPAN_SIZE (initially 512KB) as the
3155 3154 * span_size for all ICM chunks. Hermon has other considerations,
3156 3155 * so the span_size used differs from Arbel's.
3157 3156 *
3158 3157 * The basic considerations for why Hermon differs are:
3159 3158 *
3160 3159 * 1) ICM memory is in units of HERMON pages.
3161 3160 *
3162 3161 * 2) The AUXC table is approximately 1 byte per QP.
3163 3162 *
3164 3163 * 3) ICM memory for AUXC, ALTC, and RDB is allocated when
3165 3164 * the ICM memory for the corresponding QPC is allocated.
3166 3165 *
3167 3166 * 4) ICM memory for the CMPT corresponding to the various primary
3168 3167 * resources (QPC, SRQC, CQC, and EQC) is allocated when the ICM
3169 3168 * memory for the primary resource is allocated.
3170 3169 *
3171 3170 * One HERMON page (4KB) would typically map 4K QPs worth of AUXC.
3172 3171 * So, the minimum chunks for the various QPC-related ICM memory should
3173 3172 * all be allocated to support those 4K QPs. Currently, this means the
3174 3173 * amount of memory for the various QP chunks is:
3175 3174 *
3176 3175 * QPC 256*4K bytes
3177 3176 * RDB 128*4K bytes
3178 3177 * CMPT 64*4K bytes
3179 3178 * ALTC 64*4K bytes
3180 3179 * AUXC 1*4K bytes
3181 3180 *
3182 3181 * The span_size chosen for the QP resource is 4KB of AUXC entries,
3183 3182 * or 1 HERMON_PAGESIZE worth, which is the minimum ICM mapping size.
3184 3183 *
3185 3184 * Other ICM resources can have their span_size be more arbitrary.
3186 3185 * This is 4K (HERMON_ICM_SPAN), except for MTTs because they are tiny.
3187 3186 */
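/*
 * For example, using the per-QP sizes listed above, one span of 4K
 * QPs covers 256*4K = 1MB of QPC, 128*4K = 512KB of RDB, 64*4K =
 * 256KB each of CMPT and ALTC, and exactly one 4KB HERMON page of
 * AUXC.
 */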
3188 3187
3189 3188 /* macro to make the code below cleaner */
3190 3189 #define init_dependent(rsrc, dep) \
3191 3190 icm[dep].span = icm[rsrc].span; \
3192 3191 icm[dep].num_spans = icm[rsrc].num_spans; \
3193 3192 icm[dep].split_shift = icm[rsrc].split_shift; \
3194 3193 icm[dep].span_mask = icm[rsrc].span_mask; \
3195 3194 icm[dep].span_shift = icm[rsrc].span_shift; \
3196 3195 icm[dep].rsrc_mask = icm[rsrc].rsrc_mask; \
3197 3196 if (hermon_verbose) { \
3198 3197 IBTF_DPRINTF_L2("hermon", "tables_init: " \
3199 3198 "rsrc (0x%x) size (0x%lx) span (0x%x) " \
3200 3199 "num_spans (0x%x)", dep, icm[dep].table_size, \
3201 3200 icm[dep].span, icm[dep].num_spans); \
3202 3201 IBTF_DPRINTF_L2("hermon", "tables_init: " \
3203 3202 "span_shift (0x%x) split_shift (0x%x)", \
3204 3203 icm[dep].span_shift, icm[dep].split_shift); \
3205 3204 IBTF_DPRINTF_L2("hermon", "tables_init: " \
3206 3205 "span_mask (0x%x) rsrc_mask (0x%x)", \
3207 3206 icm[dep].span_mask, icm[dep].rsrc_mask); \
3208 3207 }
3209 3208
3210 3209 static void
3211 3210 hermon_icm_tables_init(hermon_state_t *state)
3212 3211 {
3213 3212 hermon_icm_table_t *icm;
3214 3213 int i, k;
3215 3214 uint32_t per_split;
3216 3215
3217 3216
3218 3217 icm = state->hs_icm;
3219 3218
3220 3219 for (i = 0; i < HERMON_NUM_ICM_RESOURCES; i++) {
3221 3220 icm[i].icm_type = i;
3222 3221 icm[i].num_entries = 1 << icm[i].log_num_entries;
3223 3222 icm[i].log_object_size = highbit(icm[i].object_size) - 1;
3224 3223 icm[i].table_size = icm[i].num_entries <<
3225 3224 icm[i].log_object_size;
3226 3225
3227 3226 /* deal with "dependent" resource types */
3228 3227 switch (i) {
3229 3228 case HERMON_AUXC:
3230 3229 #ifdef HERMON_FW_WORKAROUND
3231 3230 icm[i].table_size = 0x80000000ull;
3232 3231 /* FALLTHROUGH */
3233 3232 #endif
3234 3233 case HERMON_CMPT_QPC:
3235 3234 case HERMON_RDB:
3236 3235 case HERMON_ALTC:
3237 3236 init_dependent(HERMON_QPC, i);
3238 3237 continue;
3239 3238 case HERMON_CMPT_SRQC:
3240 3239 init_dependent(HERMON_SRQC, i);
3241 3240 continue;
3242 3241 case HERMON_CMPT_CQC:
3243 3242 init_dependent(HERMON_CQC, i);
3244 3243 continue;
3245 3244 case HERMON_CMPT_EQC:
3246 3245 init_dependent(HERMON_EQC, i);
3247 3246 continue;
3248 3247 }
3249 3248
3250 3249 icm[i].span = HERMON_ICM_SPAN; /* default #rsrc's in 1 span */
3251 3250 if (i == HERMON_MTT) /* Alloc enough MTTs to map 256MB */
3252 3251 icm[i].span = HERMON_ICM_SPAN * 16;
3253 3252 icm[i].num_spans = icm[i].num_entries / icm[i].span;
3254 3253 if (icm[i].num_spans == 0) {
3255 3254 icm[i].span = icm[i].num_entries;
3256 3255 per_split = 1;
3257 3256 icm[i].num_spans = icm[i].num_entries / icm[i].span;
3258 3257 } else {
3259 3258 per_split = icm[i].num_spans / HERMON_ICM_SPLIT;
3260 3259 if (per_split == 0) {
3261 3260 per_split = 1;
3262 3261 }
3263 3262 }
3264 3263 if (hermon_verbose)
3265 3264 IBTF_DPRINTF_L2("ICM", "rsrc %x span %x num_spans %x",
3266 3265 i, icm[i].span, icm[i].num_spans);
3267 3266
3268 3267 /*
3269 3268 * Ensure a minimum table size of an ICM page, and a
3270 3269 * maximum span size of the ICM table size. This ensures
3271 3270 * that we don't have less than an ICM page to map, which is
3272 3271 * impossible, and that we will map an entire table at
3273 3272 * once if its total size is less than the span size.
3274 3273 */
3275 3274 icm[i].table_size = max(icm[i].table_size, HERMON_PAGESIZE);
3276 3275
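/*
 * Derive the lookup shifts and masks: span_shift is log2(span),
 * split_shift is log2(span * per_split), span_mask isolates the
 * span-index bits within a split, and rsrc_mask isolates the
 * resource index within a span.
 */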
3277 3276 icm[i].span_shift = 0;
3278 3277 for (k = icm[i].span; k != 1; k >>= 1)
3279 3278 icm[i].span_shift++;
3280 3279 icm[i].split_shift = icm[i].span_shift;
3281 3280 for (k = per_split; k != 1; k >>= 1)
3282 3281 icm[i].split_shift++;
3283 3282 icm[i].span_mask = (1 << icm[i].split_shift) -
3284 3283 (1 << icm[i].span_shift);
3285 3284 icm[i].rsrc_mask = (1 << icm[i].span_shift) - 1;
3286 3285
3287 3286
3288 3287 /* Initialize the table lock */
3289 3288 mutex_init(&icm[i].icm_table_lock, NULL, MUTEX_DRIVER,
3290 3289 DDI_INTR_PRI(state->hs_intrmsi_pri));
3291 3290 cv_init(&icm[i].icm_table_cv, NULL, CV_DRIVER, NULL);
3292 3291
3293 3292 if (hermon_verbose) {
3294 3293 IBTF_DPRINTF_L2("hermon", "tables_init: "
3295 3294 "rsrc (0x%x) size (0x%lx)", i, icm[i].table_size);
3296 3295 IBTF_DPRINTF_L2("hermon", "tables_init: "
3297 3296 "span (0x%x) num_spans (0x%x)",
3298 3297 icm[i].span, icm[i].num_spans);
3299 3298 IBTF_DPRINTF_L2("hermon", "tables_init: "
3300 3299 "span_shift (0x%x) split_shift (0x%x)",
3301 3300 icm[i].span_shift, icm[i].split_shift);
3302 3301 IBTF_DPRINTF_L2("hermon", "tables_init: "
3303 3302 "span_mask (0x%x) rsrc_mask (0x%x)",
3304 3303 icm[i].span_mask, icm[i].rsrc_mask);
3305 3304 }
3306 3305 }
3307 3306
3308 3307 }
3309 3308
3310 3309 /*
3311 3310 * hermon_icm_tables_fini()
3312 3311 * Context: Only called from attach() path context
3313 3312 *
3314 3313 * Clean up all icm_tables. Free the bitmap and dma_info arrays.
3315 3314 */
3316 3315 static void
3317 3316 hermon_icm_tables_fini(hermon_state_t *state)
3318 3317 {
3319 3318 hermon_icm_table_t *icm;
3320 3319 int nspans;
3321 3320 int i, j;
3322 3321
3323 3322
3324 3323 icm = state->hs_icm;
3325 3324
3326 3325 for (i = 0; i < HERMON_NUM_ICM_RESOURCES; i++) {
3327 3326
3328 3327 mutex_enter(&icm[i].icm_table_lock);
3329 3328 nspans = icm[i].num_spans;
3330 3329
3331 3330 for (j = 0; j < HERMON_ICM_SPLIT; j++) {
3332 3331 if (icm[i].icm_dma[j])
3333 3332 /* Free the ICM DMA slots */
3334 3333 kmem_free(icm[i].icm_dma[j],
3335 3334 nspans * sizeof (hermon_dma_info_t));
3336 3335
3337 3336 if (icm[i].icm_bitmap[j])
3338 3337 /* Free the table bitmap */
3339 3338 kmem_free(icm[i].icm_bitmap[j],
3340 3339 (nspans + 7) / 8);
3341 3340 }
3342 3341 /* Destroy the table lock */
3343 3342 cv_destroy(&icm[i].icm_table_cv);
3344 3343 mutex_exit(&icm[i].icm_table_lock);
3345 3344 mutex_destroy(&icm[i].icm_table_lock);
3346 3345 }
3347 3346
3348 3347 }
3349 3348
3350 3349 /*
3351 3350 * hermon_icm_dma_init()
3352 3351 * Context: Only called from attach() path context
3353 3352 */
3354 3353 static int
3355 3354 hermon_icm_dma_init(hermon_state_t *state)
3356 3355 {
3357 3356 hermon_icm_table_t *icm;
3358 3357 hermon_rsrc_type_t type;
3359 3358 int status;
3360 3359
3361 3360
3362 3361 /*
3363 3362 * This routine will allocate initial ICM DMA resources for ICM
3364 3363 * tables that have reserved ICM objects. This is the only routine
3365 3364 * where we should have to allocate ICM outside of hermon_rsrc_alloc().
3366 3365 * We need to allocate ICM here explicitly, rather than in
3367 3366 * hermon_rsrc_alloc(), because we've not yet completed the resource
3368 3367 * pool initialization. When the resource pools are initialized
3369 3368 * (in hermon_rsrc_init_phase2(), see hermon_rsrc.c for more
3370 3369 * information), resource preallocations will be invoked to match
3371 3370 * the ICM allocations seen here. We will then be able to use the
3372 3371 * normal allocation path. Note we don't need to set a refcnt on
3373 3372 * these initial allocations because that will be done in the calls
3374 3373 * to hermon_rsrc_alloc() from hermon_hw_entries_init() for the
3375 3374 * "prealloc" objects (see hermon_rsrc.c for more information).
3376 3375 */
3377 3376 for (type = 0; type < HERMON_NUM_ICM_RESOURCES; type++) {
3378 3377
3379 3378 /* ICM for these is allocated within hermon_icm_alloc() */
3380 3379 switch (type) {
3381 3380 case HERMON_CMPT:
3382 3381 case HERMON_CMPT_QPC:
3383 3382 case HERMON_CMPT_SRQC:
3384 3383 case HERMON_CMPT_CQC:
3385 3384 case HERMON_CMPT_EQC:
3386 3385 case HERMON_AUXC:
3387 3386 case HERMON_ALTC:
3388 3387 case HERMON_RDB:
3389 3388 continue;
3390 3389 }
3391 3390
3392 3391 icm = &state->hs_icm[type];
3393 3392
3394 3393 mutex_enter(&icm->icm_table_lock);
3395 3394 status = hermon_icm_alloc(state, type, 0, 0);
3396 3395 mutex_exit(&icm->icm_table_lock);
3397 3396 if (status != DDI_SUCCESS) {
3398 3397 while (type--) {
3399 3398 icm = &state->hs_icm[type];
3400 3399 mutex_enter(&icm->icm_table_lock);
3401 3400 hermon_icm_free(state, type, 0, 0);
3402 3401 mutex_exit(&icm->icm_table_lock);
3403 3402 }
3404 3403 return (DDI_FAILURE);
3405 3404 }
3406 3405
3407 3406 if (hermon_verbose) {
3408 3407 IBTF_DPRINTF_L2("hermon", "hermon_icm_dma_init: "
3409 3408 "table (0x%x) index (0x%x) allocated", type, 0);
3410 3409 }
3411 3410 }
3412 3411
3413 3412 return (DDI_SUCCESS);
3414 3413 }
3415 3414
3416 3415 /*
3417 3416 * hermon_icm_dma_fini()
3418 3417 * Context: Only called from attach() path context
3419 3418 *
3420 3419 * ICM has been completely unmapped. We just free the memory here.
3421 3420 */
3422 3421 static void
3423 3422 hermon_icm_dma_fini(hermon_state_t *state)
3424 3423 {
3425 3424 hermon_icm_table_t *icm;
3426 3425 hermon_dma_info_t *dma_info;
3427 3426 hermon_rsrc_type_t type;
3428 3427 int index1, index2;
3429 3428
3430 3429
3431 3430 for (type = 0; type < HERMON_NUM_ICM_RESOURCES; type++) {
3432 3431 icm = &state->hs_icm[type];
3433 3432 for (index1 = 0; index1 < HERMON_ICM_SPLIT; index1++) {
3434 3433 dma_info = icm->icm_dma[index1];
3435 3434 if (dma_info == NULL)
3436 3435 continue;
3437 3436 for (index2 = 0; index2 < icm->num_spans; index2++) {
3438 3437 if (dma_info[index2].dma_hdl)
3439 3438 hermon_dma_free(&dma_info[index2]);
3440 3439 dma_info[index2].dma_hdl = NULL;
3441 3440 }
3442 3441 }
3443 3442 }
3444 3443
3445 3444 }
3446 3445
3447 3446 /*
3448 3447 * hermon_hca_port_init()
3449 3448 * Context: Only called from attach() path context
3450 3449 */
3451 3450 static int
3452 3451 hermon_hca_port_init(hermon_state_t *state)
3453 3452 {
3454 3453 hermon_hw_set_port_t *portinits, *initport;
3455 3454 hermon_cfg_profile_t *cfgprof;
3456 3455 uint_t num_ports;
3457 3456 int i = 0, status;
3458 3457 uint64_t maxval, val;
3459 3458 uint64_t sysimgguid, nodeguid, portguid;
3460 3459
3461 3460
3462 3461 cfgprof = state->hs_cfg_profile;
3463 3462
3464 3463 /* Get number of HCA ports */
3465 3464 num_ports = cfgprof->cp_num_ports;
3466 3465
3467 3466 /* Allocate space for Hermon set port struct(s) */
3468 3467 portinits = (hermon_hw_set_port_t *)kmem_zalloc(num_ports *
3469 3468 sizeof (hermon_hw_set_port_t), KM_SLEEP);
3470 3469
3471 3470
3472 3471
3473 3472 /* Post commands to initialize each Hermon HCA port */
3474 3473 /*
3475 3474 * In Hermon, the process is different than in previous HCAs.
3476 3475 * Here, you have to:
3477 3476 * QUERY_PORT - to get basic information from the HCA
3478 3477 * set the fields accordingly
3479 3478 * SET_PORT - to change/set everything as desired
3480 3479 * INIT_PORT - to bring the port up
3481 3480 *
3482 3481 * Needs to be done for each port in turn
3483 3482 */
3484 3483
3485 3484 for (i = 0; i < num_ports; i++) {
3486 3485 bzero(&state->hs_queryport, sizeof (hermon_hw_query_port_t));
3487 3486 status = hermon_cmn_query_cmd_post(state, QUERY_PORT, 0,
3488 3487 (i + 1), &state->hs_queryport,
3489 3488 sizeof (hermon_hw_query_port_t), HERMON_CMD_NOSLEEP_SPIN);
3490 3489 if (status != HERMON_CMD_SUCCESS) {
3491 3490 cmn_err(CE_CONT, "Hermon: QUERY_PORT (port %02d) "
3492 3491 "command failed: %08x\n", i + 1, status);
3493 3492 goto init_ports_fail;
3494 3493 }
3495 3494 initport = &portinits[i];
3496 3495 state->hs_initport = &portinits[i];
3497 3496
3498 3497 bzero(initport, sizeof (hermon_hw_query_port_t));
3499 3498
3500 3499 /*
3501 3500 * Determine whether we need to override the firmware's
3502 3501 * default SystemImageGUID setting.
3503 3502 */
3504 3503 sysimgguid = cfgprof->cp_sysimgguid;
3505 3504 if (sysimgguid != 0) {
3506 3505 initport->sig = 1;
3507 3506 initport->sys_img_guid = sysimgguid;
3508 3507 }
3509 3508
3510 3509 /*
3511 3510 * Determine whether we need to override the firmware's
3512 3511 * default NodeGUID setting.
3513 3512 */
3514 3513 nodeguid = cfgprof->cp_nodeguid;
3515 3514 if (nodeguid != 0) {
3516 3515 initport->ng = 1;
3517 3516 initport->node_guid = nodeguid;
3518 3517 }
3519 3518
3520 3519 /*
3521 3520 * Determine whether we need to override the firmware's
3522 3521 * default PortGUID setting.
3523 3522 */
3524 3523 portguid = cfgprof->cp_portguid[i];
3525 3524 if (portguid != 0) {
3526 3525 initport->g0 = 1;
3527 3526 initport->guid0 = portguid;
3528 3527 }
3529 3528
3530 3529 /* Validate max MTU size */
3531 3530 maxval = state->hs_queryport.ib_mtu;
3532 3531 val = cfgprof->cp_max_mtu;
3533 3532 if (val > maxval) {
3534 3533 goto init_ports_fail;
3535 3534 }
3536 3535
3537 3536 /* Set mtu_cap to 4096 bytes */
3538 3537 initport->mmc = 1; /* set the change bit */
3539 3538 initport->mtu_cap = 5; /* for 4096 bytes */
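/* (IB MTU encoding: 1=256, 2=512, 3=1024, 4=2048, 5=4096 bytes) */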
3540 3539
3541 3540 /* Validate the max port width */
3542 3541 maxval = state->hs_queryport.ib_port_wid;
3543 3542 val = cfgprof->cp_max_port_width;
3544 3543 if (val > maxval) {
3545 3544 goto init_ports_fail;
3546 3545 }
3547 3546
3548 3547 /* Validate max VL cap size */
3549 3548 maxval = state->hs_queryport.max_vl;
3550 3549 val = cfgprof->cp_max_vlcap;
3551 3550 if (val > maxval) {
3552 3551 goto init_ports_fail;
3553 3552 }
3554 3553
3555 3554 /* Since we're doing mtu_cap, cut vl_cap down */
3556 3555 initport->mvc = 1; /* set this change bit */
3557 3556 initport->vl_cap = 3; /* 3 means vl0-vl3, 4 total */
3558 3557
3559 3558 /* Validate max GID table size */
3560 3559 maxval = ((uint64_t)1 << state->hs_queryport.log_max_gid);
3561 3560 val = ((uint64_t)1 << cfgprof->cp_log_max_gidtbl);
3562 3561 if (val > maxval) {
3563 3562 goto init_ports_fail;
3564 3563 }
3565 3564 initport->max_gid = (uint16_t)val;
3566 3565 initport->mg = 1;
3567 3566
3568 3567 /* Validate max PKey table size */
3569 3568 maxval = ((uint64_t)1 << state->hs_queryport.log_max_pkey);
3570 3569 val = ((uint64_t)1 << cfgprof->cp_log_max_pkeytbl);
3571 3570 if (val > maxval) {
3572 3571 goto init_ports_fail;
3573 3572 }
3574 3573 initport->max_pkey = (uint16_t)val;
3575 3574 initport->mp = 1;
3576 3575 /*
3577 3576 * Post the SET_PORT cmd to Hermon firmware. This sets
3578 3577 * the parameters of the port.
3579 3578 */
3580 3579 status = hermon_set_port_cmd_post(state, initport, i + 1,
3581 3580 HERMON_CMD_NOSLEEP_SPIN);
3582 3581 if (status != HERMON_CMD_SUCCESS) {
3583 3582 cmn_err(CE_CONT, "Hermon: SET_PORT (port %02d) command "
3584 3583 "failed: %08x\n", i + 1, status);
3585 3584 goto init_ports_fail;
3586 3585 }
3587 3586 /* issue another SET_PORT cmd - performance fix/workaround */
3588 3587 /* XXX - need to discuss with Mellanox */
3589 3588 bzero(initport, sizeof (hermon_hw_query_port_t));
3590 3589 initport->cap_mask = 0x02500868;
3591 3590 status = hermon_set_port_cmd_post(state, initport, i + 1,
3592 3591 HERMON_CMD_NOSLEEP_SPIN);
3593 3592 if (status != HERMON_CMD_SUCCESS) {
3594 3593 cmn_err(CE_CONT, "Hermon: SET_PORT (port %02d) command "
3595 3594 "failed: %08x\n", i + 1, status);
3596 3595 goto init_ports_fail;
3597 3596 }
3598 3597 }
3599 3598
3600 3599 /*
3601 3600 * Finally, do the INIT_PORT for each port in turn
3602 3601 * When this command completes, the corresponding Hermon port
3603 3602 * will be physically "Up" and initialized.
3604 3603 */
3605 3604 for (i = 0; i < num_ports; i++) {
3606 3605 status = hermon_init_port_cmd_post(state, i + 1,
3607 3606 HERMON_CMD_NOSLEEP_SPIN);
3608 3607 if (status != HERMON_CMD_SUCCESS) {
3609 3608 cmn_err(CE_CONT, "Hermon: INIT_PORT (port %02d) "
3610 3609 "command failed: %08x\n", i + 1, status);
3611 3610 goto init_ports_fail;
3612 3611 }
3613 3612 }
3614 3613
3615 3614 /* Free up the memory for Hermon port init struct(s), return success */
3616 3615 kmem_free(portinits, num_ports * sizeof (hermon_hw_set_port_t));
3617 3616 return (DDI_SUCCESS);
3618 3617
3619 3618 init_ports_fail:
3620 3619 /*
3621 3620 * Free up the memory for Hermon port init struct(s), shutdown any
3622 3621 * successfully initialized ports, and return failure
3623 3622 */
3624 3623 kmem_free(portinits, num_ports * sizeof (hermon_hw_set_port_t));
3625 3624 (void) hermon_hca_ports_shutdown(state, i);
3626 3625
3627 3626 return (DDI_FAILURE);
3628 3627 }
3629 3628
3630 3629
3631 3630 /*
3632 3631 * hermon_hca_ports_shutdown()
3633 3632 * Context: Only called from attach() and/or detach() path contexts
3634 3633 */
3635 3634 static int
3636 3635 hermon_hca_ports_shutdown(hermon_state_t *state, uint_t num_init)
3637 3636 {
3638 3637 int i, status;
3639 3638
3640 3639 /*
3641 3640 * Post commands to shutdown all init'd Hermon HCA ports. Note: if
3642 3641 * any of these commands fail for any reason, it would be entirely
3643 3642 * unexpected and probably indicative of a serious problem (HW or SW).
3644 3643 * Although the caller typically ignores our return value, this type of
3645 3644 * failure should not go unreported. That is why we have the warning message.
3646 3645 */
3647 3646 for (i = 0; i < num_init; i++) {
3648 3647 status = hermon_close_port_cmd_post(state, i + 1,
3649 3648 HERMON_CMD_NOSLEEP_SPIN);
3650 3649 if (status != HERMON_CMD_SUCCESS) {
3651 3650 HERMON_WARNING(state, "failed to shutdown HCA port");
3652 3651 return (status);
3653 3652 }
3654 3653 }
3655 3654 return (HERMON_CMD_SUCCESS);
3656 3655 }
3657 3656
3658 3657
3659 3658 /*
3660 3659 * hermon_internal_uarpg_init
3661 3660 * Context: Only called from attach() path context
3662 3661 */
3663 3662 static int
3664 3663 hermon_internal_uarpg_init(hermon_state_t *state)
3665 3664 {
3666 3665 int status;
3667 3666 hermon_dbr_info_t *info;
3668 3667
3669 3668 /*
3670 3669 * Allocate the UAR page for kernel use. This UAR page is
3671 3670 * the privileged UAR page through which all kernel generated
3672 3671 * doorbells will be rung. There are a number of UAR pages
3673 3672 * reserved by hardware at the front of the UAR BAR, indicated
3674 3673 * by DEVCAP.num_rsvd_uar, which we have already allocated. So,
3675 3674 * the kernel page, or UAR page index num_rsvd_uar, will be
3676 3675 * allocated here for kernel use.
3677 3676 */
3678 3677
3679 3678 status = hermon_rsrc_alloc(state, HERMON_UARPG, 1, HERMON_SLEEP,
3680 3679 &state->hs_uarkpg_rsrc);
3681 3680 if (status != DDI_SUCCESS) {
3682 3681 return (DDI_FAILURE);
3683 3682 }
3684 3683
3685 3684 /* Setup pointer to kernel UAR page */
3686 3685 state->hs_uar = (hermon_hw_uar_t *)state->hs_uarkpg_rsrc->hr_addr;
3687 3686
3688 3687 /* need to set up DBr tracking as well */
3689 3688 status = hermon_dbr_page_alloc(state, &info);
3690 3689 if (status != DDI_SUCCESS) {
3691 3690 return (DDI_FAILURE);
3692 3691 }
3693 3692 state->hs_kern_dbr = info;
3694 3693 return (DDI_SUCCESS);
3695 3694 }
3696 3695
3697 3696
3698 3697 /*
3699 3698 * hermon_internal_uarpg_fini
3700 3699 * Context: Only called from attach() and/or detach() path contexts
3701 3700 */
3702 3701 static void
3703 3702 hermon_internal_uarpg_fini(hermon_state_t *state)
3704 3703 {
3705 3704 /* Free up Hermon UAR page #1 (kernel driver doorbells) */
3706 3705 hermon_rsrc_free(state, &state->hs_uarkpg_rsrc);
3707 3706 }
3708 3707
3709 3708
3710 3709 /*
3711 3710 * hermon_special_qp_contexts_reserve()
3712 3711 * Context: Only called from attach() path context
3713 3712 */
3714 3713 static int
3715 3714 hermon_special_qp_contexts_reserve(hermon_state_t *state)
3716 3715 {
3717 3716 hermon_rsrc_t *qp0_rsrc, *qp1_rsrc, *qp_resvd;
3718 3717 int status;
3719 3718
3720 3719 /* Initialize the lock used for special QP rsrc management */
3721 3720 mutex_init(&state->hs_spec_qplock, NULL, MUTEX_DRIVER,
3722 3721 DDI_INTR_PRI(state->hs_intrmsi_pri));
3723 3722
3724 3723 /*
3725 3724 * Reserve contexts for QP0. These QP contexts will be setup to
3726 3725 * act as aliases for the real QP0. Note: We are required to grab
3727 3726 * two QPs (one per port) even if we are operating in single-port
3728 3727 * mode.
3729 3728 */
3730 3729 status = hermon_rsrc_alloc(state, HERMON_QPC, 2,
3731 3730 HERMON_SLEEP, &qp0_rsrc);
3732 3731 if (status != DDI_SUCCESS) {
3733 3732 mutex_destroy(&state->hs_spec_qplock);
3734 3733 return (DDI_FAILURE);
3735 3734 }
3736 3735 state->hs_spec_qp0 = qp0_rsrc;
3737 3736
3738 3737 /*
3739 3738 * Reserve contexts for QP1. These QP contexts will be setup to
3740 3739 * act as aliases for the real QP1. Note: We are required to grab
3741 3740 * two QPs (one per port) even if we are operating in single-port
3742 3741 * mode.
3743 3742 */
3744 3743 status = hermon_rsrc_alloc(state, HERMON_QPC, 2,
3745 3744 HERMON_SLEEP, &qp1_rsrc);
3746 3745 if (status != DDI_SUCCESS) {
3747 3746 hermon_rsrc_free(state, &qp0_rsrc);
3748 3747 mutex_destroy(&state->hs_spec_qplock);
3749 3748 return (DDI_FAILURE);
3750 3749 }
3751 3750 state->hs_spec_qp1 = qp1_rsrc;
3752 3751
3753 3752 status = hermon_rsrc_alloc(state, HERMON_QPC, 4,
3754 3753 HERMON_SLEEP, &qp_resvd);
3755 3754 if (status != DDI_SUCCESS) {
3756 3755 hermon_rsrc_free(state, &qp1_rsrc);
3757 3756 hermon_rsrc_free(state, &qp0_rsrc);
3758 3757 mutex_destroy(&state->hs_spec_qplock);
3759 3758 return (DDI_FAILURE);
3760 3759 }
3761 3760 state->hs_spec_qp_unused = qp_resvd;
3762 3761
3763 3762 return (DDI_SUCCESS);
3764 3763 }
3765 3764
3766 3765
3767 3766 /*
3768 3767 * hermon_special_qp_contexts_unreserve()
3769 3768 * Context: Only called from attach() and/or detach() path contexts
3770 3769 */
3771 3770 static void
3772 3771 hermon_special_qp_contexts_unreserve(hermon_state_t *state)
3773 3772 {
3774 3773
3775 3774 /* Unreserve contexts for spec_qp_unused */
3776 3775 hermon_rsrc_free(state, &state->hs_spec_qp_unused);
3777 3776
3778 3777 /* Unreserve contexts for QP1 */
3779 3778 hermon_rsrc_free(state, &state->hs_spec_qp1);
3780 3779
3781 3780 /* Unreserve contexts for QP0 */
3782 3781 hermon_rsrc_free(state, &state->hs_spec_qp0);
3783 3782
3784 3783 /* Destroy the lock used for special QP rsrc management */
3785 3784 mutex_destroy(&state->hs_spec_qplock);
3786 3785
3787 3786 }
3788 3787
3789 3788
3790 3789 /*
3791 3790 * hermon_sw_reset()
3792 3791 * Context: Currently called only from attach() path context
3793 3792 */
3794 3793 static int
3795 3794 hermon_sw_reset(hermon_state_t *state)
3796 3795 {
3797 3796 ddi_acc_handle_t hdl = hermon_get_pcihdl(state);
3798 3797 ddi_acc_handle_t cmdhdl = hermon_get_cmdhdl(state);
3799 3798 uint32_t reset_delay;
3800 3799 int status, i;
3801 3800 uint32_t sem;
3802 3801 uint_t offset;
3803 3802 uint32_t data32; /* for devctl & linkctl */
3804 3803 int loopcnt;
3805 3804
3806 3805 /* initialize the FMA retry loop */
3807 3806 hermon_pio_init(fm_loop_cnt, fm_status, fm_test);
3808 3807 hermon_pio_init(fm_loop_cnt2, fm_status2, fm_test2);
3809 3808
3810 3809 /*
3811 3810 * If the configured software reset delay is set to zero, then we
3812 3811 * will not attempt a software reset of the Hermon device.
3813 3812 */
3814 3813 reset_delay = state->hs_cfg_profile->cp_sw_reset_delay;
3815 3814 if (reset_delay == 0) {
3816 3815 return (DDI_SUCCESS);
3817 3816 }
3818 3817
3819 3818 /* the FMA retry loop starts. */
3820 3819 hermon_pio_start(state, cmdhdl, pio_error, fm_loop_cnt, fm_status,
3821 3820 fm_test);
3822 3821 hermon_pio_start(state, hdl, pio_error2, fm_loop_cnt2, fm_status2,
3823 3822 fm_test2);
3824 3823
3825 3824 /* Query the PCI capabilities of the HCA device */
3826 3825 /* but don't process the VPD until after reset */
3827 3826 status = hermon_pci_capability_list(state, hdl);
3828 3827 if (status != DDI_SUCCESS) {
3829 3828 cmn_err(CE_NOTE, "failed to get pci capabilities list(0x%x)\n",
3830 3829 status);
3831 3830 return (DDI_FAILURE);
3832 3831 }
3833 3832
3834 3833 /*
3835 3834 * Read all PCI config info (reg0...reg63). Note: According to the
3836 3835 * Hermon software reset application note, we should not read or
3837 3836 * restore the values in reg22 and reg23.
3838 3837 * NOTE: For Hermon (and Arbel too) it says to restore the command
3839 3838 * register LAST, and technically, you need to restore the
3840 3839 * PCIE Capability "device control" and "link control" (word-sized,
3841 3840 * at offsets 0x08 and 0x10 from the capability ID respectively).
3842 3841 * We hold off restoring the command register - offset 0x4 - till last
3843 3842 */
3844 3843
3845 3844 /* 1st, wait for the semaphore to assure accessibility - per PRM */
3846 3845 status = -1;
3847 3846 for (i = 0; i < NANOSEC/MICROSEC /* 1sec timeout */; i++) {
3848 3847 sem = ddi_get32(cmdhdl, state->hs_cmd_regs.sw_semaphore);
3849 3848 if (sem == 0) {
3850 3849 status = 0;
3851 3850 break;
3852 3851 }
3853 3852 drv_usecwait(1);
3854 3853 }
3855 3854
3856 3855 /* Check if timeout happens */
3857 3856 if (status == -1) {
3858 3857 /*
3859 3858 * Remove this acc handle from Hermon, then log
3860 3859 * the error.
3861 3860 */
3862 3861 hermon_pci_config_teardown(state, &hdl);
3863 3862
3864 3863 cmn_err(CE_WARN, "hermon_sw_reset timeout: "
3865 3864 "failed to get the semaphore(0x%p)\n",
3866 3865 (void *)state->hs_cmd_regs.sw_semaphore);
3867 3866
3868 3867 hermon_fm_ereport(state, HCA_IBA_ERR, HCA_ERR_NON_FATAL);
3869 3868 return (DDI_FAILURE);
3870 3869 }
3871 3870
3872 3871 for (i = 0; i < HERMON_SW_RESET_NUMREGS; i++) {
3873 3872 if ((i != HERMON_SW_RESET_REG22_RSVD) &&
3874 3873 (i != HERMON_SW_RESET_REG23_RSVD)) {
3875 3874 state->hs_cfg_data[i] = pci_config_get32(hdl, i << 2);
3876 3875 }
3877 3876 }
3878 3877
3879 3878 /*
3880 3879 * Perform the software reset (by writing 1 at offset 0xF0010)
3881 3880 */
3882 3881 ddi_put32(cmdhdl, state->hs_cmd_regs.sw_reset, HERMON_SW_RESET_START);
3883 3882
3884 3883 /*
3885 3884 * This delay is required so as not to cause a panic here. If the
3886 3885 * device is accessed too soon after reset it will not respond to
3887 3886 * config cycles, causing a Master Abort and panic.
3888 3887 */
3889 3888 drv_usecwait(reset_delay);
3890 3889
3891 3890 /*
3892 3891 * Poll waiting for the device to finish resetting.
3893 3892 */
3894 3893 loopcnt = 100; /* 100 times @ 100 usec - total delay 10 msec */
3895 3894 while ((pci_config_get32(hdl, 0) & 0x0000FFFF) != PCI_VENID_MLX) {
3896 3895 drv_usecwait(HERMON_SW_RESET_POLL_DELAY);
3897 3896 if (--loopcnt == 0)
3898 3897 break; /* just in case, break and go on */
3899 3898 }
3900 3899 if (loopcnt == 0)
3901 3900 cmn_err(CE_CONT, "!Never see VEND_ID - read == %X",
3902 3901 pci_config_get32(hdl, 0));
3903 3902
3904 3903 /*
3905 3904 * Restore the config info
3906 3905 */
3907 3906 for (i = 0; i < HERMON_SW_RESET_NUMREGS; i++) {
3908 3907 if (i == 1) continue; /* skip the status/ctrl reg */
3909 3908 if ((i != HERMON_SW_RESET_REG22_RSVD) &&
3910 3909 (i != HERMON_SW_RESET_REG23_RSVD)) {
3911 3910 pci_config_put32(hdl, i << 2, state->hs_cfg_data[i]);
3912 3911 }
3913 3912 }
3914 3913
3915 3914 /*
3916 3915 * PCI Express Capability - we saved these registers during the
3917 3916 * capability list walk, and we'll restore them here.
3918 3917 */
3919 3918 offset = state->hs_pci_cap_offset;
3920 3919 data32 = state->hs_pci_cap_devctl;
3921 3920 pci_config_put32(hdl, offset + HERMON_PCI_CAP_DEV_OFFS, data32);
3922 3921 data32 = state->hs_pci_cap_lnkctl;
3923 3922 pci_config_put32(hdl, offset + HERMON_PCI_CAP_LNK_OFFS, data32);
3924 3923
3925 3924 pci_config_put32(hdl, 0x04, (state->hs_cfg_data[1] | 0x0006));
3926 3925
3927 3926 /* the FMA retry loop ends. */
3928 3927 hermon_pio_end(state, hdl, pio_error2, fm_loop_cnt2, fm_status2,
3929 3928 fm_test2);
3930 3929 hermon_pio_end(state, cmdhdl, pio_error, fm_loop_cnt, fm_status,
3931 3930 fm_test);
3932 3931
3933 3932 return (DDI_SUCCESS);
3934 3933
3935 3934 pio_error2:
3936 3935 /* fall through */
3937 3936 pio_error:
3938 3937 hermon_fm_ereport(state, HCA_SYS_ERR, HCA_ERR_NON_FATAL);
3939 3938 return (DDI_FAILURE);
3940 3939 }
3941 3940
3942 3941
3943 3942 /*
3944 3943 * hermon_mcg_init()
3945 3944 * Context: Only called from attach() path context
3946 3945 */
3947 3946 static int
3948 3947 hermon_mcg_init(hermon_state_t *state)
3949 3948 {
3950 3949 uint_t mcg_tmp_sz;
3951 3950
3952 3951
3953 3952 /*
3954 3953 * Allocate space for the MCG temporary copy buffer. This is
3955 3954 * used by the Attach/Detach Multicast Group code
3956 3955 */
3957 3956 mcg_tmp_sz = HERMON_MCGMEM_SZ(state);
3958 3957 state->hs_mcgtmp = kmem_zalloc(mcg_tmp_sz, KM_SLEEP);
3959 3958
3960 3959 /*
3961 3960 * Initialize the multicast group mutex. This ensures atomic
3962 3961 * access to add, modify, and remove entries in the multicast
3963 3962 * group hash lists.
3964 3963 */
3965 3964 mutex_init(&state->hs_mcglock, NULL, MUTEX_DRIVER,
3966 3965 DDI_INTR_PRI(state->hs_intrmsi_pri));
3967 3966
3968 3967 return (DDI_SUCCESS);
3969 3968 }
3970 3969
3971 3970
3972 3971 /*
3973 3972 * hermon_mcg_fini()
3974 3973 * Context: Only called from attach() and/or detach() path contexts
3975 3974 */
3976 3975 static void
3977 3976 hermon_mcg_fini(hermon_state_t *state)
3978 3977 {
3979 3978 uint_t mcg_tmp_sz;
3980 3979
3981 3980
3982 3981 /* Free up the space used for the MCG temporary copy buffer */
3983 3982 mcg_tmp_sz = HERMON_MCGMEM_SZ(state);
3984 3983 kmem_free(state->hs_mcgtmp, mcg_tmp_sz);
3985 3984
3986 3985 /* Destroy the multicast group mutex */
3987 3986 mutex_destroy(&state->hs_mcglock);
3988 3987
3989 3988 }
3990 3989
3991 3990
3992 3991 /*
3993 3992 * hermon_fw_version_check()
3994 3993 * Context: Only called from attach() path context
3995 3994 */
3996 3995 static int
3997 3996 hermon_fw_version_check(hermon_state_t *state)
3998 3997 {
3999 3998
4000 3999 uint_t hermon_fw_ver_major;
4001 4000 uint_t hermon_fw_ver_minor;
4002 4001 uint_t hermon_fw_ver_subminor;
4003 4002
4004 4003 #ifdef FMA_TEST
4005 4004 if (hermon_test_num == -1) {
4006 4005 return (DDI_FAILURE);
4007 4006 }
4008 4007 #endif
4009 4008
4010 4009 /*
4011 4010 * Depending on which version of driver we have attached, and which
4012 4011 * HCA we've attached, the firmware version checks will be different.
4013 4012 * We set up the comparison values for the Hermon HCA here.
4014 4013 */
4015 4014 switch (state->hs_operational_mode) {
4016 4015 case HERMON_HCA_MODE:
4017 4016 hermon_fw_ver_major = HERMON_FW_VER_MAJOR;
4018 4017 hermon_fw_ver_minor = HERMON_FW_VER_MINOR;
4019 4018 hermon_fw_ver_subminor = HERMON_FW_VER_SUBMINOR;
4020 4019 break;
4021 4020
4022 4021 default:
4023 4022 return (DDI_FAILURE);
4024 4023 }
4025 4024
4026 4025 /*
4027 4026 * If FW revision major number is less than acceptable,
4028 4027 * return failure, else if greater return success. If
4029 4028 * the major numbers are equal, then check the minor number
4030 4029 */
4031 4030 if (state->hs_fw.fw_rev_major < hermon_fw_ver_major) {
4032 4031 return (DDI_FAILURE);
4033 4032 } else if (state->hs_fw.fw_rev_major > hermon_fw_ver_major) {
4034 4033 return (DDI_SUCCESS);
4035 4034 }
4036 4035
4037 4036 /*
4038 4037 * Do the same check as above, except for minor revision numbers
4039 4038 * If the minor numbers are equal, then check the subminor number
4040 4039 */
4041 4040 if (state->hs_fw.fw_rev_minor < hermon_fw_ver_minor) {
4042 4041 return (DDI_FAILURE);
4043 4042 } else if (state->hs_fw.fw_rev_minor > hermon_fw_ver_minor) {
4044 4043 return (DDI_SUCCESS);
4045 4044 }
4046 4045
4047 4046 /*
4048 4047 * Once again we do the same check as above, except for the subminor
4049 4048 * revision number. If the subminor numbers are equal here, then
4050 4049 * these are the same firmware version, return success
4051 4050 */
4052 4051 if (state->hs_fw.fw_rev_subminor < hermon_fw_ver_subminor) {
4053 4052 return (DDI_FAILURE);
4054 4053 } else if (state->hs_fw.fw_rev_subminor > hermon_fw_ver_subminor) {
4055 4054 return (DDI_SUCCESS);
4056 4055 }
4057 4056
4058 4057 return (DDI_SUCCESS);
4059 4058 }
4060 4059
4061 4060
4062 4061 /*
4063 4062 * hermon_device_info_report()
4064 4063 * Context: Only called from attach() path context
4065 4064 */
4066 4065 static void
4067 4066 hermon_device_info_report(hermon_state_t *state)
4068 4067 {
4069 4068
4070 4069 cmn_err(CE_CONT, "?hermon%d: FW ver: %04d.%04d.%04d, "
4071 4070 "HW rev: %02d\n", state->hs_instance, state->hs_fw.fw_rev_major,
4072 4071 state->hs_fw.fw_rev_minor, state->hs_fw.fw_rev_subminor,
4073 4072 state->hs_revision_id);
4074 4073 cmn_err(CE_CONT, "?hermon%d: %64s (0x%016" PRIx64 ")\n",
4075 4074 state->hs_instance, state->hs_nodedesc, state->hs_nodeguid);
4076 4075
4077 4076 }
4078 4077
4079 4078
4080 4079 /*
4081 4080 * hermon_pci_capability_list()
4082 4081 * Context: Only called from attach() path context
4083 4082 */
4084 4083 static int
4085 4084 hermon_pci_capability_list(hermon_state_t *state, ddi_acc_handle_t hdl)
4086 4085 {
4087 4086 uint_t offset, data;
4088 4087 uint32_t data32;
4089 4088
4090 4089 state->hs_pci_cap_offset = 0; /* make sure it's cleared */
4091 4090
4092 4091 /*
4093 4092 * Check for the "PCI Capabilities" bit in the "Status Register".
4094 4093 * Bit 4 in this register indicates the presence of a "PCI
4095 4094 * Capabilities" list.
4096 4095 *
4097 4096 * PCI-Express requires this bit to be set to 1.
4098 4097 */
4099 4098 data = pci_config_get16(hdl, 0x06);
4100 4099 if ((data & 0x10) == 0) {
4101 4100 return (DDI_FAILURE);
4102 4101 }
4103 4102
4104 4103 /*
4105 4104 * Starting from offset 0x34 in PCI config space, find the
4106 4105 * head of "PCI capabilities" list, and walk the list. If
4107 4106 * capabilities of a known type are encountered (e.g.
4108 4107 * "PCI-X Capability"), then call the appropriate handler
4109 4108 * function.
4110 4109 */
4111 4110 offset = pci_config_get8(hdl, 0x34);
4112 4111 while (offset != 0x0) {
4113 4112 data = pci_config_get8(hdl, offset);
4114 4113 /*
4115 4114 * Check for known capability types. Hermon has the
4116 4115 * following:
4117 4116 * o Power Mgmt (0x01)
4118 4117 * o VPD Capability (0x03)
4119 4118 * o PCI-E Capability (0x10)
4120 4119 * o MSIX Capability (0x11)
4121 4120 */
4122 4121 switch (data) {
4123 4122 case 0x01:
4124 4123 /* power mgmt handling */
4125 4124 break;
4126 4125 case 0x03:
4127 4126
4128 4127 /*
4129 4128 * Reading the PCIe VPD is inconsistent - that is, sometimes causes
4130 4129 * problems on (mostly) X64, though we've also seen problems w/ Sparc
4131 4130 * and Tavor --- so, for now until it's root caused, don't try and
4132 4131 * read it
4133 4132 */
4134 4133 #ifdef HERMON_VPD_WORKS
4135 4134 hermon_pci_capability_vpd(state, hdl, offset);
4136 4135 #else
4137 4136 delay(100);
4138 4137 hermon_pci_capability_vpd(state, hdl, offset);
4139 4138 #endif
4140 4139 break;
4141 4140 case 0x10:
4142 4141 /*
4143 4142 * PCI Express Capability - save offset & contents
4144 4143 * for later in reset
4145 4144 */
4146 4145 state->hs_pci_cap_offset = offset;
4147 4146 data32 = pci_config_get32(hdl,
4148 4147 offset + HERMON_PCI_CAP_DEV_OFFS);
4149 4148 state->hs_pci_cap_devctl = data32;
4150 4149 data32 = pci_config_get32(hdl,
4151 4150 offset + HERMON_PCI_CAP_LNK_OFFS);
4152 4151 state->hs_pci_cap_lnkctl = data32;
4153 4152 break;
4154 4153 case 0x11:
4155 4154 /*
4156 4155 * MSIX support - nothing to do, taken care of in the
4157 4156 * MSI/MSIX interrupt frameworkd
4158 4157 * MSI/MSIX interrupt framework
4159 4158 break;
4160 4159 default:
4161 4160 /* just go on to the next */
4162 4161 break;
4163 4162 }
4164 4163
4165 4164 /* Get offset of next entry in list */
4166 4165 offset = pci_config_get8(hdl, offset + 1);
4167 4166 }
4168 4167
4169 4168 return (DDI_SUCCESS);
4170 4169 }
4171 4170
4172 4171 /*
4173 4172 * hermon_pci_read_vpd()
4174 4173 * Context: Only called from attach() path context
4175 4174 * utility routine for hermon_pci_capability_vpd()
4176 4175 */
4177 4176 static int
4178 4177 hermon_pci_read_vpd(ddi_acc_handle_t hdl, uint_t offset, uint32_t addr,
4179 4178 uint32_t *data)
4180 4179 {
4181 4180 int retry = 40; /* retry counter for EEPROM poll */
4182 4181 uint32_t val;
4183 4182 int vpd_addr = offset + 2;
4184 4183 int vpd_data = offset + 4;
4185 4184
4186 4185 /*
4187 4186 * In order to read a 32-bit value from VPD, we write the address
4188 4187 * (the offset within the VPD itself) to the address register.
4189 4188 * To signal the read, we also clear bit 31. We then poll on bit 31
4190 4189 * and when it is set, we can then read our 4 bytes from the data
4191 4190 * register.
4192 4191 */
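/*
 * N.B. The flag is bit 31 of the 32-bit address/flag register; the
 * poll below reads it as bit 15 (0x8000) of the 16-bit word at
 * offset + 2.
 */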
4193 4192 (void) pci_config_put32(hdl, offset, addr << 16);
4194 4193 do {
4195 4194 drv_usecwait(1000);
4196 4195 val = pci_config_get16(hdl, vpd_addr);
4197 4196 if (val & 0x8000) { /* flag bit set */
4198 4197 *data = pci_config_get32(hdl, vpd_data);
4199 4198 return (DDI_SUCCESS);
4200 4199 }
4201 4200 } while (--retry);
4202 4201 /* read of flag failed; write one message but count the failures */
4203 4202 if (debug_vpd == 0)
4204 4203 cmn_err(CE_NOTE,
4205 4204 "!Failed to see flag bit after VPD addr write\n");
4206 4205 debug_vpd++;
4207 4206
4208 4207
4209 4208 vpd_read_fail:
4210 4209 return (DDI_FAILURE);
4211 4210 }
4212 4211
4213 4212
4214 4213
4215 4214 /*
4216 4215 * hermon_pci_capability_vpd()
4217 4216 * Context: Only called from attach() path context
4218 4217 */
4219 4218 static void
4220 4219 hermon_pci_capability_vpd(hermon_state_t *state, ddi_acc_handle_t hdl,
4221 4220 uint_t offset)
4222 4221 {
4223 4222 uint8_t name_length;
4224 4223 uint8_t pn_length;
4225 4224 int i, err = 0;
4226 4225 int vpd_str_id = 0;
4227 4226 int vpd_ro_desc;
4228 4227 int vpd_ro_pn_desc;
4229 4228 #ifdef _BIG_ENDIAN
4230 4229 uint32_t data32;
4231 4230 #endif /* _BIG_ENDIAN */
4232 4231 union {
4233 4232 uint32_t vpd_int[HERMON_VPD_HDR_DWSIZE];
4234 4233 uchar_t vpd_char[HERMON_VPD_HDR_BSIZE];
4235 4234 } vpd;
4236 4235
4237 4236
4238 4237 /*
4239 4238 * Read in the Vital Product Data (VPD) to the extent needed
4240 4239 * by the fwflash utility
4241 4240 */
4242 4241 for (i = 0; i < HERMON_VPD_HDR_DWSIZE; i++) {
4243 4242 err = hermon_pci_read_vpd(hdl, offset, i << 2, &vpd.vpd_int[i]);
4244 4243 if (err != DDI_SUCCESS) {
4245 4244 cmn_err(CE_NOTE, "!VPD read failed\n");
4246 4245 goto out;
4247 4246 }
4248 4247 }
4249 4248
4250 4249 #ifdef _BIG_ENDIAN
4251 4250 /* Need to swap bytes for big endian. */
4252 4251 for (i = 0; i < HERMON_VPD_HDR_DWSIZE; i++) {
4253 4252 data32 = vpd.vpd_int[i];
4254 4253 vpd.vpd_char[(i << 2) + 3] =
4255 4254 (uchar_t)((data32 & 0xFF000000) >> 24);
4256 4255 vpd.vpd_char[(i << 2) + 2] =
4257 4256 (uchar_t)((data32 & 0x00FF0000) >> 16);
4258 4257 vpd.vpd_char[(i << 2) + 1] =
4259 4258 (uchar_t)((data32 & 0x0000FF00) >> 8);
4260 4259 vpd.vpd_char[i << 2] = (uchar_t)(data32 & 0x000000FF);
4261 4260 }
4262 4261 #endif /* _BIG_ENDIAN */
4263 4262
4264 4263 /* Check for VPD String ID Tag */
4265 4264 if (vpd.vpd_char[vpd_str_id] == 0x82) {
4266 4265 /* get the product name */
4267 4266 name_length = (uint8_t)vpd.vpd_char[vpd_str_id + 1];
4268 4267 if (name_length > sizeof (state->hs_hca_name)) {
4269 4268 cmn_err(CE_NOTE, "!VPD name too large (0x%x)\n",
4270 4269 name_length);
4271 4270 goto out;
4272 4271 }
4273 4272 (void) memcpy(state->hs_hca_name, &vpd.vpd_char[vpd_str_id + 3],
4274 4273 name_length);
4275 4274 state->hs_hca_name[name_length] = 0;
4276 4275
4277 4276 /* get the part number */
4278 4277 vpd_ro_desc = name_length + 3; /* read-only tag location */
4279 4278 vpd_ro_pn_desc = vpd_ro_desc + 3; /* P/N keyword location */
4280 4279
4281 4280 /* Verify read-only tag and Part Number keyword. */
4282 4281 if (vpd.vpd_char[vpd_ro_desc] != 0x90 ||
4283 4282 (vpd.vpd_char[vpd_ro_pn_desc] != 'P' &&
4284 4283 vpd.vpd_char[vpd_ro_pn_desc + 1] != 'N')) {
4285 4284 cmn_err(CE_NOTE, "!VPD Part Number not found\n");
4286 4285 goto out;
4287 4286 }
4288 4287
4289 4288 pn_length = (uint8_t)vpd.vpd_char[vpd_ro_pn_desc + 2];
4290 4289 if (pn_length > sizeof (state->hs_hca_pn)) {
4291 4290 cmn_err(CE_NOTE, "!VPD part number too large (0x%x)\n",
4292 4291 pn_length);
4293 4292 goto out;
4294 4293 }
4295 4294 (void) memcpy(state->hs_hca_pn,
4296 4295 &vpd.vpd_char[vpd_ro_pn_desc + 3],
4297 4296 pn_length);
4298 4297 state->hs_hca_pn[pn_length] = 0;
4299 4298 state->hs_hca_pn_len = pn_length;
4300 4299 cmn_err(CE_CONT, "!vpd %s\n", state->hs_hca_pn);
4301 4300 } else {
4302 4301 /* Wrong VPD String ID Tag */
4303 4302 cmn_err(CE_NOTE, "!VPD String ID Tag not found, tag: %02x\n",
4304 4303 vpd.vpd_char[0]);
4305 4304 goto out;
4306 4305 }
4307 4306 return;
4308 4307 out:
4309 4308 state->hs_hca_pn_len = 0;
4310 4309 }
4311 4310
4312 4311
4313 4312
4314 4313 /*
4315 4314 * hermon_intr_or_msi_init()
4316 4315 * Context: Only called from attach() path context
4317 4316 */
4318 4317 static int
4319 4318 hermon_intr_or_msi_init(hermon_state_t *state)
4320 4319 {
4321 4320 int status;
4322 4321
4323 4322 /* Query for the list of supported interrupt event types */
4324 4323 status = ddi_intr_get_supported_types(state->hs_dip,
4325 4324 &state->hs_intr_types_avail);
4326 4325 if (status != DDI_SUCCESS) {
4327 4326 return (DDI_FAILURE);
4328 4327 }
4329 4328
4330 4329 /*
4331 4330 * If Hermon supports MSI-X in this system (and, if it
4332 4331 * hasn't been overridden by a configuration variable), then
4333 4332 * the default behavior is to use a single MSI-X. Otherwise,
4334 4333 * fall back to using legacy interrupts. Also, if MSI-X is chosen
4335 4334 * but fails for whatever reason, then next try MSI.
4336 4335 */
4337 4336 if ((state->hs_cfg_profile->cp_use_msi_if_avail != 0) &&
4338 4337 (state->hs_intr_types_avail & DDI_INTR_TYPE_MSIX)) {
4339 4338 status = hermon_add_intrs(state, DDI_INTR_TYPE_MSIX);
4340 4339 if (status == DDI_SUCCESS) {
4341 4340 state->hs_intr_type_chosen = DDI_INTR_TYPE_MSIX;
4342 4341 return (DDI_SUCCESS);
4343 4342 }
4344 4343 }
4345 4344
4346 4345 /*
4347 4346 * If Hermon supports MSI in this system (and, if it
4348 4347 * hasn't been overridden by a configuration variable), then
4349 4348 * the default behavior is to use a single MSI. Otherwise,
4350 4349 * fall back to using legacy interrupts. Also, if MSI is chosen
4351 4350 * but fails for whatever reason, then fall back to using legacy
4352 4351 * interrupts.
4353 4352 */
4354 4353 if ((state->hs_cfg_profile->cp_use_msi_if_avail != 0) &&
4355 4354 (state->hs_intr_types_avail & DDI_INTR_TYPE_MSI)) {
4356 4355 status = hermon_add_intrs(state, DDI_INTR_TYPE_MSI);
4357 4356 if (status == DDI_SUCCESS) {
4358 4357 state->hs_intr_type_chosen = DDI_INTR_TYPE_MSI;
4359 4358 return (DDI_SUCCESS);
4360 4359 }
4361 4360 }
4362 4361
4363 4362 /*
4364 4363 * MSI interrupt allocation failed, or was not available. Fallback to
4365 4364 * legacy interrupt support.
4366 4365 */
4367 4366 if (state->hs_intr_types_avail & DDI_INTR_TYPE_FIXED) {
4368 4367 status = hermon_add_intrs(state, DDI_INTR_TYPE_FIXED);
4369 4368 if (status == DDI_SUCCESS) {
4370 4369 state->hs_intr_type_chosen = DDI_INTR_TYPE_FIXED;
4371 4370 return (DDI_SUCCESS);
4372 4371 }
4373 4372 }
4374 4373
4375 4374 /*
4376 4375 * None of MSI, MSI-X, nor legacy interrupts were successful.
4377 4376 * Return failure.
4378 4377 */
4379 4378 return (DDI_FAILURE);
4380 4379 }
4381 4380
4382 4381 /* ARGSUSED */
4383 4382 static int
4384 4383 hermon_intr_cb_handler(dev_info_t *dip, ddi_cb_action_t action, void *cbarg,
4385 4384 void *arg1, void *arg2)
4386 4385 {
4387 4386 hermon_state_t *state = (hermon_state_t *)arg1;
4388 4387
4389 4388 IBTF_DPRINTF_L2("hermon", "interrupt callback: instance %d, "
4390 4389 "action %d, cbarg %d\n", state->hs_instance, action,
4391 4390 (uint32_t)(uintptr_t)cbarg);
4392 4391 return (DDI_SUCCESS);
4393 4392 }
4394 4393
4395 4394 /*
4396 4395 * hermon_add_intrs()
4397 4396 * Context: Only called from attach() path context
4398 4397 */
4399 4398 static int
4400 4399 hermon_add_intrs(hermon_state_t *state, int intr_type)
4401 4400 {
4402 4401 int status;
4403 4402
4404 4403 if (state->hs_intr_cb_hdl == NULL) {
4405 4404 status = ddi_cb_register(state->hs_dip, DDI_CB_FLAG_INTR,
4406 4405 hermon_intr_cb_handler, state, NULL,
4407 4406 &state->hs_intr_cb_hdl);
4408 4407 if (status != DDI_SUCCESS) {
4409 4408 cmn_err(CE_CONT, "ddi_cb_register failed: 0x%x\n",
4410 4409 status);
4411 4410 state->hs_intr_cb_hdl = NULL;
4412 4411 return (DDI_FAILURE);
4413 4412 }
4414 4413 }
4415 4414
4416 4415 /* Get number of interrupts/MSI supported */
4417 4416 status = ddi_intr_get_nintrs(state->hs_dip, intr_type,
4418 4417 &state->hs_intrmsi_count);
4419 4418 if (status != DDI_SUCCESS) {
4420 4419 (void) ddi_cb_unregister(state->hs_intr_cb_hdl);
4421 4420 state->hs_intr_cb_hdl = NULL;
4422 4421 return (DDI_FAILURE);
4423 4422 }
4424 4423
4425 4424 /* Get number of available interrupts/MSI */
4426 4425 status = ddi_intr_get_navail(state->hs_dip, intr_type,
4427 4426 &state->hs_intrmsi_avail);
4428 4427 if (status != DDI_SUCCESS) {
4429 4428 (void) ddi_cb_unregister(state->hs_intr_cb_hdl);
4430 4429 state->hs_intr_cb_hdl = NULL;
4431 4430 return (DDI_FAILURE);
4432 4431 }
4433 4432
4434 4433 /* Ensure that we have at least one (1) usable MSI or interrupt */
4435 4434 if ((state->hs_intrmsi_avail < 1) || (state->hs_intrmsi_count < 1)) {
4436 4435 (void) ddi_cb_unregister(state->hs_intr_cb_hdl);
4437 4436 state->hs_intr_cb_hdl = NULL;
4438 4437 return (DDI_FAILURE);
4439 4438 }
4440 4439
4441 4440 /*
4442 4441 * Allocate the #interrupt/MSI handles.
4443 4442 * The number we request is the minimum of these three values:
4444 4443 * HERMON_MSIX_MAX driver maximum (array size)
4445 4444 * hermon_msix_max /etc/system override to...
4446 4445 * HERMON_MSIX_MAX
4447 4446 * state->hs_intrmsi_avail Maximum the ddi provides.
4448 4447 */
4449 4448 status = ddi_intr_alloc(state->hs_dip, &state->hs_intrmsi_hdl[0],
4450 4449 intr_type, 0, min(min(HERMON_MSIX_MAX, state->hs_intrmsi_avail),
4451 4450 hermon_msix_max), &state->hs_intrmsi_allocd, DDI_INTR_ALLOC_NORMAL);
4452 4451 if (status != DDI_SUCCESS) {
4453 4452 (void) ddi_cb_unregister(state->hs_intr_cb_hdl);
4454 4453 state->hs_intr_cb_hdl = NULL;
4455 4454 return (DDI_FAILURE);
4456 4455 }
4457 4456
4458 4457 /* Ensure that we have allocated at least one (1) MSI or interrupt */
4459 4458 if (state->hs_intrmsi_allocd < 1) {
4460 4459 (void) ddi_cb_unregister(state->hs_intr_cb_hdl);
4461 4460 state->hs_intr_cb_hdl = NULL;
4462 4461 return (DDI_FAILURE);
4463 4462 }
4464 4463
4465 4464 /*
4466 4465 * Extract the priority for the allocated interrupt/MSI. This
4467 4466 * will be used later when initializing certain mutexes.
4468 4467 */
4469 4468 status = ddi_intr_get_pri(state->hs_intrmsi_hdl[0],
4470 4469 &state->hs_intrmsi_pri);
4471 4470 if (status != DDI_SUCCESS) {
4472 4471 /* Free the allocated interrupt/MSI handle */
4473 4472 (void) ddi_intr_free(state->hs_intrmsi_hdl[0]);
4474 4473
4475 4474 (void) ddi_cb_unregister(state->hs_intr_cb_hdl);
4476 4475 state->hs_intr_cb_hdl = NULL;
4477 4476 return (DDI_FAILURE);
4478 4477 }
4479 4478
4480 4479 /* Make sure the interrupt/MSI priority is below 'high level' */
4481 4480 if (state->hs_intrmsi_pri >= ddi_intr_get_hilevel_pri()) {
4482 4481 /* Free the allocated interrupt/MSI handle */
4483 4482 (void) ddi_intr_free(state->hs_intrmsi_hdl[0]);
4484 4483
4485 4484 return (DDI_FAILURE);
4486 4485 }
4487 4486
4488 4487 /* Get add'l capability information regarding interrupt/MSI */
4489 4488 status = ddi_intr_get_cap(state->hs_intrmsi_hdl[0],
4490 4489 &state->hs_intrmsi_cap);
4491 4490 if (status != DDI_SUCCESS) {
4492 4491 /* Free the allocated interrupt/MSI handle */
4493 4492 (void) ddi_intr_free(state->hs_intrmsi_hdl[0]);
4494 4493
4495 4494 return (DDI_FAILURE);
4496 4495 }
4497 4496
4498 4497 return (DDI_SUCCESS);
4499 4498 }
4500 4499
4501 4500
4502 4501 /*
4503 4502 * hermon_intr_or_msi_fini()
4504 4503 * Context: Only called from attach() and/or detach() path contexts
4505 4504 */
4506 4505 static int
4507 4506 hermon_intr_or_msi_fini(hermon_state_t *state)
4508 4507 {
4509 4508 int status;
4510 4509 int intr;
4511 4510
4512 4511 for (intr = 0; intr < state->hs_intrmsi_allocd; intr++) {
4513 4512
4514 4513 /* Free the allocated interrupt/MSI handle */
4515 4514 status = ddi_intr_free(state->hs_intrmsi_hdl[intr]);
4516 4515 if (status != DDI_SUCCESS) {
4517 4516 return (DDI_FAILURE);
4518 4517 }
4519 4518 }
4520 4519 if (state->hs_intr_cb_hdl) {
4521 4520 (void) ddi_cb_unregister(state->hs_intr_cb_hdl);
4522 4521 state->hs_intr_cb_hdl = NULL;
4523 4522 }
4524 4523 return (DDI_SUCCESS);
4525 4524 }
4526 4525
4527 4526
4528 4527 /*ARGSUSED*/
4529 4528 void
4530 4529 hermon_pci_capability_msix(hermon_state_t *state, ddi_acc_handle_t hdl,
4531 4530 uint_t offset)
4532 4531 {
4533 4532 uint32_t msix_data;
4534 4533 uint16_t msg_cntr;
4535 4534 uint32_t t_offset; /* table offset */
4536 4535 uint32_t t_bir;
4537 4536 uint32_t p_offset; /* pba */
4538 4537 uint32_t p_bir;
4539 4538 int t_size; /* size in entries - each is 4 dwords */
4540 4539
4541 4540 /* come in with offset pointing at the capability structure */
4542 4541
4543 4542 msix_data = pci_config_get32(hdl, offset);
4544 4543 cmn_err(CE_CONT, "Full cap structure dword = %X\n", msix_data);
4545 4544 msg_cntr = pci_config_get16(hdl, offset+2);
4546 4545 cmn_err(CE_CONT, "MSIX msg_control = %X\n", msg_cntr);
4547 4546 offset += 4;
4548 4547 msix_data = pci_config_get32(hdl, offset); /* table info */
4549 4548 t_offset = (msix_data & 0xFFF8) >> 3;
4550 4549 t_bir = msix_data & 0x07;
4551 4550 offset += 4;
4552 4551 cmn_err(CE_CONT, " table %X --offset = %X, bir(bar) = %X\n",
4553 4552 msix_data, t_offset, t_bir);
4554 4553 msix_data = pci_config_get32(hdl, offset); /* PBA info */
4555 4554 p_offset = (msix_data & 0xFFF8) >> 3;
4556 4555 p_bir = msix_data & 0x07;
4557 4556
4558 4557 cmn_err(CE_CONT, " PBA %X --offset = %X, bir(bar) = %X\n",
4559 4558 msix_data, p_offset, p_bir);
4560 4559 t_size = msg_cntr & 0x7FF; /* low eleven bits */
4561 4560 cmn_err(CE_CONT, " table size = %X entries\n", t_size);
4562 4561
4563 4562 offset = t_offset; /* reuse this for offset from BAR */
4564 4563 #ifdef HERMON_SUPPORTS_MSIX_BAR
4565 4564 cmn_err(CE_CONT, "First 2 table entries behind BAR2 \n");
4566 4565 for (i = 0; i < 2; i++) {
4567 4566 for (j = 0; j < 4; j++, offset += 4) {
4568 4567 msix_data = ddi_get32(state->hs_reg_msihdl,
4569 4568 (uint32_t *)((uintptr_t)state->hs_reg_msi_baseaddr
4570 4569 + offset));
4571 4570 cmn_err(CE_CONT, "MSI table entry %d, dword %d == %X\n",
4572 4571 i, j, msix_data);
4573 4572 }
4574 4573 }
4575 4574 #endif
4576 4575
4577 4576 }
4578 4577
4579 4578 /*
4580 4579 * X86 fastreboot support functions.
4581 4580 * These functions are used to save/restore MSI-X table/PBA and also
4582 4581 * to disable MSI-X interrupts in hermon_quiesce().
4583 4582 */
4584 4583
4585 4584 /* Return the message control for MSI-X */
4586 4585 static ushort_t
4587 4586 get_msix_ctrl(dev_info_t *dip)
4588 4587 {
4589 4588 ushort_t msix_ctrl = 0, caps_ctrl = 0;
4590 4589 hermon_state_t *state = ddi_get_soft_state(hermon_statep,
4591 4590 DEVI(dip)->devi_instance);
4592 4591 ddi_acc_handle_t pci_cfg_hdl = hermon_get_pcihdl(state);
4593 4592 ASSERT(pci_cfg_hdl != NULL);
4594 4593
4595 4594 if ((PCI_CAP_LOCATE(pci_cfg_hdl,
4596 4595 PCI_CAP_ID_MSI_X, &caps_ctrl) == DDI_SUCCESS)) {
4597 4596 if ((msix_ctrl = PCI_CAP_GET16(pci_cfg_hdl, NULL, caps_ctrl,
4598 4597 PCI_MSIX_CTRL)) == PCI_CAP_EINVAL16)
4599 4598 return (0);
4600 4599 }
4601 4600 ASSERT(msix_ctrl != 0);
4602 4601
4603 4602 return (msix_ctrl);
4604 4603 }
4605 4604
4606 4605 /* Return the MSI-X table size */
4607 4606 static size_t
4608 4607 get_msix_tbl_size(dev_info_t *dip)
4609 4608 {
4610 4609 ushort_t msix_ctrl = get_msix_ctrl(dip);
4611 4610 ASSERT(msix_ctrl != 0);
4612 4611
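	/*
	 * The table-size field encodes N - 1 entries; each entry is 16
	 * bytes (address low/high, data and vector control), which is the
	 * value PCI_MSIX_VECTOR_SIZE is expected to carry here.
	 */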
4613 4612 return (((msix_ctrl & PCI_MSIX_TBL_SIZE_MASK) + 1) *
4614 4613 PCI_MSIX_VECTOR_SIZE);
4615 4614 }
4616 4615
4617 4616 /* Return the MSI-X PBA size */
4618 4617 static size_t
4619 4618 get_msix_pba_size(dev_info_t *dip)
4620 4619 {
4621 4620 ushort_t msix_ctrl = get_msix_ctrl(dip);
4622 4621 ASSERT(msix_ctrl != 0);
4623 4622
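	/*
	 * The PBA holds one pending bit per table entry, padded out to
	 * 64-bit qwords.  With the size field encoding N - 1,
	 * ((N - 1) + 64) / 64 is ceil(N / 64) qwords, at 8 bytes apiece.
	 */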
4624 4623 return (((msix_ctrl & PCI_MSIX_TBL_SIZE_MASK) + 64) / 64 * 8);
4625 4624 }
4626 4625
4627 4626 /* Set up the MSI-X table/PBA save area */
4628 4627 static void
4629 4628 hermon_set_msix_info(hermon_state_t *state)
4630 4629 {
4631 4630 uint_t rnumber, breg, nregs;
4632 4631 ushort_t caps_ctrl, msix_ctrl;
4633 4632 pci_regspec_t *rp;
4634 4633 int reg_size, addr_space, offset, *regs_list, i;
4635 4634
4636 4635 /*
4637 4636 * MSI-X BIR Index Table:
4638 4637 * BAR indicator register (BIR) to Base Address register.
4639 4638 */
4640 4639 uchar_t pci_msix_bir_index[8] = {0x10, 0x14, 0x18, 0x1c,
4641 4640 0x20, 0x24, 0xff, 0xff};
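	/*
	 * BIR values 0 through 5 correspond to the six 32-bit BAR slots at
	 * config offsets 0x10 through 0x24; values 6 and 7 are reserved,
	 * hence the 0xff sentinels.
	 */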
4642 4641
4643 4642 /* Fastreboot data access attribute */
4644 4643 ddi_device_acc_attr_t dev_attr = {
4645 4644 0, /* version */
4646 4645 DDI_STRUCTURE_LE_ACC,
4647 4646 DDI_STRICTORDER_ACC, /* attr access */
4648 4647 0
4649 4648 };
4650 4649
4651 4650 ddi_acc_handle_t pci_cfg_hdl = hermon_get_pcihdl(state);
4652 4651 ASSERT(pci_cfg_hdl != NULL);
4653 4652
4654 4653 if ((PCI_CAP_LOCATE(pci_cfg_hdl,
4655 4654 PCI_CAP_ID_MSI_X, &caps_ctrl) == DDI_SUCCESS)) {
4656 4655 if ((msix_ctrl = PCI_CAP_GET16(pci_cfg_hdl, NULL, caps_ctrl,
4657 4656 PCI_MSIX_CTRL)) == PCI_CAP_EINVAL16)
4658 4657 return;
4659 4658 }
4660 4659 ASSERT(msix_ctrl != 0);
4661 4660
4662 4661 state->hs_msix_tbl_offset = PCI_CAP_GET32(pci_cfg_hdl, NULL, caps_ctrl,
4663 4662 PCI_MSIX_TBL_OFFSET);
4664 4663
4665 4664 /* Get the BIR for MSI-X table */
4666 4665 breg = pci_msix_bir_index[state->hs_msix_tbl_offset &
4667 4666 PCI_MSIX_TBL_BIR_MASK];
4668 4667 ASSERT(breg != 0xFF);
4669 4668
4670 4669 /* Set the MSI-X table offset */
4671 4670 state->hs_msix_tbl_offset = state->hs_msix_tbl_offset &
4672 4671 ~PCI_MSIX_TBL_BIR_MASK;
4673 4672
4674 4673 /* Set the MSI-X table size */
4675 4674 state->hs_msix_tbl_size = ((msix_ctrl & PCI_MSIX_TBL_SIZE_MASK) + 1) *
4676 4675 PCI_MSIX_VECTOR_SIZE;
4677 4676
4678 4677 if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, state->hs_dip,
4679 4678	    DDI_PROP_DONTPASS, "reg", (int **)&regs_list, &nregs) !=
4680 4679 DDI_PROP_SUCCESS) {
4681 4680 return;
4682 4681 }
4683 4682 reg_size = sizeof (pci_regspec_t) / sizeof (int);
4684 4683
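	/*
	 * The "reg" property is an array of pci_regspec_t entries flattened
	 * into ints; entry 0 describes config space, which is why the scans
	 * below start at index 1 when looking for the BAR that matches the
	 * MSI-X BIR.
	 */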
4685 4684 /* Check the register number for MSI-X table */
4686 4685 for (i = 1, rnumber = 0; i < nregs/reg_size; i++) {
4687 4686		rp = (pci_regspec_t *)&regs_list[i * reg_size];
4688 4687 addr_space = rp->pci_phys_hi & PCI_ADDR_MASK;
4689 4688 offset = PCI_REG_REG_G(rp->pci_phys_hi);
4690 4689
4691 4690 if ((offset == breg) && ((addr_space == PCI_ADDR_MEM32) ||
4692 4691 (addr_space == PCI_ADDR_MEM64))) {
4693 4692 rnumber = i;
4694 4693 break;
4695 4694 }
4696 4695 }
4697 4696 ASSERT(rnumber != 0);
4698 4697 state->hs_msix_tbl_rnumber = rnumber;
4699 4698
4700 4699 /* Set device attribute version and access according to Hermon FM */
4701 4700 dev_attr.devacc_attr_version = hermon_devacc_attr_version(state);
4702 4701 dev_attr.devacc_attr_access = hermon_devacc_attr_access(state);
4703 4702
4704 4703 /* Map the entire MSI-X vector table */
4705 4704 if (hermon_regs_map_setup(state, state->hs_msix_tbl_rnumber,
4706 4705 (caddr_t *)&state->hs_msix_tbl_addr, state->hs_msix_tbl_offset,
4707 4706 state->hs_msix_tbl_size, &dev_attr,
4708 4707 &state->hs_fm_msix_tblhdl) != DDI_SUCCESS) {
4709 4708 return;
4710 4709 }
4711 4710
4712 4711 state->hs_msix_pba_offset = PCI_CAP_GET32(pci_cfg_hdl, NULL, caps_ctrl,
4713 4712 PCI_MSIX_PBA_OFFSET);
4714 4713
4715 4714 /* Get the BIR for MSI-X PBA */
4716 4715 breg = pci_msix_bir_index[state->hs_msix_pba_offset &
4717 4716 PCI_MSIX_PBA_BIR_MASK];
4718 4717 ASSERT(breg != 0xFF);
4719 4718
4720 4719 /* Set the MSI-X PBA offset */
4721 4720 state->hs_msix_pba_offset = state->hs_msix_pba_offset &
4722 4721 ~PCI_MSIX_PBA_BIR_MASK;
4723 4722
4724 4723 /* Set the MSI-X PBA size */
4725 4724 state->hs_msix_pba_size =
4726 4725 ((msix_ctrl & PCI_MSIX_TBL_SIZE_MASK) + 64) / 64 * 8;
4727 4726
4728 4727 /* Check the register number for MSI-X PBA */
4729 4728 for (i = 1, rnumber = 0; i < nregs/reg_size; i++) {
4730 4729		rp = (pci_regspec_t *)&regs_list[i * reg_size];
4731 4730 addr_space = rp->pci_phys_hi & PCI_ADDR_MASK;
4732 4731 offset = PCI_REG_REG_G(rp->pci_phys_hi);
4733 4732
4734 4733 if ((offset == breg) && ((addr_space == PCI_ADDR_MEM32) ||
4735 4734 (addr_space == PCI_ADDR_MEM64))) {
4736 4735 rnumber = i;
4737 4736 break;
4738 4737 }
4739 4738 }
4740 4739 ASSERT(rnumber != 0);
4741 4740 state->hs_msix_pba_rnumber = rnumber;
4742 4741 ddi_prop_free(regs_list);
4743 4742
4744 4743 /* Map in the MSI-X Pending Bit Array */
4745 4744 if (hermon_regs_map_setup(state, state->hs_msix_pba_rnumber,
4746 4745 (caddr_t *)&state->hs_msix_pba_addr, state->hs_msix_pba_offset,
4747 4746 state->hs_msix_pba_size, &dev_attr,
4748 4747 &state->hs_fm_msix_pbahdl) != DDI_SUCCESS) {
4749 4748 hermon_regs_map_free(state, &state->hs_fm_msix_tblhdl);
4750 4749 state->hs_fm_msix_tblhdl = NULL;
4751 4750 return;
4752 4751 }
4753 4752
4754 4753 /* Set the MSI-X table save area */
4755 4754 state->hs_msix_tbl_entries = kmem_alloc(state->hs_msix_tbl_size,
4756 4755 KM_SLEEP);
4757 4756
4758 4757 /* Set the MSI-X PBA save area */
4759 4758 state->hs_msix_pba_entries = kmem_alloc(state->hs_msix_pba_size,
4760 4759 KM_SLEEP);
4761 4760 }
4762 4761
4763 4762 /* Disable Hermon interrupts */
4764 4763 static int
4765 4764 hermon_intr_disable(hermon_state_t *state)
4766 4765 {
4767 4766 ushort_t msix_ctrl = 0, caps_ctrl = 0;
4768 4767 ddi_acc_handle_t pci_cfg_hdl = hermon_get_pcihdl(state);
4769 4768 ddi_acc_handle_t msix_tblhdl = hermon_get_msix_tblhdl(state);
4770 4769 int i, j;
4771 4770 ASSERT(pci_cfg_hdl != NULL && msix_tblhdl != NULL);
4772 4771 ASSERT(state->hs_intr_types_avail &
4773 4772 (DDI_INTR_TYPE_FIXED | DDI_INTR_TYPE_MSI | DDI_INTR_TYPE_MSIX));
4774 4773
4775 4774 /*
4776 4775	 * Check if MSI-X interrupts are used.  If so, disable MSI-X interrupts.
4777 4776	 * If not, then since Hermon does not support MSI interrupts, assume the
4778 4777	 * legacy interrupt is in use instead and disable it.
4779 4778 */
4780 4779 if ((state->hs_cfg_profile->cp_use_msi_if_avail != 0) &&
4781 4780 (state->hs_intr_types_avail & DDI_INTR_TYPE_MSIX)) {
4782 4781
4783 4782 if ((PCI_CAP_LOCATE(pci_cfg_hdl,
4784 4783 PCI_CAP_ID_MSI_X, &caps_ctrl) == DDI_SUCCESS)) {
4785 4784 if ((msix_ctrl = PCI_CAP_GET16(pci_cfg_hdl, NULL,
4786 4785 caps_ctrl, PCI_MSIX_CTRL)) == PCI_CAP_EINVAL16)
4787 4786 return (DDI_FAILURE);
4788 4787 }
4789 4788 ASSERT(msix_ctrl != 0);
4790 4789
4791 4790 if (!(msix_ctrl & PCI_MSIX_ENABLE_BIT))
4792 4791 return (DDI_SUCCESS);
4793 4792
4794 4793 /* Clear all inums in MSI-X table */
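		/*
		 * Each table entry is PCI_MSIX_VECTOR_SIZE bytes -- address
		 * low, address high, data and vector control -- and the
		 * nested loops below zero every dword of every entry.
		 */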
4795 4794 for (i = 0; i < get_msix_tbl_size(state->hs_dip);
4796 4795 i += PCI_MSIX_VECTOR_SIZE) {
4797 4796 for (j = 0; j < PCI_MSIX_VECTOR_SIZE; j += 4) {
4798 4797 char *addr = state->hs_msix_tbl_addr + i + j;
4799 4798 ddi_put32(msix_tblhdl,
4800 4799 (uint32_t *)(uintptr_t)addr, 0x0);
4801 4800 }
4802 4801 }
4803 4802
4804 4803 /* Disable MSI-X interrupts */
4805 4804 msix_ctrl &= ~PCI_MSIX_ENABLE_BIT;
4806 4805 PCI_CAP_PUT16(pci_cfg_hdl, NULL, caps_ctrl, PCI_MSIX_CTRL,
4807 4806 msix_ctrl);
4808 4807
4809 4808 } else {
4810 4809 uint16_t cmdreg = pci_config_get16(pci_cfg_hdl, PCI_CONF_COMM);
4811 4810 ASSERT(state->hs_intr_types_avail & DDI_INTR_TYPE_FIXED);
4812 4811
4813 4812 /* Disable the legacy interrupts */
4814 4813 cmdreg |= PCI_COMM_INTX_DISABLE;
4815 4814 pci_config_put16(pci_cfg_hdl, PCI_CONF_COMM, cmdreg);
4816 4815 }
4817 4816
4818 4817 return (DDI_SUCCESS);
4819 4818 }
4820 4819
4821 4820 /* Hermon quiesce(9F) entry */
4822 4821 static int
4823 4822 hermon_quiesce(dev_info_t *dip)
4824 4823 {
4825 4824 hermon_state_t *state = ddi_get_soft_state(hermon_statep,
4826 4825 DEVI(dip)->devi_instance);
4827 4826 ddi_acc_handle_t pcihdl = hermon_get_pcihdl(state);
4828 4827 ddi_acc_handle_t cmdhdl = hermon_get_cmdhdl(state);
4829 4828 ddi_acc_handle_t msix_tbl_hdl = hermon_get_msix_tblhdl(state);
4830 4829 ddi_acc_handle_t msix_pba_hdl = hermon_get_msix_pbahdl(state);
4831 4830 uint32_t sem, reset_delay = state->hs_cfg_profile->cp_sw_reset_delay;
4832 4831 uint64_t data64;
4833 4832 uint32_t data32;
4834 4833 int status, i, j, loopcnt;
4835 4834 uint_t offset;
4836 4835
4837 4836 ASSERT(state != NULL);
4838 4837
4839 4838 /* start fastreboot */
4840 4839 state->hs_quiescing = B_TRUE;
4841 4840
4842 4841 /* If it's in maintenance mode, do nothing but return with SUCCESS */
4843 4842 if (!HERMON_IS_OPERATIONAL(state->hs_operational_mode)) {
4844 4843 return (DDI_SUCCESS);
4845 4844 }
4846 4845
4847 4846 /* suppress Hermon FM ereports */
4848 4847 if (hermon_get_state(state) & HCA_EREPORT_FM) {
4849 4848 hermon_clr_state_nolock(state, HCA_EREPORT_FM);
4850 4849 }
4851 4850
4852 4851 /* Shutdown HCA ports */
4853 4852 if (hermon_hca_ports_shutdown(state,
4854 4853 state->hs_cfg_profile->cp_num_ports) != HERMON_CMD_SUCCESS) {
4855 4854 state->hs_quiescing = B_FALSE;
4856 4855 return (DDI_FAILURE);
4857 4856 }
4858 4857
4859 4858 /* Close HCA */
4860 4859 if (hermon_close_hca_cmd_post(state, HERMON_CMD_NOSLEEP_SPIN) !=
4861 4860 HERMON_CMD_SUCCESS) {
4862 4861 state->hs_quiescing = B_FALSE;
4863 4862 return (DDI_FAILURE);
4864 4863 }
4865 4864
4866 4865 /* Disable interrupts */
4867 4866 if (hermon_intr_disable(state) != DDI_SUCCESS) {
4868 4867 state->hs_quiescing = B_FALSE;
4869 4868 return (DDI_FAILURE);
4870 4869 }
4871 4870
4872 4871 /*
4873 4872 * Query the PCI capabilities of the HCA device, but don't process
4874 4873 * the VPD until after reset.
4875 4874 */
4876 4875 if (hermon_pci_capability_list(state, pcihdl) != DDI_SUCCESS) {
4877 4876 state->hs_quiescing = B_FALSE;
4878 4877 return (DDI_FAILURE);
4879 4878 }
4880 4879
4881 4880 /*
4882 4881 * Read all PCI config info (reg0...reg63). Note: According to the
4883 4882 * Hermon software reset application note, we should not read or
4884 4883 * restore the values in reg22 and reg23.
4885 4884 * NOTE: For Hermon (and Arbel too) it says to restore the command
4886 4885 * register LAST, and technically, you need to restore the
4887 4886 * PCIE Capability "device control" and "link control" (word-sized,
4888 4887	 * at offsets 0x08 and 0x10 from the capability ID, respectively).
4889 4888	 * We hold off restoring the command register (offset 0x4) until last.
4890 4889 */
4891 4890
4892 4891	/* 1st, wait for the semaphore to assure accessibility - per PRM */
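	/*
	 * NANOSEC/MICROSEC works out to 1000, so this polls the software
	 * semaphore register up to 1000 times, pausing at least one
	 * microsecond between reads; a value of zero means the semaphore
	 * is free.
	 */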
4893 4892 status = -1;
4894 4893 for (i = 0; i < NANOSEC/MICROSEC /* 1sec timeout */; i++) {
4895 4894 sem = ddi_get32(cmdhdl, state->hs_cmd_regs.sw_semaphore);
4896 4895 if (sem == 0) {
4897 4896 status = 0;
4898 4897 break;
4899 4898 }
4900 4899 drv_usecwait(1);
4901 4900 }
4902 4901
4903 4902 /* Check if timeout happens */
4904 4903 if (status == -1) {
4905 4904 state->hs_quiescing = B_FALSE;
4906 4905 return (DDI_FAILURE);
4907 4906 }
4908 4907
4909 4908	/* If MSI-X interrupts are used, save the MSI-X table */
4910 4909 if (msix_tbl_hdl && msix_pba_hdl) {
4911 4910 /* save MSI-X table */
4912 4911 for (i = 0; i < get_msix_tbl_size(state->hs_dip);
4913 4912 i += PCI_MSIX_VECTOR_SIZE) {
4914 4913 for (j = 0; j < PCI_MSIX_VECTOR_SIZE; j += 4) {
4915 4914 char *addr = state->hs_msix_tbl_addr + i + j;
4916 4915 data32 = ddi_get32(msix_tbl_hdl,
4917 4916 (uint32_t *)(uintptr_t)addr);
4918 4917 *(uint32_t *)(uintptr_t)(state->
4919 4918 hs_msix_tbl_entries + i + j) = data32;
4920 4919 }
4921 4920 }
4922 4921 /* save MSI-X PBA */
4923 4922 for (i = 0; i < get_msix_pba_size(state->hs_dip); i += 8) {
4924 4923 char *addr = state->hs_msix_pba_addr + i;
4925 4924 data64 = ddi_get64(msix_pba_hdl,
4926 4925 (uint64_t *)(uintptr_t)addr);
4927 4926 *(uint64_t *)(uintptr_t)(state->
4928 4927 hs_msix_pba_entries + i) = data64;
4929 4928 }
4930 4929 }
4931 4930
4932 4931 /* save PCI config space */
4933 4932 for (i = 0; i < HERMON_SW_RESET_NUMREGS; i++) {
4934 4933 if ((i != HERMON_SW_RESET_REG22_RSVD) &&
4935 4934 (i != HERMON_SW_RESET_REG23_RSVD)) {
4936 4935 state->hs_cfg_data[i] =
4937 4936 pci_config_get32(pcihdl, i << 2);
4938 4937 }
4939 4938 }
4940 4939
4941 4940 /* SW-reset HCA */
4942 4941 ddi_put32(cmdhdl, state->hs_cmd_regs.sw_reset, HERMON_SW_RESET_START);
4943 4942
4944 4943 /*
4945 4944 * This delay is required so as not to cause a panic here. If the
4946 4945 * device is accessed too soon after reset it will not respond to
4947 4946 * config cycles, causing a Master Abort and panic.
4948 4947 */
4949 4948 drv_usecwait(reset_delay);
4950 4949
4951 4950 /* Poll waiting for the device to finish resetting */
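	/*
	 * Config dword 0 carries the vendor ID in its low 16 bits; the
	 * device is considered out of reset once that field reads back as
	 * the Mellanox vendor ID again.
	 */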
4952 4951 loopcnt = 100; /* 100 times @ 100 usec - total delay 10 msec */
4953 4952 while ((pci_config_get32(pcihdl, 0) & 0x0000FFFF) != PCI_VENID_MLX) {
4954 4953 drv_usecwait(HERMON_SW_RESET_POLL_DELAY);
4955 4954 if (--loopcnt == 0)
4956 4955 break; /* just in case, break and go on */
4957 4956 }
4958 4957 if (loopcnt == 0) {
4959 4958 state->hs_quiescing = B_FALSE;
4960 4959 return (DDI_FAILURE);
4961 4960 }
4962 4961
4963 4962 /* Restore the config info */
4964 4963 for (i = 0; i < HERMON_SW_RESET_NUMREGS; i++) {
4965 4964		if (i == 1) continue; /* skip the command/status reg */
4966 4965 if ((i != HERMON_SW_RESET_REG22_RSVD) &&
4967 4966 (i != HERMON_SW_RESET_REG23_RSVD)) {
4968 4967 pci_config_put32(pcihdl, i << 2, state->hs_cfg_data[i]);
4969 4968 }
4970 4969 }
4971 4970
4972 4971 /* If MSI-X interrupts are used, restore the MSI-X table */
4973 4972 if (msix_tbl_hdl && msix_pba_hdl) {
4974 4973 /* restore MSI-X PBA */
4975 4974 for (i = 0; i < get_msix_pba_size(state->hs_dip); i += 8) {
4976 4975 char *addr = state->hs_msix_pba_addr + i;
4977 4976 data64 = *(uint64_t *)(uintptr_t)
4978 4977 (state->hs_msix_pba_entries + i);
4979 4978 ddi_put64(msix_pba_hdl,
4980 4979 (uint64_t *)(uintptr_t)addr, data64);
4981 4980 }
4982 4981 /* restore MSI-X table */
4983 4982 for (i = 0; i < get_msix_tbl_size(state->hs_dip);
4984 4983 i += PCI_MSIX_VECTOR_SIZE) {
4985 4984 for (j = 0; j < PCI_MSIX_VECTOR_SIZE; j += 4) {
4986 4985 char *addr = state->hs_msix_tbl_addr + i + j;
4987 4986 data32 = *(uint32_t *)(uintptr_t)
4988 4987 (state->hs_msix_tbl_entries + i + j);
4989 4988 ddi_put32(msix_tbl_hdl,
4990 4989 (uint32_t *)(uintptr_t)addr, data32);
4991 4990 }
4992 4991 }
4993 4992 }
4994 4993
4995 4994 /*
4996 4995	 * PCI Express Capability - the device control and link control values
4997 4996	 * were saved during the capability list scan; restore them here.
4998 4997 */
4999 4998 offset = state->hs_pci_cap_offset;
5000 4999 data32 = state->hs_pci_cap_devctl;
5001 5000 pci_config_put32(pcihdl, offset + HERMON_PCI_CAP_DEV_OFFS, data32);
5002 5001 data32 = state->hs_pci_cap_lnkctl;
5003 5002 pci_config_put32(pcihdl, offset + HERMON_PCI_CAP_LNK_OFFS, data32);
5004 5003
5005 5004 /* restore the command register */
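	/*
	 * (OR-ing in 0x0006 ensures the Memory Space Enable and Bus Master
	 * Enable bits are set in the restored command register.)
	 */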
5006 5005 pci_config_put32(pcihdl, 0x04, (state->hs_cfg_data[1] | 0x0006));
5007 5006
5008 5007 return (DDI_SUCCESS);
5009 5008 }