8368 remove warlock leftovers from usr/src/uts
--- old/usr/src/uts/common/io/ib/adapters/hermon/hermon.c
+++ new/usr/src/uts/common/io/ib/adapters/hermon/hermon.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21
22 22 /*
23 23 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
24 24 */
25 25
26 26 /*
27 27 * hermon.c
28 28 * Hermon (InfiniBand) HCA Driver attach/detach Routines
29 29 *
30 30 * Implements all the routines necessary for the attach, setup,
31 31 * initialization (and subsequent possible teardown and detach) of the
32 32 * Hermon InfiniBand HCA driver.
33 33 */
34 34
35 35 #include <sys/types.h>
36 36 #include <sys/file.h>
37 37 #include <sys/open.h>
38 38 #include <sys/conf.h>
39 39 #include <sys/ddi.h>
40 40 #include <sys/sunddi.h>
41 41 #include <sys/modctl.h>
42 42 #include <sys/stat.h>
43 43 #include <sys/pci.h>
44 44 #include <sys/pci_cap.h>
45 45 #include <sys/bitmap.h>
46 46 #include <sys/policy.h>
47 47
48 48 #include <sys/ib/adapters/hermon/hermon.h>
49 49
50 50 /* /etc/system can tune this down, if that is desirable. */
51 51 int hermon_msix_max = HERMON_MSIX_MAX;
52 52
53 53 /* The following works around a problem in pre-2_7_000 firmware. */
54 54 #define HERMON_FW_WORKAROUND
55 55
56 56 int hermon_verbose = 0;
57 57
58 58 /* Hermon HCA State Pointer */
59 59 void *hermon_statep;
60 60
61 61 int debug_vpd = 0;
62 62
63 63 /* Disable the internal error-check polling thread */
64 64 int hermon_no_inter_err_chk = 0;
65 65
66 66 /*
67 67 * The Hermon "userland resource database" is common to instances of the
68 68 * Hermon HCA driver. This structure "hermon_userland_rsrc_db" contains all
69 69 * the necessary information to maintain it.
70 70 */
71 71 hermon_umap_db_t hermon_userland_rsrc_db;
72 72
73 73 static int hermon_attach(dev_info_t *, ddi_attach_cmd_t);
74 74 static int hermon_detach(dev_info_t *, ddi_detach_cmd_t);
75 75 static int hermon_open(dev_t *, int, int, cred_t *);
76 76 static int hermon_close(dev_t, int, int, cred_t *);
77 77 static int hermon_getinfo(dev_info_t *, ddi_info_cmd_t, void *, void **);
78 78
79 79 static int hermon_drv_init(hermon_state_t *state, dev_info_t *dip,
80 80 int instance);
81 81 static void hermon_drv_fini(hermon_state_t *state);
82 82 static void hermon_drv_fini2(hermon_state_t *state);
83 83 static int hermon_isr_init(hermon_state_t *state);
84 84 static void hermon_isr_fini(hermon_state_t *state);
85 85
86 86 static int hermon_hw_init(hermon_state_t *state);
87 87
88 88 static void hermon_hw_fini(hermon_state_t *state,
89 89 hermon_drv_cleanup_level_t cleanup);
90 90 static int hermon_soft_state_init(hermon_state_t *state);
91 91 static void hermon_soft_state_fini(hermon_state_t *state);
92 92 static int hermon_icm_config_setup(hermon_state_t *state,
93 93 hermon_hw_initqueryhca_t *inithca);
94 94 static void hermon_icm_tables_init(hermon_state_t *state);
95 95 static void hermon_icm_tables_fini(hermon_state_t *state);
96 96 static int hermon_icm_dma_init(hermon_state_t *state);
97 97 static void hermon_icm_dma_fini(hermon_state_t *state);
98 98 static void hermon_inithca_set(hermon_state_t *state,
99 99 hermon_hw_initqueryhca_t *inithca);
100 100 static int hermon_hca_port_init(hermon_state_t *state);
101 101 static int hermon_hca_ports_shutdown(hermon_state_t *state, uint_t num_init);
102 102 static int hermon_internal_uarpg_init(hermon_state_t *state);
103 103 static void hermon_internal_uarpg_fini(hermon_state_t *state);
104 104 static int hermon_special_qp_contexts_reserve(hermon_state_t *state);
105 105 static void hermon_special_qp_contexts_unreserve(hermon_state_t *state);
106 106 static int hermon_sw_reset(hermon_state_t *state);
107 107 static int hermon_mcg_init(hermon_state_t *state);
108 108 static void hermon_mcg_fini(hermon_state_t *state);
109 109 static int hermon_fw_version_check(hermon_state_t *state);
110 110 static void hermon_device_info_report(hermon_state_t *state);
111 111 static int hermon_pci_capability_list(hermon_state_t *state,
112 112 ddi_acc_handle_t hdl);
113 113 static void hermon_pci_capability_vpd(hermon_state_t *state,
114 114 ddi_acc_handle_t hdl, uint_t offset);
115 115 static int hermon_pci_read_vpd(ddi_acc_handle_t hdl, uint_t offset,
116 116 uint32_t addr, uint32_t *data);
117 117 static int hermon_intr_or_msi_init(hermon_state_t *state);
118 118 static int hermon_add_intrs(hermon_state_t *state, int intr_type);
119 119 static int hermon_intr_or_msi_fini(hermon_state_t *state);
120 120 void hermon_pci_capability_msix(hermon_state_t *state, ddi_acc_handle_t hdl,
121 121 uint_t offset);
122 122
123 123 static uint64_t hermon_size_icm(hermon_state_t *state);
124 124
125 125 /* X86 fastreboot support */
126 126 static ushort_t get_msix_ctrl(dev_info_t *);
127 127 static size_t get_msix_tbl_size(dev_info_t *);
128 128 static size_t get_msix_pba_size(dev_info_t *);
129 129 static void hermon_set_msix_info(hermon_state_t *);
130 130 static int hermon_intr_disable(hermon_state_t *);
131 131 static int hermon_quiesce(dev_info_t *);
132 132
133 133
134 134 /* Character/Block Operations */
135 135 static struct cb_ops hermon_cb_ops = {
136 136 hermon_open, /* open */
137 137 hermon_close, /* close */
138 138 nodev, /* strategy (block) */
139 139 nodev, /* print (block) */
140 140 nodev, /* dump (block) */
141 141 nodev, /* read */
142 142 nodev, /* write */
143 143 hermon_ioctl, /* ioctl */
144 144 hermon_devmap, /* devmap */
145 145 NULL, /* mmap */
146 146 nodev, /* segmap */
147 147 nochpoll, /* chpoll */
148 148 ddi_prop_op, /* prop_op */
149 149 NULL, /* streams */
150 150 D_NEW | D_MP |
151 151 D_64BIT | D_HOTPLUG |
152 152 D_DEVMAP, /* flags */
153 153 CB_REV /* rev */
154 154 };
155 155
156 156 /* Driver Operations */
157 157 static struct dev_ops hermon_ops = {
158 158 DEVO_REV, /* struct rev */
159 159 0, /* refcnt */
160 160 hermon_getinfo, /* getinfo */
161 161 nulldev, /* identify */
162 162 nulldev, /* probe */
163 163 hermon_attach, /* attach */
164 164 hermon_detach, /* detach */
165 165 nodev, /* reset */
166 166 &hermon_cb_ops, /* cb_ops */
167 167 NULL, /* bus_ops */
168 168 nodev, /* power */
169 169 hermon_quiesce, /* devo_quiesce */
170 170 };
171 171
172 172 /* Module Driver Info */
173 173 static struct modldrv hermon_modldrv = {
174 174 &mod_driverops,
175 175 "ConnectX IB Driver",
176 176 &hermon_ops
177 177 };
178 178
179 179 /* Module Linkage */
180 180 static struct modlinkage hermon_modlinkage = {
181 181 MODREV_1,
182 182 &hermon_modldrv,
183 183 NULL
184 184 };
185 185
186 186 /*
187 187 * This extern refers to the ibc_operations_t function vector that is defined
188 188 * in the hermon_ci.c file.
189 189 */
190 190 extern ibc_operations_t hermon_ibc_ops;
191 191
192 192 /*
193 193 * _init()
194 194 */
195 195 int
196 196 _init()
197 197 {
198 198 int status;
199 199
200 200 status = ddi_soft_state_init(&hermon_statep, sizeof (hermon_state_t),
201 201 (size_t)HERMON_INITIAL_STATES);
202 202 if (status != 0) {
203 203 return (status);
204 204 }
205 205
206 206 status = ibc_init(&hermon_modlinkage);
207 207 if (status != 0) {
208 208 ddi_soft_state_fini(&hermon_statep);
209 209 return (status);
210 210 }
211 211
212 212 status = mod_install(&hermon_modlinkage);
213 213 if (status != 0) {
214 214 ibc_fini(&hermon_modlinkage);
215 215 ddi_soft_state_fini(&hermon_statep);
216 216 return (status);
217 217 }
218 218
219 219 /* Initialize the Hermon "userland resources database" */
220 220 hermon_umap_db_init();
221 221
222 222 return (status);
223 223 }
224 224
225 225
226 226 /*
227 227 * _info()
228 228 */
229 229 int
230 230 _info(struct modinfo *modinfop)
231 231 {
232 232 int status;
233 233
234 234 status = mod_info(&hermon_modlinkage, modinfop);
235 235 return (status);
236 236 }
237 237
238 238
239 239 /*
240 240 * _fini()
241 241 */
242 242 int
243 243 _fini()
244 244 {
245 245 int status;
246 246
247 247 status = mod_remove(&hermon_modlinkage);
248 248 if (status != 0) {
249 249 return (status);
250 250 }
251 251
252 252 /* Destroy the Hermon "userland resources database" */
253 253 hermon_umap_db_fini();
254 254
255 255 ibc_fini(&hermon_modlinkage);
256 256 ddi_soft_state_fini(&hermon_statep);
257 257
258 258 return (status);
259 259 }
260 260
261 261
262 262 /*
263 263 * hermon_getinfo()
264 264 */
265 265 /* ARGSUSED */
266 266 static int
267 267 hermon_getinfo(dev_info_t *dip, ddi_info_cmd_t cmd, void *arg, void **result)
268 268 {
269 269 dev_t dev;
270 270 hermon_state_t *state;
271 271 minor_t instance;
272 272
273 273 switch (cmd) {
274 274 case DDI_INFO_DEVT2DEVINFO:
275 275 dev = (dev_t)arg;
276 276 instance = HERMON_DEV_INSTANCE(dev);
277 277 state = ddi_get_soft_state(hermon_statep, instance);
278 278 if (state == NULL) {
279 279 return (DDI_FAILURE);
280 280 }
281 281 *result = (void *)state->hs_dip;
282 282 return (DDI_SUCCESS);
283 283
284 284 case DDI_INFO_DEVT2INSTANCE:
285 285 dev = (dev_t)arg;
286 286 instance = HERMON_DEV_INSTANCE(dev);
287 287 *result = (void *)(uintptr_t)instance;
288 288 return (DDI_SUCCESS);
289 289
290 290 default:
291 291 break;
292 292 }
293 293
294 294 return (DDI_FAILURE);
295 295 }
296 296
297 297
298 298 /*
299 299 * hermon_open()
300 300 */
301 301 /* ARGSUSED */
302 302 static int
303 303 hermon_open(dev_t *devp, int flag, int otyp, cred_t *credp)
304 304 {
305 305 hermon_state_t *state;
306 306 hermon_rsrc_t *rsrcp;
307 307 hermon_umap_db_entry_t *umapdb, *umapdb2;
308 308 minor_t instance;
309 309 uint64_t key, value;
310 310 uint_t hr_indx;
311 311 dev_t dev;
312 312 int status;
313 313
314 314 instance = HERMON_DEV_INSTANCE(*devp);
315 315 state = ddi_get_soft_state(hermon_statep, instance);
316 316 if (state == NULL) {
317 317 return (ENXIO);
318 318 }
319 319
320 320 /*
321 321 * Only allow driver to be opened for character access, and verify
322 322 * whether exclusive access is allowed.
323 323 */
324 324 if ((otyp != OTYP_CHR) || ((flag & FEXCL) &&
325 325 secpolicy_excl_open(credp) != 0)) {
326 326 return (EINVAL);
327 327 }
328 328
329 329 /*
330 330 * Search for the current process PID in the "userland resources
331 331 * database". If it is not found, then attempt to allocate a UAR
332 332 * page and add the ("key", "value") pair to the database.
333 333 * Note: As a last step we always return a devp appropriate for
334 334 * the open. Either we return a new minor number (based on the
335 335 * instance and the UAR page index) or we return the current minor
336 336 * number for the given client process.
337 337 *
338 338 * We also add an entry to the database to allow for lookup from
339 339 * "dev_t" to the current process PID. This is necessary because,
340 340	 * under certain circumstances, the process PID that calls the Hermon
341 341	 * close() entry point may not be the same as the one that called
342 342	 * open().  Specifically, this can happen if a child process calls
343 343	 * the Hermon open() entry point, gets a UAR page, maps it out (using
344 344 * mmap()), and then exits without calling munmap(). Because mmap()
345 345 * adds a reference to the file descriptor, at the exit of the child
346 346 * process the file descriptor is "inherited" by the parent (and will
347 347 * be close()'d by the parent's PID only when it exits).
348 348 *
349 349 * Note: We use the hermon_umap_db_find_nolock() and
350 350 * hermon_umap_db_add_nolock() database access routines below (with
351 351 * an explicit mutex_enter of the database lock - "hdl_umapdb_lock")
352 352 * to ensure that the multiple accesses (in this case searching for,
353 353 * and then adding _two_ database entries) can be done atomically.
354 354 */
355 355 key = ddi_get_pid();
356 356 mutex_enter(&hermon_userland_rsrc_db.hdl_umapdb_lock);
357 357 status = hermon_umap_db_find_nolock(instance, key,
358 358 MLNX_UMAP_UARPG_RSRC, &value, 0, NULL);
359 359 if (status != DDI_SUCCESS) {
360 360 /*
361 361 * If we are in 'maintenance mode', we cannot alloc a UAR page.
362 362 * But we still need some rsrcp value, and a mostly unique
363 363 * hr_indx value. So we set rsrcp to NULL for maintenance
364 364 * mode, and use a rolling count for hr_indx. The field
365 365		 * 'hs_open_ar_indx' is used only in this maintenance mode
366 366 * condition.
367 367 *
368 368 * Otherwise, if we are in operational mode then we allocate
369 369		 * the UAR page as normal, and use the rsrcp value and hr_indx
370 370 * value from that allocation.
371 371 */
372 372 if (!HERMON_IS_OPERATIONAL(state->hs_operational_mode)) {
373 373 rsrcp = NULL;
374 374 hr_indx = state->hs_open_ar_indx++;
375 375 } else {
376 376 /* Allocate a new UAR page for this process */
377 377 status = hermon_rsrc_alloc(state, HERMON_UARPG, 1,
378 378 HERMON_NOSLEEP, &rsrcp);
379 379 if (status != DDI_SUCCESS) {
380 380 mutex_exit(
381 381 &hermon_userland_rsrc_db.hdl_umapdb_lock);
382 382 return (EAGAIN);
383 383 }
384 384
385 385 hr_indx = rsrcp->hr_indx;
386 386 }
387 387
388 388 /*
389 389 * Allocate an entry to track the UAR page resource in the
390 390 * "userland resources database".
391 391 */
392 392 umapdb = hermon_umap_db_alloc(instance, key,
393 393 MLNX_UMAP_UARPG_RSRC, (uint64_t)(uintptr_t)rsrcp);
394 394 if (umapdb == NULL) {
395 395 mutex_exit(&hermon_userland_rsrc_db.hdl_umapdb_lock);
396 396 /* If in "maintenance mode", don't free the rsrc */
397 397 if (HERMON_IS_OPERATIONAL(state->hs_operational_mode)) {
398 398 hermon_rsrc_free(state, &rsrcp);
399 399 }
400 400 return (EAGAIN);
401 401 }
402 402
403 403 /*
404 404 * Create a new device number. Minor number is a function of
405 405 * the UAR page index (15 bits) and the device instance number
406 406 * (3 bits).
407 407 */
408 408 dev = makedevice(getmajor(*devp), (hr_indx <<
409 409 HERMON_MINORNUM_SHIFT) | instance);
410 410
411 411 /*
412 412 * Allocate another entry in the "userland resources database"
413 413 * to track the association of the device number (above) to
414 414 * the current process ID (in "key").
415 415 */
416 416 umapdb2 = hermon_umap_db_alloc(instance, dev,
417 417 MLNX_UMAP_PID_RSRC, (uint64_t)key);
418 418 if (umapdb2 == NULL) {
419 419 mutex_exit(&hermon_userland_rsrc_db.hdl_umapdb_lock);
420 420 hermon_umap_db_free(umapdb);
421 421 /* If in "maintenance mode", don't free the rsrc */
422 422 if (HERMON_IS_OPERATIONAL(state->hs_operational_mode)) {
423 423 hermon_rsrc_free(state, &rsrcp);
424 424 }
425 425 return (EAGAIN);
426 426 }
427 427
428 428 /* Add the entries to the database */
429 429 hermon_umap_db_add_nolock(umapdb);
430 430 hermon_umap_db_add_nolock(umapdb2);
431 431
432 432 } else {
433 433 /*
434 434 * Return the same device number as on the original open()
435 435		 * call.  This was calculated as a function of the UAR page
436 436		 * index (15 bits) and the device instance number (3 bits).
437 437 */
438 438 rsrcp = (hermon_rsrc_t *)(uintptr_t)value;
439 439 dev = makedevice(getmajor(*devp), (rsrcp->hr_indx <<
440 440 HERMON_MINORNUM_SHIFT) | instance);
441 441 }
442 442 mutex_exit(&hermon_userland_rsrc_db.hdl_umapdb_lock);
443 443
444 444 *devp = dev;
445 445
446 446 return (0);
447 447 }
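As a minimal sketch of the minor-number encoding described above (illustrative only, not part of the driver: the helper name is hypothetical, and it assumes getminor() plus the shifts used in hermon_open() recover the fields):

	/*
	 * Hypothetical helper, for illustration only.  The instance lives in
	 * the low bits of the minor number (HERMON_DEV_INSTANCE(dev) recovers
	 * it), and the UAR page index sits above HERMON_MINORNUM_SHIFT.
	 */
	static uint_t
	hermon_minor_to_uarpg(dev_t dev)
	{
		return (getminor(dev) >> HERMON_MINORNUM_SHIFT);
	}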
448 448
449 449
450 450 /*
451 451 * hermon_close()
452 452 */
453 453 /* ARGSUSED */
454 454 static int
455 455 hermon_close(dev_t dev, int flag, int otyp, cred_t *credp)
456 456 {
457 457 hermon_state_t *state;
458 458 hermon_rsrc_t *rsrcp;
459 459 hermon_umap_db_entry_t *umapdb;
460 460 hermon_umap_db_priv_t *priv;
461 461 minor_t instance;
462 462 uint64_t key, value;
463 463 int status, reset_status = 0;
464 464
465 465 instance = HERMON_DEV_INSTANCE(dev);
466 466 state = ddi_get_soft_state(hermon_statep, instance);
467 467 if (state == NULL) {
468 468 return (ENXIO);
469 469 }
470 470
471 471 /*
472 472 * Search for "dev_t" in the "userland resources database". As
473 473 * explained above in hermon_open(), we can't depend on using the
474 474 * current process ID here to do the lookup because the process
475 475	 * that ultimately closes may not be the same one that opened
476 476	 * (because of inheritance).
477 477	 * So we look up the "dev_t" (which points to the PID of the process
478 478 * that opened), and we remove the entry from the database (and free
479 479 * it up). Then we do another query based on the PID value. And when
480 480 * we find that database entry, we free it up too and then free the
481 481 * Hermon UAR page resource.
482 482 *
483 483 * Note: We use the hermon_umap_db_find_nolock() database access
484 484 * routine below (with an explicit mutex_enter of the database lock)
485 485 * to ensure that the multiple accesses (which attempt to remove the
486 486 * two database entries) can be done atomically.
487 487 *
488 488 * This works the same in both maintenance mode and HCA mode, except
489 489 * for the call to hermon_rsrc_free(). In the case of maintenance mode,
490 490	 * this call is not needed, as no UAR page resource was allocated
491 491	 * in hermon_open() above.
492 492 */
493 493 key = dev;
494 494 mutex_enter(&hermon_userland_rsrc_db.hdl_umapdb_lock);
495 495 status = hermon_umap_db_find_nolock(instance, key, MLNX_UMAP_PID_RSRC,
496 496 &value, HERMON_UMAP_DB_REMOVE, &umapdb);
497 497 if (status == DDI_SUCCESS) {
498 498 /*
499 499 * If the "hdb_priv" field is non-NULL, it indicates that
500 500 * some "on close" handling is still necessary. Call
501 501 * hermon_umap_db_handle_onclose_cb() to do the handling (i.e.
502 502 * to invoke all the registered callbacks). Then free up
503 503 * the resources associated with "hdb_priv" and continue
504 504 * closing.
505 505 */
506 506 priv = (hermon_umap_db_priv_t *)umapdb->hdbe_common.hdb_priv;
507 507 if (priv != NULL) {
508 508 reset_status = hermon_umap_db_handle_onclose_cb(priv);
509 509 kmem_free(priv, sizeof (hermon_umap_db_priv_t));
510 510 umapdb->hdbe_common.hdb_priv = (void *)NULL;
511 511 }
512 512
513 513 hermon_umap_db_free(umapdb);
514 514
515 515 /*
516 516 * Now do another lookup using PID as the key (copy it from
517 517 * "value"). When this lookup is complete, the "value" field
518 518 * will contain the hermon_rsrc_t pointer for the UAR page
519 519 * resource.
520 520 */
521 521 key = value;
522 522 status = hermon_umap_db_find_nolock(instance, key,
523 523 MLNX_UMAP_UARPG_RSRC, &value, HERMON_UMAP_DB_REMOVE,
524 524 &umapdb);
525 525 if (status == DDI_SUCCESS) {
526 526 hermon_umap_db_free(umapdb);
527 527 /* If in "maintenance mode", don't free the rsrc */
528 528 if (HERMON_IS_OPERATIONAL(state->hs_operational_mode)) {
529 529 rsrcp = (hermon_rsrc_t *)(uintptr_t)value;
530 530 hermon_rsrc_free(state, &rsrcp);
531 531 }
532 532 }
533 533 }
534 534 mutex_exit(&hermon_userland_rsrc_db.hdl_umapdb_lock);
535 535 return (reset_status);
536 536 }
537 537
538 538
539 539 /*
540 540 * hermon_attach()
541 541 * Context: Only called from attach() path context
542 542 */
543 543 static int
544 544 hermon_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
545 545 {
546 546 hermon_state_t *state;
547 547 ibc_clnt_hdl_t tmp_ibtfpriv;
548 548 ibc_status_t ibc_status;
549 549 int instance;
550 550 int status;
551 551
552 -#ifdef __lock_lint
553 - (void) hermon_quiesce(dip);
554 -#endif
555 -
556 552 switch (cmd) {
557 553 case DDI_ATTACH:
558 554 instance = ddi_get_instance(dip);
559 555 status = ddi_soft_state_zalloc(hermon_statep, instance);
560 556 if (status != DDI_SUCCESS) {
561 557 cmn_err(CE_NOTE, "hermon%d: driver failed to attach: "
562 558 "attach_ssz_fail", instance);
563 559 goto fail_attach_nomsg;
564 560
565 561 }
566 562 state = ddi_get_soft_state(hermon_statep, instance);
567 563 if (state == NULL) {
568 564 ddi_soft_state_free(hermon_statep, instance);
569 565 cmn_err(CE_NOTE, "hermon%d: driver failed to attach: "
570 566 "attach_gss_fail", instance);
571 567 goto fail_attach_nomsg;
572 568 }
573 569
574 570 /* clear the attach error buffer */
575 571 HERMON_ATTACH_MSG_INIT(state->hs_attach_buf);
576 572
577 573 /* Save away devinfo and instance before hermon_fm_init() */
578 574 state->hs_dip = dip;
579 575 state->hs_instance = instance;
580 576
581 577 hermon_fm_init(state);
582 578
583 579 /*
584 580 * Initialize Hermon driver and hardware.
585 581 *
586 582 * Note: If this initialization fails we may still wish to
587 583 * create a device node and remain operational so that Hermon
588 584 * firmware can be updated/flashed (i.e. "maintenance mode").
589 585 * If this is the case, then "hs_operational_mode" will be
590 586 * equal to HERMON_MAINTENANCE_MODE. We will not attempt to
591 587 * attach to the IBTF or register with the IBMF (i.e. no
592 588 * InfiniBand interfaces will be enabled).
593 589 */
594 590 status = hermon_drv_init(state, dip, instance);
595 591 if ((status != DDI_SUCCESS) &&
596 592 (HERMON_IS_OPERATIONAL(state->hs_operational_mode))) {
597 593 goto fail_attach;
598 594 }
599 595
600 596 /*
601 597 * Change the Hermon FM mode
602 598 */
603 599 if ((hermon_get_state(state) & HCA_PIO_FM) &&
604 600 HERMON_IS_OPERATIONAL(state->hs_operational_mode)) {
605 601 /*
606 602			 * Now we wait for 50ms to give Solaris FMA an
607 603			 * opportunity to report any HW errors that may
608 604			 * have occurred.  Then check whether an error was
609 605			 * detected; if a HW error is found, the Hermon
610 606			 * attach must fail.
611 607 */
612 608 delay(drv_usectohz(50000));
613 609 if (hermon_init_failure(state)) {
614 610 hermon_drv_fini(state);
615 611 HERMON_WARNING(state, "unable to "
616 612 "attach Hermon due to a HW error");
617 613 HERMON_ATTACH_MSG(state->hs_attach_buf,
618 614 "hermon_attach_failure");
619 615 goto fail_attach;
620 616 }
621 617
622 618 /*
623 619			 * No HW errors were detected during the attach,
624 620			 * so change the Hermon FM state to the
625 621			 * ereport-only mode.
626 622 */
627 623 if (hermon_fm_ereport_init(state) != DDI_SUCCESS) {
628 624 /* unwind the resources */
629 625 hermon_drv_fini(state);
630 626 HERMON_ATTACH_MSG(state->hs_attach_buf,
631 627 "hermon_attach_failure");
632 628 goto fail_attach;
633 629 }
634 630 }
635 631
636 632 /* Create the minor node for device */
637 633 status = ddi_create_minor_node(dip, "devctl", S_IFCHR, instance,
638 634 DDI_PSEUDO, 0);
639 635 if (status != DDI_SUCCESS) {
640 636 hermon_drv_fini(state);
641 637 HERMON_ATTACH_MSG(state->hs_attach_buf,
642 638 "attach_create_mn_fail");
643 639 goto fail_attach;
644 640 }
645 641
646 642 /*
647 643 * If we are in "maintenance mode", then we don't want to
648 644 * register with the IBTF. All InfiniBand interfaces are
649 645 * uninitialized, and the device is only capable of handling
650 646 * requests to update/flash firmware (or test/debug requests).
651 647 */
652 648 if (HERMON_IS_OPERATIONAL(state->hs_operational_mode)) {
653 649 cmn_err(CE_NOTE, "!Hermon is operational\n");
654 650
655 651 /* Attach to InfiniBand Transport Framework (IBTF) */
656 652 ibc_status = ibc_attach(&tmp_ibtfpriv,
657 653 &state->hs_ibtfinfo);
658 654 if (ibc_status != IBC_SUCCESS) {
659 655 cmn_err(CE_CONT, "hermon_attach: ibc_attach "
660 656 "failed\n");
661 657 ddi_remove_minor_node(dip, "devctl");
662 658 hermon_drv_fini(state);
663 659 HERMON_ATTACH_MSG(state->hs_attach_buf,
664 660 "attach_ibcattach_fail");
665 661 goto fail_attach;
666 662 }
667 663
668 664 /*
669 665 * Now that we've successfully attached to the IBTF,
670 666 * we enable all appropriate asynch and CQ events to
671 667 * be forwarded to the IBTF.
672 668 */
673 669 HERMON_ENABLE_IBTF_CALLB(state, tmp_ibtfpriv);
674 670
675 671 ibc_post_attach(state->hs_ibtfpriv);
676 672
677 673 /* Register agents with IB Mgmt Framework (IBMF) */
678 674 status = hermon_agent_handlers_init(state);
679 675 if (status != DDI_SUCCESS) {
680 676 (void) ibc_pre_detach(tmp_ibtfpriv, DDI_DETACH);
681 677 HERMON_QUIESCE_IBTF_CALLB(state);
682 678 if (state->hs_in_evcallb != 0) {
683 679 HERMON_WARNING(state, "unable to "
684 680 "quiesce Hermon IBTF callbacks");
685 681 }
686 682 ibc_detach(tmp_ibtfpriv);
687 683 ddi_remove_minor_node(dip, "devctl");
688 684 hermon_drv_fini(state);
689 685 HERMON_ATTACH_MSG(state->hs_attach_buf,
690 686 "attach_agentinit_fail");
691 687 goto fail_attach;
692 688 }
693 689 }
694 690
695 691 /* Report attach in maintenance mode, if appropriate */
696 692 if (!(HERMON_IS_OPERATIONAL(state->hs_operational_mode))) {
697 693 cmn_err(CE_NOTE, "hermon%d: driver attached "
698 694 "(for maintenance mode only)", state->hs_instance);
699 695 hermon_fm_ereport(state, HCA_IBA_ERR, HCA_ERR_DEGRADED);
700 696 }
701 697
702 698 /* Report that driver was loaded */
703 699 ddi_report_dev(dip);
704 700
705 701 /* Send device information to log file */
706 702 hermon_device_info_report(state);
707 703
708 704 /* DEBUG PRINT */
709 705 cmn_err(CE_CONT, "!Hermon attach complete\n");
710 706 return (DDI_SUCCESS);
711 707
712 708 case DDI_RESUME:
713 709 /* Add code here for DDI_RESUME XXX */
714 710 return (DDI_FAILURE);
715 711
716 712 default:
717 713 cmn_err(CE_WARN, "hermon_attach: unknown cmd (0x%x)\n", cmd);
718 714 break;
719 715 }
720 716
721 717 fail_attach:
722 718 cmn_err(CE_NOTE, "hermon%d: driver failed to attach: %s", instance,
723 719 state->hs_attach_buf);
724 720 if (hermon_get_state(state) & HCA_EREPORT_FM) {
725 721 hermon_fm_ereport(state, HCA_SYS_ERR, HCA_ERR_SRV_LOST);
726 722 }
727 723 hermon_drv_fini2(state);
728 724 hermon_fm_fini(state);
729 725 ddi_soft_state_free(hermon_statep, instance);
730 726
731 727 fail_attach_nomsg:
732 728 return (DDI_FAILURE);
733 729 }
734 730
735 731
736 732 /*
737 733 * hermon_detach()
738 734 * Context: Only called from detach() path context
739 735 */
740 736 static int
741 737 hermon_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
742 738 {
743 739 hermon_state_t *state;
744 740 ibc_clnt_hdl_t tmp_ibtfpriv;
745 741 ibc_status_t ibc_status;
746 742 int instance, status;
747 743
748 744 instance = ddi_get_instance(dip);
749 745 state = ddi_get_soft_state(hermon_statep, instance);
750 746 if (state == NULL) {
751 747 return (DDI_FAILURE);
752 748 }
753 749
754 750 switch (cmd) {
755 751 case DDI_DETACH:
756 752 /*
757 753 * If we are in "maintenance mode", then we do not want to
758 754 * do teardown for any of the InfiniBand interfaces.
759 755 * Specifically, this means not detaching from IBTF (we never
760 756 * attached to begin with) and not deregistering from IBMF.
761 757 */
762 758 if (HERMON_IS_OPERATIONAL(state->hs_operational_mode)) {
763 759 /* Unregister agents from IB Mgmt Framework (IBMF) */
764 760 status = hermon_agent_handlers_fini(state);
765 761 if (status != DDI_SUCCESS) {
766 762 return (DDI_FAILURE);
767 763 }
768 764
769 765 /*
770 766 * Attempt the "pre-detach" from InfiniBand Transport
771 767 * Framework (IBTF). At this point the IBTF is still
772 768 * capable of handling incoming asynch and completion
773 769 * events. This "pre-detach" is primarily a mechanism
774 770 * to notify the appropriate IBTF clients that the
775 771 * HCA is being removed/offlined.
776 772 */
777 773 ibc_status = ibc_pre_detach(state->hs_ibtfpriv, cmd);
778 774 if (ibc_status != IBC_SUCCESS) {
779 775 status = hermon_agent_handlers_init(state);
780 776 if (status != DDI_SUCCESS) {
781 777 HERMON_WARNING(state, "failed to "
782 778 "restart Hermon agents");
783 779 }
784 780 return (DDI_FAILURE);
785 781 }
786 782
787 783 /*
788 784 * Before we can fully detach from the IBTF we need to
789 785 * ensure that we have handled all outstanding event
790 786 * callbacks. This is accomplished by quiescing the
791 787 * event callback mechanism. Note: if we are unable
792 788 * to successfully quiesce the callbacks, then this is
793 789 * an indication that something has probably gone
794 790 * seriously wrong. We print out a warning, but
795 791 * continue.
796 792 */
797 793 tmp_ibtfpriv = state->hs_ibtfpriv;
798 794 HERMON_QUIESCE_IBTF_CALLB(state);
799 795 if (state->hs_in_evcallb != 0) {
800 796 HERMON_WARNING(state, "unable to quiesce "
801 797 "Hermon IBTF callbacks");
802 798 }
803 799
804 800 /* Complete the detach from the IBTF */
805 801 ibc_detach(tmp_ibtfpriv);
806 802 }
807 803
808 804 /* Remove the minor node for device */
809 805 ddi_remove_minor_node(dip, "devctl");
810 806
811 807 /*
812 808 * Only call hermon_drv_fini() if we are in Hermon HCA mode.
813 809 * (Because if we are in "maintenance mode", then we never
814 810 * successfully finished init.) Only report successful
815 811 * detach for normal HCA mode.
816 812 */
817 813 if (HERMON_IS_OPERATIONAL(state->hs_operational_mode)) {
818 814 /* Cleanup driver resources and shutdown hardware */
819 815 hermon_drv_fini(state);
820 816 cmn_err(CE_CONT, "!Hermon driver successfully "
821 817 "detached\n");
822 818 }
823 819
824 820 hermon_drv_fini2(state);
825 821 hermon_fm_fini(state);
826 822 ddi_soft_state_free(hermon_statep, instance);
827 823
828 824 return (DDI_SUCCESS);
829 825
830 826 case DDI_SUSPEND:
831 827 /* Add code here for DDI_SUSPEND XXX */
832 828 return (DDI_FAILURE);
833 829
834 830 default:
835 831 cmn_err(CE_WARN, "hermon_detach: unknown cmd (0x%x)\n", cmd);
836 832 break;
837 833 }
838 834
839 835 return (DDI_FAILURE);
840 836 }
841 837
842 838 /*
843 839 * hermon_dma_attr_init()
844 840 * Context: Can be called from interrupt or base context.
845 841 */
846 842
847 843 /* ARGSUSED */
848 844 void
849 845 hermon_dma_attr_init(hermon_state_t *state, ddi_dma_attr_t *dma_attr)
850 846 {
851 - _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*dma_attr))
852 -
853 847 dma_attr->dma_attr_version = DMA_ATTR_V0;
854 848 dma_attr->dma_attr_addr_lo = 0;
855 849 dma_attr->dma_attr_addr_hi = 0xFFFFFFFFFFFFFFFFull;
856 850 dma_attr->dma_attr_count_max = 0xFFFFFFFFFFFFFFFFull;
857 851 dma_attr->dma_attr_align = HERMON_PAGESIZE; /* default 4K */
858 852 dma_attr->dma_attr_burstsizes = 0x3FF;
859 853 dma_attr->dma_attr_minxfer = 1;
860 854 dma_attr->dma_attr_maxxfer = 0xFFFFFFFFFFFFFFFFull;
861 855 dma_attr->dma_attr_seg = 0xFFFFFFFFFFFFFFFFull;
862 856 dma_attr->dma_attr_sgllen = 0x7FFFFFFF;
863 857 dma_attr->dma_attr_granular = 1;
864 858 dma_attr->dma_attr_flags = 0;
865 859 }
866 860
867 861 /*
868 862 * hermon_dma_alloc()
869 863 * Context: Can be called from base context.
870 864 */
871 865 int
872 866 hermon_dma_alloc(hermon_state_t *state, hermon_dma_info_t *dma_info,
873 867 uint16_t opcode)
874 868 {
875 869 ddi_dma_handle_t dma_hdl;
876 870 ddi_dma_attr_t dma_attr;
877 871 ddi_acc_handle_t acc_hdl;
878 872 ddi_dma_cookie_t cookie;
879 873 uint64_t kaddr;
880 874 uint64_t real_len;
881 875 uint_t ccount;
882 876 int status;
883 877
884 878 hermon_dma_attr_init(state, &dma_attr);
885 879 #ifdef __sparc
886 880 if (state->hs_cfg_profile->cp_iommu_bypass == HERMON_BINDMEM_BYPASS)
887 881 dma_attr.dma_attr_flags = DDI_DMA_FORCE_PHYSICAL;
888 882 #endif
889 883
890 884 /* Allocate a DMA handle */
891 885 status = ddi_dma_alloc_handle(state->hs_dip, &dma_attr, DDI_DMA_SLEEP,
892 886 NULL, &dma_hdl);
893 887 if (status != DDI_SUCCESS) {
894 888 IBTF_DPRINTF_L2("DMA", "alloc handle failed: %d", status);
895 889 cmn_err(CE_CONT, "DMA alloc handle failed(status %d)", status);
896 890 return (DDI_FAILURE);
897 891 }
898 892
899 893 /* Allocate DMA memory */
900 894 status = ddi_dma_mem_alloc(dma_hdl, dma_info->length,
901 895 &state->hs_reg_accattr, DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL,
902 896 (caddr_t *)&kaddr, (size_t *)&real_len, &acc_hdl);
903 897 if (status != DDI_SUCCESS) {
904 898 ddi_dma_free_handle(&dma_hdl);
905 899 IBTF_DPRINTF_L2("DMA", "memory alloc failed: %d", status);
906 900 cmn_err(CE_CONT, "DMA memory alloc failed(status %d)", status);
907 901 return (DDI_FAILURE);
908 902 }
909 903 bzero((caddr_t)(uintptr_t)kaddr, real_len);
910 904
911 905 /* Bind the memory to the handle */
912 906 status = ddi_dma_addr_bind_handle(dma_hdl, NULL,
913 907 (caddr_t)(uintptr_t)kaddr, (size_t)real_len, DDI_DMA_RDWR |
914 908 DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL, &cookie, &ccount);
915 909 if (status != DDI_SUCCESS) {
916 910 ddi_dma_mem_free(&acc_hdl);
917 911 ddi_dma_free_handle(&dma_hdl);
918 912 IBTF_DPRINTF_L2("DMA", "bind handle failed: %d", status);
919 913 cmn_err(CE_CONT, "DMA bind handle failed(status %d)", status);
920 914 return (DDI_FAILURE);
921 915 }
922 916
923 917 /* Package the hermon_dma_info contents and return */
924 918 dma_info->vaddr = kaddr;
925 919 dma_info->dma_hdl = dma_hdl;
926 920 dma_info->acc_hdl = acc_hdl;
927 921
928 922 /* Pass the mapping information to the firmware */
929 923 status = hermon_map_cmd_post(state, dma_info, opcode, cookie, ccount);
930 924 if (status != DDI_SUCCESS) {
931 925 char *s;
932 926 hermon_dma_free(dma_info);
933 927 switch (opcode) {
934 928 case MAP_ICM:
935 929 s = "MAP_ICM";
936 930 break;
937 931 case MAP_FA:
938 932 s = "MAP_FA";
939 933 break;
940 934 case MAP_ICM_AUX:
941 935 s = "MAP_ICM_AUX";
942 936 break;
943 937 default:
944 938 s = "UNKNOWN";
945 939 }
946 940 cmn_err(CE_NOTE, "Map cmd '%s' failed, status %08x\n",
947 941 s, status);
948 942 return (DDI_FAILURE);
949 943 }
950 944
951 945 return (DDI_SUCCESS);
952 946 }
953 947
954 948 /*
955 949 * hermon_dma_free()
956 950 * Context: Can be called from base context.
957 951 */
958 952 void
959 953 hermon_dma_free(hermon_dma_info_t *info)
960 954 {
961 955 /* Unbind the handles and free the memory */
962 956 (void) ddi_dma_unbind_handle(info->dma_hdl);
963 957 ddi_dma_mem_free(&info->acc_hdl);
964 958 ddi_dma_free_handle(&info->dma_hdl);
965 959 }
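A minimal usage sketch of the alloc/free pair above (illustrative only: "icm_base_addr" is a hypothetical value, a hermon_state_t pointer "state" is assumed to be in scope, and the fields set here mirror how hermon_icm_alloc() below fills in a hermon_dma_info_t):

	hermon_dma_info_t	dma_info;

	bzero(&dma_info, sizeof (dma_info));
	dma_info.length = HERMON_PAGESIZE;	/* bytes of ICM to back */
	dma_info.icmaddr = icm_base_addr;	/* hypothetical ICM address */

	if (hermon_dma_alloc(state, &dma_info, MAP_ICM) != DDI_SUCCESS)
		return (DDI_FAILURE);

	/* ... span is now backed and mapped; later, unmap and free it ... */
	(void) hermon_unmap_icm_cmd_post(state, &dma_info);
	hermon_dma_free(&dma_info);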
966 960
967 961 /* These macros are valid for use only in hermon_icm_alloc/hermon_icm_free. */
968 962 #define HERMON_ICM_ALLOC(rsrc) \
969 963 hermon_icm_alloc(state, rsrc, index1, index2)
970 964 #define HERMON_ICM_FREE(rsrc) \
971 965 hermon_icm_free(state, rsrc, index1, index2)
972 966
973 967 /*
974 968 * hermon_icm_alloc()
975 969 * Context: Can be called from base context.
976 970 *
977 971 * Only one thread can be here for a given hermon_rsrc_type_t "type".
978 972 *
979 973 * "num_to_hdl" is set if there is a need for lookups from resource
980 974 * number/index to resource handle. This is needed for QPs/CQs/SRQs
981 975 * for the various affiliated events/errors.
982 976 */
983 977 int
984 978 hermon_icm_alloc(hermon_state_t *state, hermon_rsrc_type_t type,
985 979 uint32_t index1, uint32_t index2)
986 980 {
987 981 hermon_icm_table_t *icm;
988 982 hermon_dma_info_t *dma_info;
989 983 uint8_t *bitmap;
990 984 int status;
991 985 int num_to_hdl = 0;
992 986
993 987 if (hermon_verbose) {
994 988 IBTF_DPRINTF_L2("hermon", "hermon_icm_alloc: rsrc_type (0x%x) "
995 989 "index1/2 (0x%x/0x%x)", type, index1, index2);
996 990 }
997 991
998 992 icm = &state->hs_icm[type];
999 993
1000 994 switch (type) {
1001 995 case HERMON_QPC:
1002 996 status = HERMON_ICM_ALLOC(HERMON_CMPT_QPC);
1003 997 if (status != DDI_SUCCESS) {
1004 998 return (status);
1005 999 }
1006 1000 status = HERMON_ICM_ALLOC(HERMON_RDB);
1007 1001 if (status != DDI_SUCCESS) { /* undo icm_alloc's */
1008 1002 HERMON_ICM_FREE(HERMON_CMPT_QPC);
1009 1003 return (status);
1010 1004 }
1011 1005 status = HERMON_ICM_ALLOC(HERMON_ALTC);
1012 1006 if (status != DDI_SUCCESS) { /* undo icm_alloc's */
1013 1007 HERMON_ICM_FREE(HERMON_RDB);
1014 1008 HERMON_ICM_FREE(HERMON_CMPT_QPC);
1015 1009 return (status);
1016 1010 }
1017 1011 status = HERMON_ICM_ALLOC(HERMON_AUXC);
1018 1012 if (status != DDI_SUCCESS) { /* undo icm_alloc's */
1019 1013 HERMON_ICM_FREE(HERMON_ALTC);
1020 1014 HERMON_ICM_FREE(HERMON_RDB);
1021 1015 HERMON_ICM_FREE(HERMON_CMPT_QPC);
1022 1016 return (status);
1023 1017 }
1024 1018 num_to_hdl = 1;
1025 1019 break;
1026 1020 case HERMON_SRQC:
1027 1021 status = HERMON_ICM_ALLOC(HERMON_CMPT_SRQC);
1028 1022 if (status != DDI_SUCCESS) {
1029 1023 return (status);
1030 1024 }
1031 1025 num_to_hdl = 1;
1032 1026 break;
1033 1027 case HERMON_CQC:
1034 1028 status = HERMON_ICM_ALLOC(HERMON_CMPT_CQC);
1035 1029 if (status != DDI_SUCCESS) {
1036 1030 return (status);
1037 1031 }
1038 1032 num_to_hdl = 1;
1039 1033 break;
1040 1034 case HERMON_EQC:
1041 1035 status = HERMON_ICM_ALLOC(HERMON_CMPT_EQC);
1042 1036 if (status != DDI_SUCCESS) { /* undo icm_alloc's */
1043 1037 return (status);
1044 1038 }
1045 1039 break;
1046 1040 }
1047 1041
1048 1042 /* ensure existence of bitmap and dmainfo, sets "dma_info" */
1049 1043 hermon_bitmap(bitmap, dma_info, icm, index1, num_to_hdl);
1050 1044
1051 1045 /* Set up the DMA handle for allocation and mapping */
1052 1046 dma_info += index2;
1053 - _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*dma_info))
1054 1047 dma_info->length = icm->span << icm->log_object_size;
1055 1048 dma_info->icmaddr = icm->icm_baseaddr +
1056 1049 (((index1 << icm->split_shift) +
1057 1050 (index2 << icm->span_shift)) << icm->log_object_size);
1058 1051
1059 1052 /* Allocate memory for the num_to_qp/cq/srq pointers */
1060 1053 if (num_to_hdl)
1061 1054 icm->num_to_hdl[index1][index2] =
1062 1055 kmem_zalloc(HERMON_ICM_SPAN * sizeof (void *), KM_SLEEP);
1063 1056
1064 1057 if (hermon_verbose) {
1065 1058 IBTF_DPRINTF_L2("hermon", "alloc DMA: "
1066 1059 "rsrc (0x%x) index (%x, %x) "
1067 1060 "icm_addr/len (%llx/%x) bitmap %p", type, index1, index2,
1068 1061 (longlong_t)dma_info->icmaddr, dma_info->length, bitmap);
1069 1062 }
1070 1063
1071 1064 /* Allocate and map memory for this span */
1072 1065 status = hermon_dma_alloc(state, dma_info, MAP_ICM);
1073 1066 if (status != DDI_SUCCESS) {
1074 1067 IBTF_DPRINTF_L2("hermon", "hermon_icm_alloc: DMA "
1075 1068 "allocation failed, status 0x%x", status);
1076 1069 switch (type) {
1077 1070 case HERMON_QPC:
1078 1071 HERMON_ICM_FREE(HERMON_AUXC);
1079 1072 HERMON_ICM_FREE(HERMON_ALTC);
1080 1073 HERMON_ICM_FREE(HERMON_RDB);
1081 1074 HERMON_ICM_FREE(HERMON_CMPT_QPC);
1082 1075 break;
1083 1076 case HERMON_SRQC:
1084 1077 HERMON_ICM_FREE(HERMON_CMPT_SRQC);
1085 1078 break;
1086 1079 case HERMON_CQC:
1087 1080 HERMON_ICM_FREE(HERMON_CMPT_CQC);
1088 1081 break;
1089 1082 case HERMON_EQC:
1090 1083 HERMON_ICM_FREE(HERMON_CMPT_EQC);
1091 1084 break;
1092 1085 }
1093 1086
1094 1087 return (DDI_FAILURE);
1095 1088 }
1096 1089 if (hermon_verbose) {
1097 1090 IBTF_DPRINTF_L2("hermon", "hermon_icm_alloc: mapping ICM: "
1098 1091 "rsrc_type (0x%x) index (0x%x, 0x%x) alloc length (0x%x) "
1099 1092 "icm_addr (0x%lx)", type, index1, index2, dma_info->length,
1100 1093 dma_info->icmaddr);
1101 1094 }
1102 1095
1103 1096 /* Set the bit for this slot in the table bitmap */
1104 1097 HERMON_BMAP_BIT_SET(icm->icm_bitmap[index1], index2);
1105 1098
1106 1099 return (DDI_SUCCESS);
1107 1100 }
1108 1101
1109 1102 /*
1110 1103 * hermon_icm_free()
1111 1104 * Context: Can be called from base context.
1112 1105 *
1113 1106 * ICM resources have been successfully returned from hermon_icm_alloc().
1114 1107 * Associated dma_info is no longer in use. Free the ICM backing memory.
1115 1108 */
1116 1109 void
1117 1110 hermon_icm_free(hermon_state_t *state, hermon_rsrc_type_t type,
1118 1111 uint32_t index1, uint32_t index2)
1119 1112 {
1120 1113 hermon_icm_table_t *icm;
1121 1114 hermon_dma_info_t *dma_info;
1122 1115 int status;
1123 1116
1124 1117 icm = &state->hs_icm[type];
1125 1118 ASSERT(icm->icm_dma[index1][index2].icm_refcnt == 0);
1126 1119
1127 1120 if (hermon_verbose) {
1128 1121 IBTF_DPRINTF_L2("hermon", "hermon_icm_free: rsrc_type (0x%x) "
1129 1122 "index (0x%x, 0x%x)", type, index1, index2);
1130 1123 }
1131 1124
1132 1125 dma_info = icm->icm_dma[index1] + index2;
1133 1126
1134 1127 /* The following only happens if attach() is failing. */
1135 1128 if (dma_info == NULL)
1136 1129 return;
1137 1130
1138 1131 /* Unmap the ICM allocation, then free the backing DMA memory */
1139 1132 status = hermon_unmap_icm_cmd_post(state, dma_info);
1140 1133 if (status != DDI_SUCCESS) {
1141 1134 HERMON_WARNING(state, "UNMAP_ICM failure");
1142 1135 }
1143 1136 hermon_dma_free(dma_info);
1144 1137
1145 1138 /* Clear the bit in the ICM table bitmap */
1146 1139 HERMON_BMAP_BIT_CLR(icm->icm_bitmap[index1], index2);
1147 1140
1148 1141 switch (type) {
1149 1142 case HERMON_QPC:
1150 1143 HERMON_ICM_FREE(HERMON_AUXC);
1151 1144 HERMON_ICM_FREE(HERMON_ALTC);
1152 1145 HERMON_ICM_FREE(HERMON_RDB);
1153 1146 HERMON_ICM_FREE(HERMON_CMPT_QPC);
1154 1147 break;
1155 1148 case HERMON_SRQC:
1156 1149 HERMON_ICM_FREE(HERMON_CMPT_SRQC);
1157 1150 break;
1158 1151 case HERMON_CQC:
1159 1152 HERMON_ICM_FREE(HERMON_CMPT_CQC);
1160 1153 break;
1161 1154 case HERMON_EQC:
1162 1155 HERMON_ICM_FREE(HERMON_CMPT_EQC);
1163 1156 break;
1164 1157
1165 1158 }
1166 1159 }
1167 1160
1168 1161
1169 1162 /*
1170 1163 * hermon_icm_num_to_hdl()
1171 1164 * Context: Can be called from base or interrupt context.
1172 1165 *
1173 1166 * Given an index of a resource, index through the sparsely allocated
1174 1167 * arrays to find the pointer to its software handle. Return NULL if
1175 1168 * any of the arrays of pointers has been freed (should never happen).
1176 1169 */
1177 1170 void *
1178 1171 hermon_icm_num_to_hdl(hermon_state_t *state, hermon_rsrc_type_t type,
1179 1172 uint32_t idx)
1180 1173 {
1181 1174 hermon_icm_table_t *icm;
1182 1175 uint32_t span_offset;
1183 1176 uint32_t index1, index2;
1184 1177 void ***p1, **p2;
1185 1178
1186 1179 icm = &state->hs_icm[type];
1187 1180 hermon_index(index1, index2, idx, icm, span_offset);
1188 1181 p1 = icm->num_to_hdl[index1];
1189 1182 if (p1 == NULL) {
1190 1183 IBTF_DPRINTF_L2("hermon", "icm_num_to_hdl failed at level 1"
1191 1184 ": rsrc_type %d, index 0x%x", type, idx);
1192 1185 return (NULL);
1193 1186 }
1194 1187 p2 = p1[index2];
1195 1188 if (p2 == NULL) {
1196 1189 IBTF_DPRINTF_L2("hermon", "icm_num_to_hdl failed at level 2"
1197 1190 ": rsrc_type %d, index 0x%x", type, idx);
1198 1191 return (NULL);
1199 1192 }
1200 1193 return (p2[span_offset]);
1201 1194 }
1202 1195
1203 1196 /*
1204 1197 * hermon_icm_set_num_to_hdl()
1205 1198 * Context: Can be called from base or interrupt context.
1206 1199 *
1207 1200 * Given an index of a resource, we index through the sparsely allocated
1208 1201 * arrays to store the software handle, used by hermon_icm_num_to_hdl().
1209 1202 * This function is used to both set and reset (set to NULL) the handle.
1210 1203 * This table is allocated during ICM allocation for the given resource,
1211 1204 * so its existence is a given, and the store location does not conflict
1212 1205 * with any other stores to the table (no locking needed).
1213 1206 */
1214 1207 void
1215 1208 hermon_icm_set_num_to_hdl(hermon_state_t *state, hermon_rsrc_type_t type,
1216 1209 uint32_t idx, void *hdl)
1217 1210 {
1218 1211 hermon_icm_table_t *icm;
1219 1212 uint32_t span_offset;
1220 1213 uint32_t index1, index2;
1221 1214
1222 1215 icm = &state->hs_icm[type];
1223 1216 hermon_index(index1, index2, idx, icm, span_offset);
1224 1217 ASSERT((hdl == NULL) ^
1225 1218 (icm->num_to_hdl[index1][index2][span_offset] == NULL));
1226 1219 icm->num_to_hdl[index1][index2][span_offset] = hdl;
1227 1220 }
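A minimal sketch of how the two helpers above pair up over a resource's lifetime (illustrative only: "qpc_indx", "qphdl", and "hdl" are hypothetical names, and the stored pointer is whatever software handle the caller wants back from the affiliated-event path):

	/* at QP allocation time, once the QPC index is known */
	hermon_icm_set_num_to_hdl(state, HERMON_QPC, qpc_indx, (void *)qphdl);

	/* later, e.g. in an affiliated-event handler */
	hdl = hermon_icm_num_to_hdl(state, HERMON_QPC, qpc_indx);
	if (hdl == NULL)
		return;			/* resource already torn down */

	/* when the QP is freed, reset the entry to NULL */
	hermon_icm_set_num_to_hdl(state, HERMON_QPC, qpc_indx, NULL);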
1228 1221
1229 1222 /*
1230 1223 * hermon_device_mode()
1231 1224 * Context: Can be called from base or interrupt context.
1232 1225 *
1233 1226 * Return HERMON_HCA_MODE for operational mode
1234 1227 * Return HERMON_MAINTENANCE_MODE for maintenance mode
1235 1228 * Return 0 otherwise
1236 1229 *
1237 1230 * A non-zero return for either operational or maintenance mode simplifies
1238 1231 * one of the 2 uses of this function.
1239 1232 */
1240 1233 int
1241 1234 hermon_device_mode(hermon_state_t *state)
1242 1235 {
1243 1236 if (state->hs_vendor_id != PCI_VENID_MLX)
1244 1237 return (0);
1245 1238
1246 1239 switch (state->hs_device_id) {
1247 1240 case PCI_DEVID_HERMON_SDR:
1248 1241 case PCI_DEVID_HERMON_DDR:
1249 1242 case PCI_DEVID_HERMON_DDRG2:
1250 1243 case PCI_DEVID_HERMON_QDRG2:
1251 1244 case PCI_DEVID_HERMON_QDRG2V:
1252 1245 return (HERMON_HCA_MODE);
1253 1246 case PCI_DEVID_HERMON_MAINT:
1254 1247 return (HERMON_MAINTENANCE_MODE);
1255 1248 default:
1256 1249 return (0);
1257 1250 }
1258 1251 }
1259 1252
1260 1253 /*
1261 1254 * hermon_drv_init()
1262 1255 * Context: Only called from attach() path context
1263 1256 */
1264 1257 /* ARGSUSED */
1265 1258 static int
1266 1259 hermon_drv_init(hermon_state_t *state, dev_info_t *dip, int instance)
1267 1260 {
1268 1261 int status;
1269 1262
1270 1263 /* Retrieve PCI device, vendor and rev IDs */
1271 1264 state->hs_vendor_id = HERMON_GET_VENDOR_ID(state->hs_dip);
1272 1265 state->hs_device_id = HERMON_GET_DEVICE_ID(state->hs_dip);
1273 1266 state->hs_revision_id = HERMON_GET_REVISION_ID(state->hs_dip);
1274 1267
1275 1268 /*
1276 1269 * Check and set the operational mode of the device. If the driver is
1277 1270 * bound to the Hermon device in "maintenance mode", then this generally
1278 1271 * means that either the device has been specifically jumpered to
1279 1272 * start in this mode or the firmware boot process has failed to
1280 1273 * successfully load either the primary or the secondary firmware
1281 1274 * image.
1282 1275 */
1283 1276 state->hs_operational_mode = hermon_device_mode(state);
1284 1277 switch (state->hs_operational_mode) {
1285 1278 case HERMON_HCA_MODE:
1286 1279 state->hs_cfg_profile_setting = HERMON_CFG_MEMFREE;
1287 1280 break;
1288 1281 case HERMON_MAINTENANCE_MODE:
1289 1282 HERMON_FMANOTE(state, HERMON_FMA_MAINT);
1290 1283 state->hs_fm_degraded_reason = HCA_FW_MISC; /* not fw reason */
1291 1284 return (DDI_FAILURE);
1292 1285 default:
1293 1286 HERMON_FMANOTE(state, HERMON_FMA_PCIID);
1294 1287 HERMON_WARNING(state, "unexpected device type detected");
1295 1288 return (DDI_FAILURE);
1296 1289 }
1297 1290
1298 1291 /*
1299 1292 * Initialize the Hermon hardware.
1300 1293 *
1301 1294	 * Note: If this routine returns an error, it is usually a good
1302 1295	 * indication that the failure was caused by something firmware-
1303 1296	 * related or by a HW error.  (There are a few cases in which SW,
1304 1297	 * e.g. a SW resource shortage, can cause the failure, but the
1305 1298	 * majority of cases are due to either a firmware-related or a
1306 1299	 * HW-related error.)  In order to give the user an opportunity
1307 1300	 * (if desired) to update or reflash the Hermon firmware image, we
1308 1301	 * set the "hs_operational_mode" flag (described above) to indicate
1309 1302	 * that we wish to enter maintenance mode in case of a
1310 1303	 * firmware-related issue.
1311 1304 */
1312 1305 status = hermon_hw_init(state);
1313 1306 if (status != DDI_SUCCESS) {
1314 1307 cmn_err(CE_NOTE, "hermon%d: error during attach: %s", instance,
1315 1308 state->hs_attach_buf);
1316 1309 return (DDI_FAILURE);
1317 1310 }
1318 1311
1319 1312 /*
1320 1313 * Now that the ISR has been setup, arm all the EQs for event
1321 1314	 * Now that the ISR has been set up, arm all the EQs for event
1322 1315 */
1323 1316
1324 1317 status = hermon_eq_arm_all(state);
1325 1318 if (status != DDI_SUCCESS) {
1326 1319 cmn_err(CE_NOTE, "EQ Arm All failed\n");
1327 1320 hermon_hw_fini(state, HERMON_DRV_CLEANUP_ALL);
1328 1321 return (DDI_FAILURE);
1329 1322 }
1330 1323
1331 1324 /* test interrupts and event queues */
1332 1325 status = hermon_nop_post(state, 0x0, 0x0);
1333 1326 if (status != DDI_SUCCESS) {
1334 1327 cmn_err(CE_NOTE, "Interrupts/EQs failed\n");
1335 1328 hermon_hw_fini(state, HERMON_DRV_CLEANUP_ALL);
1336 1329 return (DDI_FAILURE);
1337 1330 }
1338 1331
1339 1332 /* Initialize Hermon softstate */
1340 1333 status = hermon_soft_state_init(state);
1341 1334 if (status != DDI_SUCCESS) {
1342 1335 cmn_err(CE_NOTE, "Failed to init soft state\n");
1343 1336 hermon_hw_fini(state, HERMON_DRV_CLEANUP_ALL);
1344 1337 return (DDI_FAILURE);
1345 1338 }
1346 1339
1347 1340 return (DDI_SUCCESS);
1348 1341 }
1349 1342
1350 1343
1351 1344 /*
1352 1345 * hermon_drv_fini()
1353 1346 * Context: Only called from attach() and/or detach() path contexts
1354 1347 */
1355 1348 static void
1356 1349 hermon_drv_fini(hermon_state_t *state)
1357 1350 {
1358 1351 /* Cleanup Hermon softstate */
1359 1352 hermon_soft_state_fini(state);
1360 1353
1361 1354 /* Cleanup Hermon resources and shutdown hardware */
1362 1355 hermon_hw_fini(state, HERMON_DRV_CLEANUP_ALL);
1363 1356 }
1364 1357
1365 1358
1366 1359 /*
1367 1360 * hermon_drv_fini2()
1368 1361 * Context: Only called from attach() and/or detach() path contexts
1369 1362 */
1370 1363 static void
1371 1364 hermon_drv_fini2(hermon_state_t *state)
1372 1365 {
1373 1366 if (state->hs_fm_poll_thread) {
1374 1367 ddi_periodic_delete(state->hs_fm_poll_thread);
1375 1368 state->hs_fm_poll_thread = NULL;
1376 1369 }
1377 1370
1378 1371 /* HERMON_DRV_CLEANUP_LEVEL1 */
1379 1372 if (state->hs_fm_cmdhdl) {
1380 1373 hermon_regs_map_free(state, &state->hs_fm_cmdhdl);
1381 1374 state->hs_fm_cmdhdl = NULL;
1382 1375 }
1383 1376
1384 1377 if (state->hs_reg_cmdhdl) {
1385 1378 ddi_regs_map_free(&state->hs_reg_cmdhdl);
1386 1379 state->hs_reg_cmdhdl = NULL;
1387 1380 }
1388 1381
1389 1382 /* HERMON_DRV_CLEANUP_LEVEL0 */
1390 1383 if (state->hs_msix_tbl_entries) {
1391 1384 kmem_free(state->hs_msix_tbl_entries,
1392 1385 state->hs_msix_tbl_size);
1393 1386 state->hs_msix_tbl_entries = NULL;
1394 1387 }
1395 1388
1396 1389 if (state->hs_msix_pba_entries) {
1397 1390 kmem_free(state->hs_msix_pba_entries,
1398 1391 state->hs_msix_pba_size);
1399 1392 state->hs_msix_pba_entries = NULL;
1400 1393 }
1401 1394
1402 1395 if (state->hs_fm_msix_tblhdl) {
1403 1396 hermon_regs_map_free(state, &state->hs_fm_msix_tblhdl);
1404 1397 state->hs_fm_msix_tblhdl = NULL;
1405 1398 }
1406 1399
1407 1400 if (state->hs_reg_msix_tblhdl) {
1408 1401 ddi_regs_map_free(&state->hs_reg_msix_tblhdl);
1409 1402 state->hs_reg_msix_tblhdl = NULL;
1410 1403 }
1411 1404
1412 1405 if (state->hs_fm_msix_pbahdl) {
1413 1406 hermon_regs_map_free(state, &state->hs_fm_msix_pbahdl);
1414 1407 state->hs_fm_msix_pbahdl = NULL;
1415 1408 }
1416 1409
1417 1410 if (state->hs_reg_msix_pbahdl) {
1418 1411 ddi_regs_map_free(&state->hs_reg_msix_pbahdl);
1419 1412 state->hs_reg_msix_pbahdl = NULL;
1420 1413 }
1421 1414
1422 1415 if (state->hs_fm_pcihdl) {
1423 1416 hermon_pci_config_teardown(state, &state->hs_fm_pcihdl);
1424 1417 state->hs_fm_pcihdl = NULL;
1425 1418 }
1426 1419
1427 1420 if (state->hs_reg_pcihdl) {
1428 1421 pci_config_teardown(&state->hs_reg_pcihdl);
1429 1422 state->hs_reg_pcihdl = NULL;
1430 1423 }
1431 1424 }
1432 1425
1433 1426
1434 1427 /*
1435 1428 * hermon_isr_init()
1436 1429 * Context: Only called from attach() path context
1437 1430 */
1438 1431 static int
1439 1432 hermon_isr_init(hermon_state_t *state)
1440 1433 {
1441 1434 int status;
1442 1435 int intr;
1443 1436
1444 1437 for (intr = 0; intr < state->hs_intrmsi_allocd; intr++) {
1445 1438
1446 1439 /*
1447 1440 * Add a handler for the interrupt or MSI
1448 1441 */
1449 1442 status = ddi_intr_add_handler(state->hs_intrmsi_hdl[intr],
1450 1443 hermon_isr, (caddr_t)state, (void *)(uintptr_t)intr);
1451 1444 if (status != DDI_SUCCESS) {
1452 1445 return (DDI_FAILURE);
1453 1446 }
1454 1447
1455 1448 /*
1456 1449 * Enable the software interrupt. Note: depending on the value
1457 1450 * returned in the capability flag, we have to call either
1458 1451 * ddi_intr_block_enable() or ddi_intr_enable().
1459 1452 */
1460 1453 if (state->hs_intrmsi_cap & DDI_INTR_FLAG_BLOCK) {
1461 1454 status = ddi_intr_block_enable(
1462 1455 &state->hs_intrmsi_hdl[intr], 1);
1463 1456 if (status != DDI_SUCCESS) {
1464 1457 return (DDI_FAILURE);
1465 1458 }
1466 1459 } else {
1467 1460 status = ddi_intr_enable(state->hs_intrmsi_hdl[intr]);
1468 1461 if (status != DDI_SUCCESS) {
1469 1462 return (DDI_FAILURE);
1470 1463 }
1471 1464 }
1472 1465 }
1473 1466
1474 1467 /*
1475 1468	 * Now that the ISR has been enabled, defer arming all the EQs for
1476 1469	 * event generation until later, in case MSI-X is enabled.
1477 1470 */
1478 1471 return (DDI_SUCCESS);
1479 1472 }
1480 1473
1481 1474
1482 1475 /*
1483 1476 * hermon_isr_fini()
1484 1477 * Context: Only called from attach() and/or detach() path contexts
1485 1478 */
1486 1479 static void
1487 1480 hermon_isr_fini(hermon_state_t *state)
1488 1481 {
1489 1482 int intr;
1490 1483
1491 1484 for (intr = 0; intr < state->hs_intrmsi_allocd; intr++) {
1492 1485 /* Disable the software interrupt */
1493 1486 if (state->hs_intrmsi_cap & DDI_INTR_FLAG_BLOCK) {
1494 1487 (void) ddi_intr_block_disable(
1495 1488 &state->hs_intrmsi_hdl[intr], 1);
1496 1489 } else {
1497 1490 (void) ddi_intr_disable(state->hs_intrmsi_hdl[intr]);
1498 1491 }
1499 1492
1500 1493 /*
1501 1494 * Remove the software handler for the interrupt or MSI
1502 1495 */
1503 1496 (void) ddi_intr_remove_handler(state->hs_intrmsi_hdl[intr]);
1504 1497 }
1505 1498 }
1506 1499
1507 1500
1508 1501 /*
1509 1502 * Sum of ICM configured values:
1510 1503 * cMPT, dMPT, MTT, QPC, SRQC, RDB, CQC, ALTC, AUXC, EQC, MCG
1511 1504 *
1512 1505 */
1513 1506 static uint64_t
1514 1507 hermon_size_icm(hermon_state_t *state)
1515 1508 {
1516 1509 hermon_hw_querydevlim_t *devlim;
1517 1510 hermon_cfg_profile_t *cfg;
1518 1511 uint64_t num_cmpts, num_dmpts, num_mtts;
1519 1512 uint64_t num_qpcs, num_srqc, num_rdbs;
1520 1513 #ifndef HERMON_FW_WORKAROUND
1521 1514 uint64_t num_auxc;
1522 1515 #endif
1523 1516 uint64_t num_cqcs, num_altc;
1524 1517 uint64_t num_eqcs, num_mcgs;
1525 1518 uint64_t size;
1526 1519
1527 1520 devlim = &state->hs_devlim;
1528 1521 cfg = state->hs_cfg_profile;
1529 1522 /* number of respective entries */
1530 1523 num_cmpts = (uint64_t)0x1 << cfg->cp_log_num_cmpt;
1531 1524 num_mtts = (uint64_t)0x1 << cfg->cp_log_num_mtt;
1532 1525 num_dmpts = (uint64_t)0x1 << cfg->cp_log_num_dmpt;
1533 1526 num_qpcs = (uint64_t)0x1 << cfg->cp_log_num_qp;
1534 1527 num_srqc = (uint64_t)0x1 << cfg->cp_log_num_srq;
1535 1528 num_rdbs = (uint64_t)0x1 << cfg->cp_log_num_rdb;
1536 1529 num_cqcs = (uint64_t)0x1 << cfg->cp_log_num_cq;
1537 1530 num_altc = (uint64_t)0x1 << cfg->cp_log_num_qp;
1538 1531 #ifndef HERMON_FW_WORKAROUND
1539 1532 num_auxc = (uint64_t)0x1 << cfg->cp_log_num_qp;
1540 1533 #endif
1541 1534 num_eqcs = (uint64_t)0x1 << cfg->cp_log_num_eq;
1542 1535 num_mcgs = (uint64_t)0x1 << cfg->cp_log_num_mcg;
1543 1536
1544 1537 size =
1545 1538 num_cmpts * devlim->cmpt_entry_sz +
1546 1539 num_dmpts * devlim->dmpt_entry_sz +
1547 1540 num_mtts * devlim->mtt_entry_sz +
1548 1541 num_qpcs * devlim->qpc_entry_sz +
1549 1542 num_srqc * devlim->srq_entry_sz +
1550 1543 num_rdbs * devlim->rdmardc_entry_sz +
1551 1544 num_cqcs * devlim->cqc_entry_sz +
1552 1545 num_altc * devlim->altc_entry_sz +
1553 1546 #ifdef HERMON_FW_WORKAROUND
1554 1547 0x80000000ull +
1555 1548 #else
1556 1549 num_auxc * devlim->aux_entry_sz +
1557 1550 #endif
1558 1551 num_eqcs * devlim->eqc_entry_sz +
1559 1552 num_mcgs * HERMON_MCGMEM_SZ(state);
1560 1553 return (size);
1561 1554 }
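As a rough illustration of the arithmetic above (the numbers are hypothetical, not the driver's configured defaults): with cp_log_num_qp = 17 and a 256-byte qpc_entry_sz, the QPC term alone contributes 2^17 * 256 bytes = 32 MB to the total ICM size; every other term is computed the same way from its own log-count and per-entry size.

	/* hypothetical numbers, for illustration only */
	uint64_t qpc_term = ((uint64_t)1 << 17) * 256;	/* 32 MB of ICM for QPCs */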
1562 1555
1563 1556
1564 1557 /*
1565 1558 * hermon_hw_init()
1566 1559 * Context: Only called from attach() path context
1567 1560 */
1568 1561 static int
1569 1562 hermon_hw_init(hermon_state_t *state)
1570 1563 {
1571 1564 hermon_drv_cleanup_level_t cleanup;
1572 1565 sm_nodeinfo_t nodeinfo;
1573 1566 uint64_t clr_intr_offset;
1574 1567 int status;
1575 1568	uint32_t		fw_size;	/* in pages */
1576 1569 uint64_t offset;
1577 1570
1578 1571 /* This is where driver initialization begins */
1579 1572 cleanup = HERMON_DRV_CLEANUP_LEVEL0;
1580 1573
1581 1574 /* Setup device access attributes */
1582 1575 state->hs_reg_accattr.devacc_attr_version = DDI_DEVICE_ATTR_V1;
1583 1576 state->hs_reg_accattr.devacc_attr_endian_flags = DDI_STRUCTURE_BE_ACC;
1584 1577 state->hs_reg_accattr.devacc_attr_dataorder = DDI_STRICTORDER_ACC;
1585 1578 state->hs_reg_accattr.devacc_attr_access = DDI_DEFAULT_ACC;
1586 1579
1587 1580 /* Setup fma-protected access attributes */
1588 1581 state->hs_fm_accattr.devacc_attr_version =
1589 1582 hermon_devacc_attr_version(state);
1590 1583 state->hs_fm_accattr.devacc_attr_endian_flags = DDI_STRUCTURE_BE_ACC;
1591 1584 state->hs_fm_accattr.devacc_attr_dataorder = DDI_STRICTORDER_ACC;
1592 1585 /* set acc err protection type */
1593 1586 state->hs_fm_accattr.devacc_attr_access =
1594 1587 hermon_devacc_attr_access(state);
1595 1588
1596 1589 /* Setup for PCI config read/write of HCA device */
1597 1590 status = hermon_pci_config_setup(state, &state->hs_fm_pcihdl);
1598 1591 if (status != DDI_SUCCESS) {
1599 1592 hermon_hw_fini(state, cleanup);
1600 1593 HERMON_ATTACH_MSG(state->hs_attach_buf,
1601 1594 "hw_init_PCI_config_space_regmap_fail");
1602 1595 /* This case is not the degraded one */
1603 1596 return (DDI_FAILURE);
1604 1597 }
1605 1598
1606 1599 /* Map PCI config space and MSI-X tables/pba */
1607 1600 hermon_set_msix_info(state);
1608 1601
1609 1602 /* Map in Hermon registers (CMD, UAR, MSIX) and setup offsets */
1610 1603 status = hermon_regs_map_setup(state, HERMON_CMD_BAR,
1611 1604 &state->hs_reg_cmd_baseaddr, 0, 0, &state->hs_fm_accattr,
1612 1605 &state->hs_fm_cmdhdl);
1613 1606 if (status != DDI_SUCCESS) {
1614 1607 hermon_hw_fini(state, cleanup);
1615 1608 HERMON_ATTACH_MSG(state->hs_attach_buf,
1616 1609 "hw_init_CMD_BAR_regmap_fail");
1617 1610 /* This case is not the degraded one */
1618 1611 return (DDI_FAILURE);
1619 1612 }
1620 1613
1621 1614 cleanup = HERMON_DRV_CLEANUP_LEVEL1;
1622 1615 /*
1623 1616 * We defer UAR-BAR mapping until later. Need to know if
1624 1617 * blueflame mapping is to be done, and don't know that until after
1625 1618 * we get the dev_caps, so do it right after that
1626 1619 */
1627 1620
1628 1621 /*
1629 1622 * There is a third BAR defined for Hermon - it is for MSIX
1630 1623 *
1631 1624 	 * Will need to explore its possible need/use w/ Mellanox
1632 1625 * [es] Temporary mapping maybe
1633 1626 */
1634 1627
1635 1628 #ifdef HERMON_SUPPORTS_MSIX_BAR
1636 1629 status = ddi_regs_map_setup(state->hs_dip, HERMON_MSIX_BAR,
1637 1630 &state->hs_reg_msi_baseaddr, 0, 0, &state->hs_reg_accattr,
1638 1631 &state->hs_reg_msihdl);
1639 1632 if (status != DDI_SUCCESS) {
1640 1633 hermon_hw_fini(state, cleanup);
1641 1634 HERMON_ATTACH_MSG(state->hs_attach_buf,
1642 1635 "hw_init_MSIX_BAR_regmap_fail");
1643 1636 /* This case is not the degraded one */
1644 1637 return (DDI_FAILURE);
1645 1638 }
1646 1639 #endif
1647 1640
1648 1641 cleanup = HERMON_DRV_CLEANUP_LEVEL2;
1649 1642
1650 1643 /*
1651 1644 * Save interesting registers away. The offsets of the first two
1652 1645 * here (HCR and sw_reset) are detailed in the PRM, the others are
1653 1646 * derived from values in the QUERY_FW output, so we'll save them
1654 1647 * off later.
1655 1648 */
1656 1649 /* Host Command Register (HCR) */
1657 1650 state->hs_cmd_regs.hcr = (hermon_hw_hcr_t *)
1658 1651 ((uintptr_t)state->hs_reg_cmd_baseaddr + HERMON_CMD_HCR_OFFSET);
1659 1652 state->hs_cmd_toggle = 0; /* initialize it for use */
1660 1653
1661 1654 /* Software Reset register (sw_reset) and semaphore */
1662 1655 state->hs_cmd_regs.sw_reset = (uint32_t *)
1663 1656 ((uintptr_t)state->hs_reg_cmd_baseaddr +
1664 1657 HERMON_CMD_SW_RESET_OFFSET);
1665 1658 state->hs_cmd_regs.sw_semaphore = (uint32_t *)
1666 1659 ((uintptr_t)state->hs_reg_cmd_baseaddr +
1667 1660 HERMON_CMD_SW_SEMAPHORE_OFFSET);
1668 1661
1669 1662 /* make sure init'd before we start filling things in */
1670 1663 bzero(&state->hs_hcaparams, sizeof (struct hermon_hw_initqueryhca_s));
1671 1664
1672 1665 /* Initialize the Phase1 configuration profile */
1673 1666 status = hermon_cfg_profile_init_phase1(state);
1674 1667 if (status != DDI_SUCCESS) {
1675 1668 hermon_hw_fini(state, cleanup);
1676 1669 HERMON_ATTACH_MSG(state->hs_attach_buf,
1677 1670 "hw_init_cfginit1_fail");
1678 1671 /* This case is not the degraded one */
1679 1672 return (DDI_FAILURE);
1680 1673 }
1681 1674 cleanup = HERMON_DRV_CLEANUP_LEVEL3;
1682 1675
1683 1676 /* Do a software reset of the adapter to ensure proper state */
1684 1677 status = hermon_sw_reset(state);
1685 1678 if (status != HERMON_CMD_SUCCESS) {
1686 1679 hermon_hw_fini(state, cleanup);
1687 1680 HERMON_ATTACH_MSG(state->hs_attach_buf,
1688 1681 "hw_init_sw_reset_fail");
1689 1682 /* This case is not the degraded one */
1690 1683 return (DDI_FAILURE);
1691 1684 }
1692 1685
1693 1686 /* Initialize mailboxes */
1694 1687 status = hermon_rsrc_init_phase1(state);
1695 1688 if (status != DDI_SUCCESS) {
1696 1689 hermon_hw_fini(state, cleanup);
1697 1690 HERMON_ATTACH_MSG(state->hs_attach_buf,
1698 1691 "hw_init_rsrcinit1_fail");
1699 1692 /* This case is not the degraded one */
1700 1693 return (DDI_FAILURE);
1701 1694 }
1702 1695 cleanup = HERMON_DRV_CLEANUP_LEVEL4;
1703 1696
1704 1697 /* Post QUERY_FW */
1705 1698 status = hermon_cmn_query_cmd_post(state, QUERY_FW, 0, 0, &state->hs_fw,
1706 1699 sizeof (hermon_hw_queryfw_t), HERMON_CMD_NOSLEEP_SPIN);
1707 1700 if (status != HERMON_CMD_SUCCESS) {
1708 1701 cmn_err(CE_NOTE, "QUERY_FW command failed: %08x\n", status);
1709 1702 hermon_hw_fini(state, cleanup);
1710 1703 HERMON_ATTACH_MSG(state->hs_attach_buf,
1711 1704 "hw_init_query_fw_cmd_fail");
1712 1705 /* This case is not the degraded one */
1713 1706 return (DDI_FAILURE);
1714 1707 }
1715 1708
1716 1709 	/* Validate that the HERMON FW version is appropriate */
1717 1710
1718 1711 status = hermon_fw_version_check(state);
1719 1712 if (status != DDI_SUCCESS) {
1720 1713 HERMON_FMANOTE(state, HERMON_FMA_FWVER);
1721 1714 if (state->hs_operational_mode == HERMON_HCA_MODE) {
1722 1715 cmn_err(CE_CONT, "Unsupported Hermon FW version: "
1723 1716 "expected: %04d.%04d.%04d, "
1724 1717 "actual: %04d.%04d.%04d\n",
1725 1718 HERMON_FW_VER_MAJOR,
1726 1719 HERMON_FW_VER_MINOR,
1727 1720 HERMON_FW_VER_SUBMINOR,
1728 1721 state->hs_fw.fw_rev_major,
1729 1722 state->hs_fw.fw_rev_minor,
1730 1723 state->hs_fw.fw_rev_subminor);
1731 1724 } else {
1732 1725 cmn_err(CE_CONT, "Unsupported FW version: "
1733 1726 "%04d.%04d.%04d\n",
1734 1727 state->hs_fw.fw_rev_major,
1735 1728 state->hs_fw.fw_rev_minor,
1736 1729 state->hs_fw.fw_rev_subminor);
1737 1730 }
1738 1731 state->hs_operational_mode = HERMON_MAINTENANCE_MODE;
1739 1732 state->hs_fm_degraded_reason = HCA_FW_MISMATCH;
1740 1733 hermon_hw_fini(state, cleanup);
1741 1734 HERMON_ATTACH_MSG(state->hs_attach_buf,
1742 1735 "hw_init_checkfwver_fail");
1743 1736 /* This case is the degraded one */
1744 1737 return (HERMON_CMD_BAD_NVMEM);
1745 1738 }
1746 1739
1747 1740 /*
1748 1741 * Save off the rest of the interesting registers that we'll be using.
1749 1742 * Setup the offsets for the other registers.
1750 1743 */
1751 1744
1752 1745 /*
1753 1746 	 * Hermon takes the intr_offset from the BAR - technically we should
1754 1747 	 * get the BAR info from the response, but the PRM says it's from
1755 1748 	 * BAR0-1, which for us is the CMD BAR
1756 1749 */
1757 1750
1758 1751 clr_intr_offset = state->hs_fw.clr_intr_offs & HERMON_CMD_OFFSET_MASK;
1759 1752
1760 1753 /* Save Clear Interrupt address */
1761 1754 state->hs_cmd_regs.clr_intr = (uint64_t *)
1762 1755 (uintptr_t)(state->hs_reg_cmd_baseaddr + clr_intr_offset);
1763 1756
1764 1757 /*
1765 1758 * Set the error buffer also into the structure - used in hermon_event.c
1766 1759 * to check for internal error on the HCA, not reported in eqe or
1767 1760 * (necessarily) by interrupt
1768 1761 */
1769 1762 state->hs_cmd_regs.fw_err_buf = (uint32_t *)(uintptr_t)
1770 1763 (state->hs_reg_cmd_baseaddr + state->hs_fw.error_buf_addr);
1771 1764
1772 1765 /*
1773 1766 * Invoke a polling thread to check the error buffer periodically.
1774 1767 */
1775 1768 if (!hermon_no_inter_err_chk) {
1776 1769 state->hs_fm_poll_thread = ddi_periodic_add(
1777 1770 hermon_inter_err_chk, (void *)state, FM_POLL_INTERVAL,
1778 1771 DDI_IPL_0);
1779 1772 }
1780 1773
1781 1774 cleanup = HERMON_DRV_CLEANUP_LEVEL5;
1782 1775
1783 1776 /*
1784 1777 * Allocate, map, and run the HCA Firmware.
1785 1778 */
1786 1779
1787 1780 /* Allocate memory for the firmware to load into and map it */
1788 1781
1789 1782 /* get next higher power of 2 */
1790 1783 fw_size = 1 << highbit(state->hs_fw.fw_pages);
1791 1784 state->hs_fw_dma.length = fw_size << HERMON_PAGESHIFT;
1792 1785 status = hermon_dma_alloc(state, &state->hs_fw_dma, MAP_FA);
1793 1786 if (status != DDI_SUCCESS) {
1794 1787 cmn_err(CE_NOTE, "FW alloc failed\n");
1795 1788 hermon_hw_fini(state, cleanup);
1796 1789 HERMON_ATTACH_MSG(state->hs_attach_buf,
1797 1790 "hw_init_dma_alloc_fw_fail");
1798 1791 /* This case is not the degraded one */
1799 1792 return (DDI_FAILURE);
1800 1793 }
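/*
 * A small user-space sketch (not driver code) of the rounding done above:
 * the DDI highbit() returns the 1-based index of the highest set bit, so
 * (1 << highbit(n)) is the smallest power of two strictly greater than n,
 * and shifting by the page shift converts pages to bytes. The page count
 * and the 4KB page shift below are assumptions for illustration only.
 */
#include <stdint.h>
#include <stdio.h>

static int
my_highbit(uint64_t n)		/* stand-in for the DDI highbit() */
{
	int b = 0;

	while (n != 0) {
		b++;
		n >>= 1;
	}
	return (b);
}

int
main(void)
{
	uint32_t fw_pages = 1000;	/* hypothetical QUERY_FW page count */
	uint64_t fw_size = (uint64_t)1 << my_highbit(fw_pages);
	uint64_t bytes = fw_size << 12;	/* assuming a 4KB HCA page */

	(void) printf("%u pages -> %llu pages -> %llu bytes\n",
	    fw_pages, (unsigned long long)fw_size, (unsigned long long)bytes);
	return (0);
}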
1801 1794
1802 1795 cleanup = HERMON_DRV_CLEANUP_LEVEL6;
1803 1796
1804 1797 /* Invoke the RUN_FW cmd to run the firmware */
1805 1798 status = hermon_run_fw_cmd_post(state);
1806 1799 if (status != DDI_SUCCESS) {
1807 1800 cmn_err(CE_NOTE, "RUN_FW command failed: 0x%08x\n", status);
1808 1801 if (status == HERMON_CMD_BAD_NVMEM) {
1809 1802 state->hs_operational_mode = HERMON_MAINTENANCE_MODE;
1810 1803 state->hs_fm_degraded_reason = HCA_FW_CORRUPT;
1811 1804 }
1812 1805 hermon_hw_fini(state, cleanup);
1813 1806 HERMON_ATTACH_MSG(state->hs_attach_buf, "hw_init_run_fw_fail");
1814 1807 /*
1815 1808 * If the status is HERMON_CMD_BAD_NVMEM, it's likely the
1816 1809 * firmware is corrupted, so the mode falls into the
1817 1810 * maintenance mode.
1818 1811 */
1819 1812 return (status == HERMON_CMD_BAD_NVMEM ? HERMON_CMD_BAD_NVMEM :
1820 1813 DDI_FAILURE);
1821 1814 }
1822 1815
1823 1816
1824 1817 /*
1825 1818 * QUERY DEVICE LIMITS/CAPABILITIES
1826 1819 * NOTE - in Hermon, the command is changed to QUERY_DEV_CAP,
1827 1820 * but for familiarity we have kept the structure name the
1828 1821 * same as Tavor/Arbel
1829 1822 */
1830 1823
1831 1824 status = hermon_cmn_query_cmd_post(state, QUERY_DEV_CAP, 0, 0,
1832 1825 &state->hs_devlim, sizeof (hermon_hw_querydevlim_t),
1833 1826 HERMON_CMD_NOSLEEP_SPIN);
1834 1827 if (status != HERMON_CMD_SUCCESS) {
1835 1828 cmn_err(CE_NOTE, "QUERY_DEV_CAP command failed: 0x%08x\n",
1836 1829 status);
1837 1830 hermon_hw_fini(state, cleanup);
1838 1831 HERMON_ATTACH_MSG(state->hs_attach_buf, "hw_init_devcap_fail");
1839 1832 /* This case is not the degraded one */
1840 1833 return (DDI_FAILURE);
1841 1834 }
1842 1835
1843 1836 state->hs_rsvd_eqs = max(state->hs_devlim.num_rsvd_eq,
1844 1837 (4 * state->hs_devlim.num_rsvd_uar));
1845 1838
1846 1839 /* now we have enough info to map in the UAR BAR */
1847 1840 /*
1848 1841 	 * First, we figure out how to map the UAR BAR - use only half if
1849 1842 	 * BlueFlame is enabled. In that case the mapped length is half the
1850 1843 	 * BAR: (1 << (log_max_uar_sz + 20)) bytes, i.e. 2^log_max_uar_sz MB.
1851 1844 */
1852 1845
1853 1846 if (state->hs_devlim.blu_flm) { /* Blue Flame Enabled */
1854 1847 offset = (uint64_t)1 << (state->hs_devlim.log_max_uar_sz + 20);
1855 1848 } else {
1856 1849 offset = 0; /* a zero length means map the whole thing */
1857 1850 }
1858 1851 status = hermon_regs_map_setup(state, HERMON_UAR_BAR,
1859 1852 &state->hs_reg_uar_baseaddr, 0, offset, &state->hs_fm_accattr,
1860 1853 &state->hs_fm_uarhdl);
1861 1854 if (status != DDI_SUCCESS) {
1862 1855 HERMON_ATTACH_MSG(state->hs_attach_buf, "UAR BAR mapping");
1863 1856 /* This case is not the degraded one */
1864 1857 return (DDI_FAILURE);
1865 1858 }
1866 1859
1867 1860 /* and if BlueFlame is enabled, map the other half there */
1868 1861 if (state->hs_devlim.blu_flm) { /* Blue Flame Enabled */
1869 1862 offset = (uint64_t)1 << (state->hs_devlim.log_max_uar_sz + 20);
1870 1863 status = ddi_regs_map_setup(state->hs_dip, HERMON_UAR_BAR,
1871 1864 &state->hs_reg_bf_baseaddr, offset, offset,
1872 1865 &state->hs_reg_accattr, &state->hs_reg_bfhdl);
1873 1866 if (status != DDI_SUCCESS) {
1874 1867 HERMON_ATTACH_MSG(state->hs_attach_buf,
1875 1868 "BlueFlame BAR mapping");
1876 1869 /* This case is not the degraded one */
1877 1870 return (DDI_FAILURE);
1878 1871 }
1879 1872 /* This will be used in hw_fini if we fail to init. */
1880 1873 state->hs_bf_offset = offset;
1881 1874 }
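/*
 * Sketch (not driver code) of the UAR/BlueFlame BAR split above, using a
 * hypothetical log_max_uar_sz: the lower half of the UAR BAR is mapped for
 * normal doorbells and the upper half, starting at 'offset' and of the
 * same length, is mapped for BlueFlame.
 */
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	int log_max_uar_sz = 10;	/* hypothetical QUERY_DEV_CAP value */
	uint64_t offset = (uint64_t)1 << (log_max_uar_sz + 20);

	(void) printf("UAR half:       [0x0, 0x%llx)\n",
	    (unsigned long long)offset);
	(void) printf("BlueFlame half: [0x%llx, 0x%llx)\n",
	    (unsigned long long)offset, (unsigned long long)(2 * offset));
	return (0);
}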
1882 1875 cleanup = HERMON_DRV_CLEANUP_LEVEL7;
1883 1876
1884 1877 /* Hermon has a couple of things needed for phase 2 in query port */
1885 1878
1886 1879 status = hermon_cmn_query_cmd_post(state, QUERY_PORT, 0, 0x01,
1887 1880 &state->hs_queryport, sizeof (hermon_hw_query_port_t),
1888 1881 HERMON_CMD_NOSLEEP_SPIN);
1889 1882 if (status != HERMON_CMD_SUCCESS) {
1890 1883 cmn_err(CE_NOTE, "QUERY_PORT command failed: 0x%08x\n",
1891 1884 status);
1892 1885 hermon_hw_fini(state, cleanup);
1893 1886 HERMON_ATTACH_MSG(state->hs_attach_buf,
1894 1887 "hw_init_queryport_fail");
1895 1888 /* This case is not the degraded one */
1896 1889 return (DDI_FAILURE);
1897 1890 }
1898 1891
1899 1892 /* Initialize the Phase2 Hermon configuration profile */
1900 1893 status = hermon_cfg_profile_init_phase2(state);
1901 1894 if (status != DDI_SUCCESS) {
1902 1895 cmn_err(CE_NOTE, "CFG phase 2 failed: 0x%08x\n", status);
1903 1896 hermon_hw_fini(state, cleanup);
1904 1897 HERMON_ATTACH_MSG(state->hs_attach_buf,
1905 1898 "hw_init_cfginit2_fail");
1906 1899 /* This case is not the degraded one */
1907 1900 return (DDI_FAILURE);
1908 1901 }
1909 1902
1910 1903 /* Determine and set the ICM size */
1911 1904 state->hs_icm_sz = hermon_size_icm(state);
1912 1905 status = hermon_set_icm_size_cmd_post(state);
1913 1906 if (status != DDI_SUCCESS) {
1914 1907 cmn_err(CE_NOTE, "Hermon: SET_ICM_SIZE cmd failed: 0x%08x\n",
1915 1908 status);
1916 1909 hermon_hw_fini(state, cleanup);
1917 1910 HERMON_ATTACH_MSG(state->hs_attach_buf,
1918 1911 "hw_init_seticmsz_fail");
1919 1912 /* This case is not the degraded one */
1920 1913 return (DDI_FAILURE);
1921 1914 }
1922 1915 /* alloc icm aux physical memory and map it */
1923 1916
1924 1917 state->hs_icma_dma.length = 1 << highbit(state->hs_icma_sz);
1925 1918
1926 1919 status = hermon_dma_alloc(state, &state->hs_icma_dma, MAP_ICM_AUX);
1927 1920 if (status != DDI_SUCCESS) {
1928 1921 cmn_err(CE_NOTE, "failed to alloc (0x%llx) bytes for ICMA\n",
1929 1922 (longlong_t)state->hs_icma_dma.length);
1930 1923 hermon_hw_fini(state, cleanup);
1931 1924 HERMON_ATTACH_MSG(state->hs_attach_buf,
1932 1925 "hw_init_dma_alloc_icm_aux_fail");
1933 1926 /* This case is not the degraded one */
1934 1927 return (DDI_FAILURE);
1935 1928 }
1936 1929 cleanup = HERMON_DRV_CLEANUP_LEVEL8;
1937 1930
1938 1931 cleanup = HERMON_DRV_CLEANUP_LEVEL9;
1939 1932
1940 1933 /* Allocate an array of structures to house the ICM tables */
1941 1934 state->hs_icm = kmem_zalloc(HERMON_NUM_ICM_RESOURCES *
1942 1935 sizeof (hermon_icm_table_t), KM_SLEEP);
1943 1936
1944 1937 /* Set up the ICM address space and the INIT_HCA command input */
1945 1938 status = hermon_icm_config_setup(state, &state->hs_hcaparams);
1946 1939 if (status != HERMON_CMD_SUCCESS) {
1947 1940 cmn_err(CE_NOTE, "ICM configuration failed\n");
1948 1941 hermon_hw_fini(state, cleanup);
1949 1942 HERMON_ATTACH_MSG(state->hs_attach_buf,
1950 1943 "hw_init_icm_config_setup_fail");
1951 1944 /* This case is not the degraded one */
1952 1945 return (DDI_FAILURE);
1953 1946 }
1954 1947 cleanup = HERMON_DRV_CLEANUP_LEVEL10;
1955 1948
1956 1949 /* Initialize the adapter with the INIT_HCA cmd */
1957 1950 status = hermon_init_hca_cmd_post(state, &state->hs_hcaparams,
1958 1951 HERMON_CMD_NOSLEEP_SPIN);
1959 1952 if (status != HERMON_CMD_SUCCESS) {
1960 1953 cmn_err(CE_NOTE, "INIT_HCA command failed: %08x\n", status);
1961 1954 hermon_hw_fini(state, cleanup);
1962 1955 HERMON_ATTACH_MSG(state->hs_attach_buf, "hw_init_hca_fail");
1963 1956 /* This case is not the degraded one */
1964 1957 return (DDI_FAILURE);
1965 1958 }
1966 1959 cleanup = HERMON_DRV_CLEANUP_LEVEL11;
1967 1960
1968 1961 /* Enter the second phase of init for Hermon configuration/resources */
1969 1962 status = hermon_rsrc_init_phase2(state);
1970 1963 if (status != DDI_SUCCESS) {
1971 1964 hermon_hw_fini(state, cleanup);
1972 1965 HERMON_ATTACH_MSG(state->hs_attach_buf,
1973 1966 "hw_init_rsrcinit2_fail");
1974 1967 /* This case is not the degraded one */
1975 1968 return (DDI_FAILURE);
1976 1969 }
1977 1970 cleanup = HERMON_DRV_CLEANUP_LEVEL12;
1978 1971
1979 1972 /* Query the adapter via QUERY_ADAPTER */
1980 1973 status = hermon_cmn_query_cmd_post(state, QUERY_ADAPTER, 0, 0,
1981 1974 &state->hs_adapter, sizeof (hermon_hw_queryadapter_t),
1982 1975 HERMON_CMD_NOSLEEP_SPIN);
1983 1976 if (status != HERMON_CMD_SUCCESS) {
1984 1977 cmn_err(CE_NOTE, "Hermon: QUERY_ADAPTER command failed: %08x\n",
1985 1978 status);
1986 1979 hermon_hw_fini(state, cleanup);
1987 1980 HERMON_ATTACH_MSG(state->hs_attach_buf,
1988 1981 "hw_init_query_adapter_fail");
1989 1982 /* This case is not the degraded one */
1990 1983 return (DDI_FAILURE);
1991 1984 }
1992 1985
1993 1986 /* Allocate protection domain (PD) for Hermon internal use */
1994 1987 status = hermon_pd_alloc(state, &state->hs_pdhdl_internal,
1995 1988 HERMON_SLEEP);
1996 1989 if (status != DDI_SUCCESS) {
1997 1990 cmn_err(CE_NOTE, "failed to alloc internal PD\n");
1998 1991 hermon_hw_fini(state, cleanup);
1999 1992 HERMON_ATTACH_MSG(state->hs_attach_buf,
2000 1993 "hw_init_internal_pd_alloc_fail");
2001 1994 /* This case is not the degraded one */
2002 1995 return (DDI_FAILURE);
2003 1996 }
2004 1997 cleanup = HERMON_DRV_CLEANUP_LEVEL13;
2005 1998
2006 1999 /* Setup UAR page for kernel use */
2007 2000 status = hermon_internal_uarpg_init(state);
2008 2001 if (status != DDI_SUCCESS) {
2009 2002 cmn_err(CE_NOTE, "failed to setup internal UAR\n");
2010 2003 hermon_hw_fini(state, cleanup);
2011 2004 HERMON_ATTACH_MSG(state->hs_attach_buf,
2012 2005 "hw_init_internal_uarpg_alloc_fail");
2013 2006 /* This case is not the degraded one */
2014 2007 return (DDI_FAILURE);
2015 2008 }
2016 2009 cleanup = HERMON_DRV_CLEANUP_LEVEL14;
2017 2010
2018 2011 /* Query and initialize the Hermon interrupt/MSI information */
2019 2012 status = hermon_intr_or_msi_init(state);
2020 2013 if (status != DDI_SUCCESS) {
2021 2014 cmn_err(CE_NOTE, "failed to setup INTR/MSI\n");
2022 2015 hermon_hw_fini(state, cleanup);
2023 2016 HERMON_ATTACH_MSG(state->hs_attach_buf,
2024 2017 "hw_init_intr_or_msi_init_fail");
2025 2018 /* This case is not the degraded one */
2026 2019 return (DDI_FAILURE);
2027 2020 }
2028 2021 cleanup = HERMON_DRV_CLEANUP_LEVEL15;
2029 2022
2030 2023 status = hermon_isr_init(state); /* set up the isr */
2031 2024 if (status != DDI_SUCCESS) {
2032 2025 cmn_err(CE_NOTE, "failed to init isr\n");
2033 2026 hermon_hw_fini(state, cleanup);
2034 2027 HERMON_ATTACH_MSG(state->hs_attach_buf,
2035 2028 "hw_init_isrinit_fail");
2036 2029 /* This case is not the degraded one */
2037 2030 return (DDI_FAILURE);
2038 2031 }
2039 2032 cleanup = HERMON_DRV_CLEANUP_LEVEL16;
2040 2033
2041 2034 /* Setup the event queues */
2042 2035 status = hermon_eq_init_all(state);
2043 2036 if (status != DDI_SUCCESS) {
2044 2037 cmn_err(CE_NOTE, "failed to init EQs\n");
2045 2038 hermon_hw_fini(state, cleanup);
2046 2039 HERMON_ATTACH_MSG(state->hs_attach_buf,
2047 2040 "hw_init_eqinitall_fail");
2048 2041 /* This case is not the degraded one */
2049 2042 return (DDI_FAILURE);
2050 2043 }
2051 2044 cleanup = HERMON_DRV_CLEANUP_LEVEL17;
2052 2045
2053 2046
2054 2047
2055 2048 /* Reserve contexts for QP0 and QP1 */
2056 2049 status = hermon_special_qp_contexts_reserve(state);
2057 2050 if (status != DDI_SUCCESS) {
2058 2051 cmn_err(CE_NOTE, "failed to init special QPs\n");
2059 2052 hermon_hw_fini(state, cleanup);
2060 2053 HERMON_ATTACH_MSG(state->hs_attach_buf,
2061 2054 "hw_init_rsrv_sqp_fail");
2062 2055 /* This case is not the degraded one */
2063 2056 return (DDI_FAILURE);
2064 2057 }
2065 2058 cleanup = HERMON_DRV_CLEANUP_LEVEL18;
2066 2059
2067 2060 /* Initialize for multicast group handling */
2068 2061 status = hermon_mcg_init(state);
2069 2062 if (status != DDI_SUCCESS) {
2070 2063 cmn_err(CE_NOTE, "failed to init multicast\n");
2071 2064 hermon_hw_fini(state, cleanup);
2072 2065 HERMON_ATTACH_MSG(state->hs_attach_buf,
2073 2066 "hw_init_mcg_init_fail");
2074 2067 /* This case is not the degraded one */
2075 2068 return (DDI_FAILURE);
2076 2069 }
2077 2070 cleanup = HERMON_DRV_CLEANUP_LEVEL19;
2078 2071
2079 2072 /* Initialize the Hermon IB port(s) */
2080 2073 status = hermon_hca_port_init(state);
2081 2074 if (status != DDI_SUCCESS) {
2082 2075 cmn_err(CE_NOTE, "failed to init HCA Port\n");
2083 2076 hermon_hw_fini(state, cleanup);
2084 2077 HERMON_ATTACH_MSG(state->hs_attach_buf,
2085 2078 "hw_init_hca_port_init_fail");
2086 2079 /* This case is not the degraded one */
2087 2080 return (DDI_FAILURE);
2088 2081 }
2089 2082
2090 2083 cleanup = HERMON_DRV_CLEANUP_ALL;
2091 2084
2092 2085 /* Determine NodeGUID and SystemImageGUID */
2093 2086 status = hermon_getnodeinfo_cmd_post(state, HERMON_CMD_NOSLEEP_SPIN,
2094 2087 &nodeinfo);
2095 2088 if (status != HERMON_CMD_SUCCESS) {
2096 2089 cmn_err(CE_NOTE, "GetNodeInfo command failed: %08x\n", status);
2097 2090 hermon_hw_fini(state, cleanup);
2098 2091 HERMON_ATTACH_MSG(state->hs_attach_buf,
2099 2092 "hw_init_getnodeinfo_cmd_fail");
2100 2093 /* This case is not the degraded one */
2101 2094 return (DDI_FAILURE);
2102 2095 }
2103 2096
2104 2097 /*
2105 2098 * If the NodeGUID value was set in OBP properties, then we use that
2106 2099 * value. But we still print a message if the value we queried from
2107 2100 * firmware does not match this value.
2108 2101 *
2109 2102 * Otherwise if OBP value is not set then we use the value from
2110 2103 * firmware unconditionally.
2111 2104 */
2112 2105 if (state->hs_cfg_profile->cp_nodeguid) {
2113 2106 state->hs_nodeguid = state->hs_cfg_profile->cp_nodeguid;
2114 2107 } else {
2115 2108 state->hs_nodeguid = nodeinfo.NodeGUID;
2116 2109 }
2117 2110
2118 2111 if (state->hs_nodeguid != nodeinfo.NodeGUID) {
2119 2112 cmn_err(CE_NOTE, "!NodeGUID value queried from firmware "
2120 2113 "does not match value set by device property");
2121 2114 }
2122 2115
2123 2116 /*
2124 2117 * If the SystemImageGUID value was set in OBP properties, then we use
2125 2118 * that value. But we still print a message if the value we queried
2126 2119 * from firmware does not match this value.
2127 2120 *
2128 2121 * Otherwise if OBP value is not set then we use the value from
2129 2122 * firmware unconditionally.
2130 2123 */
2131 2124 if (state->hs_cfg_profile->cp_sysimgguid) {
2132 2125 state->hs_sysimgguid = state->hs_cfg_profile->cp_sysimgguid;
2133 2126 } else {
2134 2127 state->hs_sysimgguid = nodeinfo.SystemImageGUID;
2135 2128 }
2136 2129
2137 2130 if (state->hs_sysimgguid != nodeinfo.SystemImageGUID) {
2138 2131 cmn_err(CE_NOTE, "!SystemImageGUID value queried from firmware "
2139 2132 "does not match value set by device property");
2140 2133 }
2141 2134
2142 2135 /* Get NodeDescription */
2143 2136 status = hermon_getnodedesc_cmd_post(state, HERMON_CMD_NOSLEEP_SPIN,
2144 2137 (sm_nodedesc_t *)&state->hs_nodedesc);
2145 2138 if (status != HERMON_CMD_SUCCESS) {
2146 2139 cmn_err(CE_CONT, "GetNodeDesc command failed: %08x\n", status);
2147 2140 hermon_hw_fini(state, cleanup);
2148 2141 HERMON_ATTACH_MSG(state->hs_attach_buf,
2149 2142 "hw_init_getnodedesc_cmd_fail");
2150 2143 /* This case is not the degraded one */
2151 2144 return (DDI_FAILURE);
2152 2145 }
2153 2146
2154 2147 return (DDI_SUCCESS);
2155 2148 }
2156 2149
2157 2150
2158 2151 /*
2159 2152 * hermon_hw_fini()
2160 2153 * Context: Only called from attach() and/or detach() path contexts
2161 2154 */
2162 2155 static void
2163 2156 hermon_hw_fini(hermon_state_t *state, hermon_drv_cleanup_level_t cleanup)
2164 2157 {
2165 2158 uint_t num_ports;
2166 2159 int i, status;
2167 2160
2168 2161
2169 2162 /*
2170 2163 * JBDB - We might not want to run these returns in all cases of
2171 2164 * Bad News. We should still attempt to free all of the DMA memory
2172 2165 * resources... This needs to be worked last, after all allocations
2173 2166 * are implemented. For now, and possibly for later, this works.
2174 2167 */
2175 2168
2176 2169 switch (cleanup) {
2177 2170 /*
2178 2171 * If we add more driver initialization steps that should be cleaned
2179 2172 * up here, we need to ensure that HERMON_DRV_CLEANUP_ALL is still the
2180 2173 * first entry (i.e. corresponds to the last init step).
2181 2174 */
2182 2175 case HERMON_DRV_CLEANUP_ALL:
2183 2176 /* Shutdown the Hermon IB port(s) */
2184 2177 num_ports = state->hs_cfg_profile->cp_num_ports;
2185 2178 (void) hermon_hca_ports_shutdown(state, num_ports);
2186 2179 /* FALLTHROUGH */
2187 2180
2188 2181 case HERMON_DRV_CLEANUP_LEVEL19:
2189 2182 /* Teardown resources used for multicast group handling */
2190 2183 hermon_mcg_fini(state);
2191 2184 /* FALLTHROUGH */
2192 2185
2193 2186 case HERMON_DRV_CLEANUP_LEVEL18:
2194 2187 /* Unreserve the special QP contexts */
2195 2188 hermon_special_qp_contexts_unreserve(state);
2196 2189 /* FALLTHROUGH */
2197 2190
2198 2191 case HERMON_DRV_CLEANUP_LEVEL17:
2199 2192 /*
2200 2193 * Attempt to teardown all event queues (EQ). If we fail
2201 2194 * here then print a warning message and return. Something
2202 2195 * (either in HW or SW) has gone seriously wrong.
2203 2196 */
2204 2197 status = hermon_eq_fini_all(state);
2205 2198 if (status != DDI_SUCCESS) {
2206 2199 HERMON_WARNING(state, "failed to teardown EQs");
2207 2200 return;
2208 2201 }
2209 2202 /* FALLTHROUGH */
2210 2203 case HERMON_DRV_CLEANUP_LEVEL16:
2211 2204 /* Teardown Hermon interrupts */
2212 2205 hermon_isr_fini(state);
2213 2206 /* FALLTHROUGH */
2214 2207
2215 2208 case HERMON_DRV_CLEANUP_LEVEL15:
2216 2209 status = hermon_intr_or_msi_fini(state);
2217 2210 if (status != DDI_SUCCESS) {
2218 2211 HERMON_WARNING(state, "failed to free intr/MSI");
2219 2212 return;
2220 2213 }
2221 2214 /* FALLTHROUGH */
2222 2215
2223 2216 case HERMON_DRV_CLEANUP_LEVEL14:
2224 2217 /* Free the resources for the Hermon internal UAR pages */
2225 2218 hermon_internal_uarpg_fini(state);
2226 2219 /* FALLTHROUGH */
2227 2220
2228 2221 case HERMON_DRV_CLEANUP_LEVEL13:
2229 2222 /*
2230 2223 * Free the PD that was used internally by Hermon software. If
2231 2224 * we fail here then print a warning and return. Something
2232 2225 * (probably software-related, but perhaps HW) has gone wrong.
2233 2226 */
2234 2227 status = hermon_pd_free(state, &state->hs_pdhdl_internal);
2235 2228 if (status != DDI_SUCCESS) {
2236 2229 HERMON_WARNING(state, "failed to free internal PD");
2237 2230 return;
2238 2231 }
2239 2232 /* FALLTHROUGH */
2240 2233
2241 2234 case HERMON_DRV_CLEANUP_LEVEL12:
2242 2235 /* Cleanup all the phase2 resources first */
2243 2236 hermon_rsrc_fini(state, HERMON_RSRC_CLEANUP_ALL);
2244 2237 /* FALLTHROUGH */
2245 2238
2246 2239 case HERMON_DRV_CLEANUP_LEVEL11:
2247 2240 /* LEVEL11 is after INIT_HCA */
2248 2241 /* FALLTHROUGH */
2249 2242
2250 2243
2251 2244 case HERMON_DRV_CLEANUP_LEVEL10:
2252 2245 /*
2253 2246 * Unmap the ICM memory area with UNMAP_ICM command.
2254 2247 */
2255 2248 status = hermon_unmap_icm_cmd_post(state, NULL);
2256 2249 if (status != DDI_SUCCESS) {
2257 2250 cmn_err(CE_WARN,
2258 2251 "hermon_hw_fini: failed to unmap ICM\n");
2259 2252 }
2260 2253
2261 2254 /* Free the initial ICM DMA handles */
2262 2255 hermon_icm_dma_fini(state);
2263 2256
2264 2257 /* Free the ICM table structures */
2265 2258 hermon_icm_tables_fini(state);
2266 2259
2267 2260 /* Free the ICM table handles */
2268 2261 kmem_free(state->hs_icm, HERMON_NUM_ICM_RESOURCES *
2269 2262 sizeof (hermon_icm_table_t));
2270 2263
2271 2264 /* FALLTHROUGH */
2272 2265
2273 2266 case HERMON_DRV_CLEANUP_LEVEL9:
2274 2267 /*
2275 2268 * Unmap the ICM Aux memory area with UNMAP_ICM_AUX command.
2276 2269 */
2277 2270 status = hermon_unmap_icm_aux_cmd_post(state);
2278 2271 if (status != HERMON_CMD_SUCCESS) {
2279 2272 cmn_err(CE_NOTE,
2280 2273 "hermon_hw_fini: failed to unmap ICMA\n");
2281 2274 }
2282 2275 /* FALLTHROUGH */
2283 2276
2284 2277 case HERMON_DRV_CLEANUP_LEVEL8:
2285 2278 /*
2286 2279 * Deallocate ICM Aux DMA memory.
2287 2280 */
2288 2281 hermon_dma_free(&state->hs_icma_dma);
2289 2282 /* FALLTHROUGH */
2290 2283
2291 2284 case HERMON_DRV_CLEANUP_LEVEL7:
2292 2285 if (state->hs_fm_uarhdl) {
2293 2286 hermon_regs_map_free(state, &state->hs_fm_uarhdl);
2294 2287 state->hs_fm_uarhdl = NULL;
2295 2288 }
2296 2289
2297 2290 if (state->hs_reg_uarhdl) {
2298 2291 ddi_regs_map_free(&state->hs_reg_uarhdl);
2299 2292 state->hs_reg_uarhdl = NULL;
2300 2293 }
2301 2294
2302 2295 if (state->hs_bf_offset != 0 && state->hs_reg_bfhdl) {
2303 2296 ddi_regs_map_free(&state->hs_reg_bfhdl);
2304 2297 state->hs_reg_bfhdl = NULL;
2305 2298 }
2306 2299
2307 2300 for (i = 0; i < HERMON_MAX_PORTS; i++) {
2308 2301 if (state->hs_pkey[i]) {
2309 2302 kmem_free(state->hs_pkey[i], (1 <<
2310 2303 state->hs_cfg_profile->cp_log_max_pkeytbl) *
2311 2304 sizeof (ib_pkey_t));
2312 2305 state->hs_pkey[i] = NULL;
2313 2306 }
2314 2307 if (state->hs_guid[i]) {
2315 2308 kmem_free(state->hs_guid[i], (1 <<
2316 2309 state->hs_cfg_profile->cp_log_max_gidtbl) *
2317 2310 sizeof (ib_guid_t));
2318 2311 state->hs_guid[i] = NULL;
2319 2312 }
2320 2313 }
2321 2314 /* FALLTHROUGH */
2322 2315
2323 2316 case HERMON_DRV_CLEANUP_LEVEL6:
2324 2317 /*
2325 2318 * Unmap the firmware memory area with UNMAP_FA command.
2326 2319 */
2327 2320 status = hermon_unmap_fa_cmd_post(state);
2328 2321
2329 2322 if (status != HERMON_CMD_SUCCESS) {
2330 2323 cmn_err(CE_NOTE,
2331 2324 "hermon_hw_fini: failed to unmap FW\n");
2332 2325 }
2333 2326
2334 2327 /*
2335 2328 * Deallocate firmware DMA memory.
2336 2329 */
2337 2330 hermon_dma_free(&state->hs_fw_dma);
2338 2331 /* FALLTHROUGH */
2339 2332
2340 2333 case HERMON_DRV_CLEANUP_LEVEL5:
2341 2334 /* stop the poll thread */
2342 2335 if (state->hs_fm_poll_thread) {
2343 2336 ddi_periodic_delete(state->hs_fm_poll_thread);
2344 2337 state->hs_fm_poll_thread = NULL;
2345 2338 }
2346 2339 /* FALLTHROUGH */
2347 2340
2348 2341 case HERMON_DRV_CLEANUP_LEVEL4:
2349 2342 /* Then cleanup the phase1 resources */
2350 2343 hermon_rsrc_fini(state, HERMON_RSRC_CLEANUP_PHASE1_COMPLETE);
2351 2344 /* FALLTHROUGH */
2352 2345
2353 2346 case HERMON_DRV_CLEANUP_LEVEL3:
2354 2347 /* Teardown any resources allocated for the config profile */
2355 2348 hermon_cfg_profile_fini(state);
2356 2349 /* FALLTHROUGH */
2357 2350
2358 2351 case HERMON_DRV_CLEANUP_LEVEL2:
2359 2352 #ifdef HERMON_SUPPORTS_MSIX_BAR
2360 2353 /*
2361 2354 * unmap 3rd BAR, MSIX BAR
2362 2355 */
2363 2356 if (state->hs_reg_msihdl) {
2364 2357 ddi_regs_map_free(&state->hs_reg_msihdl);
2365 2358 state->hs_reg_msihdl = NULL;
2366 2359 }
2367 2360 /* FALLTHROUGH */
2368 2361 #endif
2369 2362 case HERMON_DRV_CLEANUP_LEVEL1:
2370 2363 case HERMON_DRV_CLEANUP_LEVEL0:
2371 2364 /*
2372 2365 * LEVEL1 and LEVEL0 resources are freed in
2373 2366 * hermon_drv_fini2().
2374 2367 */
2375 2368 break;
2376 2369
2377 2370 default:
2378 2371 HERMON_WARNING(state, "unexpected driver cleanup level");
2379 2372 return;
2380 2373 }
2381 2374 }
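/*
 * Minimal sketch of the cleanup-level pattern used by hermon_hw_init() and
 * hermon_hw_fini() above: each setup step bumps a level, and teardown
 * enters a switch at the current level and falls through so the most
 * recently initialized resources are released first. The enum and function
 * names here are illustrative, not the driver's HERMON_DRV_CLEANUP_* set.
 */
#include <stdio.h>

typedef enum {
	CLEANUP_LEVEL0,
	CLEANUP_LEVEL1,
	CLEANUP_LEVEL2,
	CLEANUP_ALL
} level_t;

static void
example_fini(level_t level)
{
	switch (level) {
	case CLEANUP_ALL:
		(void) printf("undo step 3\n");
		/* FALLTHROUGH */
	case CLEANUP_LEVEL2:
		(void) printf("undo step 2\n");
		/* FALLTHROUGH */
	case CLEANUP_LEVEL1:
		(void) printf("undo step 1\n");
		/* FALLTHROUGH */
	case CLEANUP_LEVEL0:
		break;
	}
}

int
main(void)
{
	example_fini(CLEANUP_LEVEL2);	/* undoes steps 2 and 1 only */
	return (0);
}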
2382 2375
2383 2376
2384 2377 /*
2385 2378 * hermon_soft_state_init()
2386 2379 * Context: Only called from attach() path context
2387 2380 */
2388 2381 static int
2389 2382 hermon_soft_state_init(hermon_state_t *state)
2390 2383 {
2391 2384 ibt_hca_attr_t *hca_attr;
2392 2385 uint64_t maxval, val;
2393 2386 ibt_hca_flags_t caps = IBT_HCA_NO_FLAGS;
2394 2387 ibt_hca_flags2_t caps2 = IBT_HCA2_NO_FLAGS;
2395 2388 int status;
2396 2389 int max_send_wqe_bytes;
2397 2390 int max_recv_wqe_bytes;
2398 2391
2399 2392 /*
2400 2393 * The ibc_hca_info_t struct is passed to the IBTF. This is the
2401 2394 * routine where we initialize it. Many of the init values come from
2402 2395 * either configuration variables or successful queries of the Hermon
2403 2396 * hardware abilities
2404 2397 */
2405 2398 state->hs_ibtfinfo.hca_ci_vers = IBCI_V4;
2406 2399 state->hs_ibtfinfo.hca_handle = (ibc_hca_hdl_t)state;
2407 2400 state->hs_ibtfinfo.hca_ops = &hermon_ibc_ops;
2408 2401
2409 2402 hca_attr = kmem_zalloc(sizeof (ibt_hca_attr_t), KM_SLEEP);
2410 2403 state->hs_ibtfinfo.hca_attr = hca_attr;
2411 2404
2412 2405 hca_attr->hca_dip = state->hs_dip;
2413 2406 hca_attr->hca_fw_major_version = state->hs_fw.fw_rev_major;
2414 2407 hca_attr->hca_fw_minor_version = state->hs_fw.fw_rev_minor;
2415 2408 hca_attr->hca_fw_micro_version = state->hs_fw.fw_rev_subminor;
2416 2409
2417 2410 /* CQ interrupt moderation maximums - each limited to 16 bits */
2418 2411 hca_attr->hca_max_cq_mod_count = 0xFFFF;
2419 2412 hca_attr->hca_max_cq_mod_usec = 0xFFFF;
2420 2413 hca_attr->hca_max_cq_handlers = state->hs_intrmsi_allocd;
2421 2414
2422 2415
2423 2416 /*
2424 2417 * Determine HCA capabilities:
2425 2418 * No default support for IBT_HCA_RD, IBT_HCA_RAW_MULTICAST,
2426 2419 * IBT_HCA_ATOMICS_GLOBAL, IBT_HCA_RESIZE_CHAN, IBT_HCA_INIT_TYPE,
2427 2420 * or IBT_HCA_SHUTDOWN_PORT
2428 2421 * But IBT_HCA_AH_PORT_CHECK, IBT_HCA_SQD_RTS_PORT, IBT_HCA_SI_GUID,
2429 2422 * IBT_HCA_RNR_NAK, IBT_HCA_CURRENT_QP_STATE, IBT_HCA_PORT_UP,
2430 2423 * IBT_HCA_SRQ, IBT_HCA_RESIZE_SRQ and IBT_HCA_FMR are always
2431 2424 * supported
2432 2425 * All other features are conditionally supported, depending on the
2433 2426 	 * status returned by the Hermon HCA in QUERY_DEV_LIM.
2434 2427 */
2435 2428 if (state->hs_devlim.ud_multi) {
2436 2429 caps |= IBT_HCA_UD_MULTICAST;
2437 2430 }
2438 2431 if (state->hs_devlim.atomic) {
2439 2432 caps |= IBT_HCA_ATOMICS_HCA;
2440 2433 }
2441 2434 if (state->hs_devlim.apm) {
2442 2435 caps |= IBT_HCA_AUTO_PATH_MIG;
2443 2436 }
2444 2437 if (state->hs_devlim.pkey_v) {
2445 2438 caps |= IBT_HCA_PKEY_CNTR;
2446 2439 }
2447 2440 if (state->hs_devlim.qkey_v) {
2448 2441 caps |= IBT_HCA_QKEY_CNTR;
2449 2442 }
2450 2443 if (state->hs_devlim.ipoib_cksm) {
2451 2444 caps |= IBT_HCA_CKSUM_FULL;
2452 2445 caps2 |= IBT_HCA2_IP_CLASS;
2453 2446 }
2454 2447 if (state->hs_devlim.mod_wr_srq) {
2455 2448 caps |= IBT_HCA_RESIZE_SRQ;
2456 2449 }
2457 2450 if (state->hs_devlim.lif) {
2458 2451 caps |= IBT_HCA_LOCAL_INVAL_FENCE;
2459 2452 }
2460 2453 if (state->hs_devlim.reserved_lkey) {
2461 2454 caps2 |= IBT_HCA2_RES_LKEY;
2462 2455 hca_attr->hca_reserved_lkey = state->hs_devlim.rsv_lkey;
2463 2456 }
2464 2457 if (state->hs_devlim.local_inv && state->hs_devlim.remote_inv &&
2465 2458 state->hs_devlim.fast_reg_wr) { /* fw needs to be >= 2.7.000 */
2466 2459 if ((state->hs_fw.fw_rev_major > 2) ||
2467 2460 ((state->hs_fw.fw_rev_major == 2) &&
2468 2461 (state->hs_fw.fw_rev_minor >= 7)))
2469 2462 caps2 |= IBT_HCA2_MEM_MGT_EXT;
2470 2463 }
2471 2464 if (state->hs_devlim.log_max_rss_tbl_sz) {
2472 2465 hca_attr->hca_rss_max_log2_table =
2473 2466 state->hs_devlim.log_max_rss_tbl_sz;
2474 2467 if (state->hs_devlim.rss_xor)
2475 2468 caps2 |= IBT_HCA2_RSS_XOR_ALG;
2476 2469 if (state->hs_devlim.rss_toep)
2477 2470 caps2 |= IBT_HCA2_RSS_TPL_ALG;
2478 2471 }
2479 2472 if (state->hs_devlim.mps) {
2480 2473 caps |= IBT_HCA_ZERO_BASED_VA;
2481 2474 }
2482 2475 if (state->hs_devlim.zb) {
2483 2476 caps |= IBT_HCA_MULT_PAGE_SZ_MR;
2484 2477 }
2485 2478 caps |= (IBT_HCA_AH_PORT_CHECK | IBT_HCA_SQD_SQD_PORT |
2486 2479 IBT_HCA_SI_GUID | IBT_HCA_RNR_NAK | IBT_HCA_CURRENT_QP_STATE |
2487 2480 IBT_HCA_PORT_UP | IBT_HCA_RC_SRQ | IBT_HCA_UD_SRQ | IBT_HCA_FMR);
2488 2481 caps2 |= IBT_HCA2_DMA_MR;
2489 2482
2490 2483 if (state->hs_devlim.log_max_gso_sz) {
2491 2484 hca_attr->hca_max_lso_size =
2492 2485 (1 << state->hs_devlim.log_max_gso_sz);
2493 2486 /* 64 = ctrl & datagram seg, 4 = LSO seg, 16 = 1 SGL */
2494 2487 hca_attr->hca_max_lso_hdr_size =
2495 2488 state->hs_devlim.max_desc_sz_sq - (64 + 4 + 16);
2496 2489 }
2497 2490
2498 2491 caps |= IBT_HCA_WQE_SIZE_INFO;
2499 2492 max_send_wqe_bytes = state->hs_devlim.max_desc_sz_sq;
2500 2493 max_recv_wqe_bytes = state->hs_devlim.max_desc_sz_rq;
2501 2494 hca_attr->hca_ud_send_sgl_sz = (max_send_wqe_bytes / 16) - 4;
2502 2495 hca_attr->hca_conn_send_sgl_sz = (max_send_wqe_bytes / 16) - 1;
2503 2496 hca_attr->hca_conn_rdma_sgl_overhead = 1;
2504 2497 hca_attr->hca_conn_rdma_write_sgl_sz = (max_send_wqe_bytes / 16) - 2;
2505 2498 hca_attr->hca_conn_rdma_read_sgl_sz = (512 / 16) - 2; /* see PRM */
2506 2499 hca_attr->hca_recv_sgl_sz = max_recv_wqe_bytes / 16;
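/*
 * Worked numbers for the SGL sizing just above, assuming a hypothetical
 * 1024-byte maximum send descriptor: each scatter/gather entry and each
 * control/header segment occupies a 16-byte unit of the WQE, so the SGL
 * counts are the slot count minus the per-transport header overhead used
 * in the assignments above.
 */
#include <stdio.h>

int
main(void)
{
	int max_send_wqe_bytes = 1024;		/* hypothetical max_desc_sz_sq */
	int slots = max_send_wqe_bytes / 16;	/* 64 sixteen-byte units */

	(void) printf("UD send SGEs:        %d\n", slots - 4);
	(void) printf("Connected send SGEs: %d\n", slots - 1);
	(void) printf("RDMA-write SGEs:     %d\n", slots - 2);
	(void) printf("RDMA-read SGEs:      %d\n", (512 / 16) - 2);
	return (0);
}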
2507 2500
2508 2501 /* We choose not to support "inline" unless it improves performance */
2509 2502 hca_attr->hca_max_inline_size = 0;
2510 2503 hca_attr->hca_ud_send_inline_sz = 0;
2511 2504 hca_attr->hca_conn_send_inline_sz = 0;
2512 2505 hca_attr->hca_conn_rdmaw_inline_overhead = 4;
2513 2506
2514 2507 #if defined(_ELF64)
2515 2508 /* 32-bit kernels are too small for Fibre Channel over IB */
2516 2509 if (state->hs_devlim.fcoib && (caps2 & IBT_HCA2_MEM_MGT_EXT)) {
2517 2510 caps2 |= IBT_HCA2_FC;
2518 2511 hca_attr->hca_rfci_max_log2_qp = 7; /* 128 per port */
2519 2512 hca_attr->hca_fexch_max_log2_qp = 16; /* 64K per port */
2520 2513 hca_attr->hca_fexch_max_log2_mem = 20; /* 1MB per MPT */
2521 2514 }
2522 2515 #endif
2523 2516
2524 2517 hca_attr->hca_flags = caps;
2525 2518 hca_attr->hca_flags2 = caps2;
2526 2519
2527 2520 /*
2528 2521 * Set hca_attr's IDs
2529 2522 */
2530 2523 hca_attr->hca_vendor_id = state->hs_vendor_id;
2531 2524 hca_attr->hca_device_id = state->hs_device_id;
2532 2525 hca_attr->hca_version_id = state->hs_revision_id;
2533 2526
2534 2527 /*
2535 2528 * Determine number of available QPs and max QP size. Number of
2536 2529 * available QPs is determined by subtracting the number of
2537 2530 * "reserved QPs" (i.e. reserved for firmware use) from the
2538 2531 * total number configured.
2539 2532 */
2540 2533 val = ((uint64_t)1 << state->hs_cfg_profile->cp_log_num_qp);
2541 2534 hca_attr->hca_max_qp = val - ((uint64_t)1 <<
2542 2535 state->hs_devlim.log_rsvd_qp);
2543 2536 maxval = ((uint64_t)1 << state->hs_devlim.log_max_qp_sz);
2544 2537 val = ((uint64_t)1 << state->hs_cfg_profile->cp_log_max_qp_sz);
2545 2538 if (val > maxval) {
2546 2539 kmem_free(hca_attr, sizeof (ibt_hca_attr_t));
2547 2540 HERMON_ATTACH_MSG(state->hs_attach_buf,
2548 2541 "soft_state_init_maxqpsz_toobig_fail");
2549 2542 return (DDI_FAILURE);
2550 2543 }
2551 2544 /* we need to reduce this by the max space needed for headroom */
2552 2545 hca_attr->hca_max_qp_sz = (uint_t)val - (HERMON_QP_OH_SIZE >>
2553 2546 HERMON_QP_WQE_LOG_MINIMUM) - 1;
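/*
 * Worked example of the "available = configured minus reserved" arithmetic
 * above, with hypothetical log2 values (the real ones come from the config
 * profile and QUERY_DEV_CAP). The same pattern repeats below for CQs,
 * SRQs, and MPTs.
 */
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	int log_num_qp = 17;	/* hypothetical: 131072 configured QPs */
	int log_rsvd_qp = 7;	/* hypothetical: 128 reserved for firmware */
	uint64_t avail = ((uint64_t)1 << log_num_qp) -
	    ((uint64_t)1 << log_rsvd_qp);

	(void) printf("QPs available to clients: %llu\n",
	    (unsigned long long)avail);	/* 130944 */
	return (0);
}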
2554 2547
2555 2548 /*
2556 2549 * Determine max scatter-gather size in WQEs. The HCA has split
2557 2550 	 * the max sgl into recv Q and send Q values. Use the lesser of the two.
2558 2551 *
2559 2552 * This is mainly useful for legacy clients. Smart clients
2560 2553 * such as IPoIB will use the IBT_HCA_WQE_SIZE_INFO sgl info.
2561 2554 */
2562 2555 if (state->hs_devlim.max_sg_rq <= state->hs_devlim.max_sg_sq) {
2563 2556 maxval = state->hs_devlim.max_sg_rq;
2564 2557 } else {
2565 2558 maxval = state->hs_devlim.max_sg_sq;
2566 2559 }
2567 2560 val = state->hs_cfg_profile->cp_wqe_max_sgl;
2568 2561 if (val > maxval) {
2569 2562 kmem_free(hca_attr, sizeof (ibt_hca_attr_t));
2570 2563 HERMON_ATTACH_MSG(state->hs_attach_buf,
2571 2564 "soft_state_init_toomanysgl_fail");
2572 2565 return (DDI_FAILURE);
2573 2566 }
2574 2567 /* If the rounded value for max SGL is too large, cap it */
2575 2568 if (state->hs_cfg_profile->cp_wqe_real_max_sgl > maxval) {
2576 2569 state->hs_cfg_profile->cp_wqe_real_max_sgl = (uint32_t)maxval;
2577 2570 val = maxval;
2578 2571 } else {
2579 2572 val = state->hs_cfg_profile->cp_wqe_real_max_sgl;
2580 2573 }
2581 2574
2582 2575 hca_attr->hca_max_sgl = (uint_t)val;
2583 2576 hca_attr->hca_max_rd_sgl = 0; /* zero because RD is unsupported */
2584 2577
2585 2578 /*
2586 2579 * Determine number of available CQs and max CQ size. Number of
2587 2580 * available CQs is determined by subtracting the number of
2588 2581 * "reserved CQs" (i.e. reserved for firmware use) from the
2589 2582 * total number configured.
2590 2583 */
2591 2584 val = ((uint64_t)1 << state->hs_cfg_profile->cp_log_num_cq);
2592 2585 hca_attr->hca_max_cq = val - ((uint64_t)1 <<
2593 2586 state->hs_devlim.log_rsvd_cq);
2594 2587 maxval = ((uint64_t)1 << state->hs_devlim.log_max_cq_sz);
2595 2588 val = ((uint64_t)1 << state->hs_cfg_profile->cp_log_max_cq_sz) - 1;
2596 2589 if (val > maxval) {
2597 2590 kmem_free(hca_attr, sizeof (ibt_hca_attr_t));
2598 2591 HERMON_ATTACH_MSG(state->hs_attach_buf,
2599 2592 "soft_state_init_maxcqsz_toobig_fail");
2600 2593 return (DDI_FAILURE);
2601 2594 }
2602 2595 hca_attr->hca_max_cq_sz = (uint_t)val;
2603 2596
2604 2597 /*
2605 2598 * Determine number of available SRQs and max SRQ size. Number of
2606 2599 * available SRQs is determined by subtracting the number of
2607 2600 * "reserved SRQs" (i.e. reserved for firmware use) from the
2608 2601 * total number configured.
2609 2602 */
2610 2603 val = ((uint64_t)1 << state->hs_cfg_profile->cp_log_num_srq);
2611 2604 hca_attr->hca_max_srqs = val - ((uint64_t)1 <<
2612 2605 state->hs_devlim.log_rsvd_srq);
2613 2606 maxval = ((uint64_t)1 << state->hs_devlim.log_max_srq_sz);
2614 2607 val = ((uint64_t)1 << state->hs_cfg_profile->cp_log_max_srq_sz);
2615 2608
2616 2609 if (val > maxval) {
2617 2610 kmem_free(hca_attr, sizeof (ibt_hca_attr_t));
2618 2611 HERMON_ATTACH_MSG(state->hs_attach_buf,
2619 2612 "soft_state_init_maxsrqsz_toobig_fail");
2620 2613 return (DDI_FAILURE);
2621 2614 }
2622 2615 hca_attr->hca_max_srqs_sz = (uint_t)val;
2623 2616
2624 2617 val = hca_attr->hca_recv_sgl_sz - 1; /* SRQ has a list link */
2625 2618 maxval = state->hs_devlim.max_sg_rq - 1;
2626 2619 if (val > maxval) {
2627 2620 kmem_free(hca_attr, sizeof (ibt_hca_attr_t));
2628 2621 HERMON_ATTACH_MSG(state->hs_attach_buf,
2629 2622 "soft_state_init_toomanysrqsgl_fail");
2630 2623 return (DDI_FAILURE);
2631 2624 }
2632 2625 hca_attr->hca_max_srq_sgl = (uint_t)val;
2633 2626
2634 2627 /*
2635 2628 * Determine supported HCA page sizes
2636 2629 * XXX
2637 2630 * For now we simply return the system pagesize as the only supported
2638 2631 * pagesize
2639 2632 */
2640 2633 hca_attr->hca_page_sz = ((PAGESIZE == (1 << 13)) ? IBT_PAGE_8K :
2641 2634 IBT_PAGE_4K);
2642 2635
2643 2636 /*
2644 2637 * Determine number of available MemReg, MemWin, and their max size.
2645 2638 * Number of available MRs and MWs is determined by subtracting
2646 2639 * the number of "reserved MPTs" (i.e. reserved for firmware use)
2647 2640 * from the total number configured for each.
2648 2641 */
2649 2642 val = ((uint64_t)1 << state->hs_cfg_profile->cp_log_num_dmpt);
2650 2643 hca_attr->hca_max_memr = val - ((uint64_t)1 <<
2651 2644 state->hs_devlim.log_rsvd_dmpt);
2652 2645 hca_attr->hca_max_mem_win = state->hs_devlim.mem_win ? (val -
2653 2646 ((uint64_t)1 << state->hs_devlim.log_rsvd_dmpt)) : 0;
2654 2647 maxval = state->hs_devlim.log_max_mrw_sz;
2655 2648 val = state->hs_cfg_profile->cp_log_max_mrw_sz;
2656 2649 if (val > maxval) {
2657 2650 kmem_free(hca_attr, sizeof (ibt_hca_attr_t));
2658 2651 HERMON_ATTACH_MSG(state->hs_attach_buf,
2659 2652 "soft_state_init_maxmrwsz_toobig_fail");
2660 2653 return (DDI_FAILURE);
2661 2654 }
2662 2655 hca_attr->hca_max_memr_len = ((uint64_t)1 << val);
2663 2656
2664 2657 /* Determine RDMA/Atomic properties */
2665 2658 val = ((uint64_t)1 << state->hs_cfg_profile->cp_log_num_rdb);
2666 2659 hca_attr->hca_max_rsc = (uint_t)val;
2667 2660 val = state->hs_cfg_profile->cp_hca_max_rdma_in_qp;
2668 2661 hca_attr->hca_max_rdma_in_qp = (uint8_t)val;
2669 2662 val = state->hs_cfg_profile->cp_hca_max_rdma_out_qp;
2670 2663 hca_attr->hca_max_rdma_out_qp = (uint8_t)val;
2671 2664 hca_attr->hca_max_rdma_in_ee = 0;
2672 2665 hca_attr->hca_max_rdma_out_ee = 0;
2673 2666
2674 2667 /*
2675 2668 * Determine maximum number of raw IPv6 and Ether QPs. Set to 0
2676 2669 * because neither type of raw QP is supported
2677 2670 */
2678 2671 hca_attr->hca_max_ipv6_qp = 0;
2679 2672 hca_attr->hca_max_ether_qp = 0;
2680 2673
2681 2674 /* Determine max number of MCGs and max QP-per-MCG */
2682 2675 val = ((uint64_t)1 << state->hs_cfg_profile->cp_log_num_qp);
2683 2676 hca_attr->hca_max_mcg_qps = (uint_t)val;
2684 2677 val = ((uint64_t)1 << state->hs_cfg_profile->cp_log_num_mcg);
2685 2678 hca_attr->hca_max_mcg = (uint_t)val;
2686 2679 val = state->hs_cfg_profile->cp_num_qp_per_mcg;
2687 2680 hca_attr->hca_max_qp_per_mcg = (uint_t)val;
2688 2681
2689 2682 	/* Determine max number of partitions (i.e. PKeys) */
2690 2683 maxval = ((uint64_t)state->hs_cfg_profile->cp_num_ports <<
2691 2684 state->hs_queryport.log_max_pkey);
2692 2685 val = ((uint64_t)state->hs_cfg_profile->cp_num_ports <<
2693 2686 state->hs_cfg_profile->cp_log_max_pkeytbl);
2694 2687
2695 2688 if (val > maxval) {
2696 2689 kmem_free(hca_attr, sizeof (ibt_hca_attr_t));
2697 2690 HERMON_ATTACH_MSG(state->hs_attach_buf,
2698 2691 "soft_state_init_toomanypkey_fail");
2699 2692 return (DDI_FAILURE);
2700 2693 }
2701 2694 hca_attr->hca_max_partitions = (uint16_t)val;
2702 2695
2703 2696 /* Determine number of ports */
2704 2697 maxval = state->hs_devlim.num_ports;
2705 2698 val = state->hs_cfg_profile->cp_num_ports;
2706 2699 if ((val > maxval) || (val == 0)) {
2707 2700 kmem_free(hca_attr, sizeof (ibt_hca_attr_t));
2708 2701 HERMON_ATTACH_MSG(state->hs_attach_buf,
2709 2702 "soft_state_init_toomanyports_fail");
2710 2703 return (DDI_FAILURE);
2711 2704 }
2712 2705 hca_attr->hca_nports = (uint8_t)val;
2713 2706
2714 2707 /* Copy NodeGUID and SystemImageGUID from softstate */
2715 2708 hca_attr->hca_node_guid = state->hs_nodeguid;
2716 2709 hca_attr->hca_si_guid = state->hs_sysimgguid;
2717 2710
2718 2711 /*
2719 2712 * Determine local ACK delay. Use the value suggested by the Hermon
2720 2713 * hardware (from the QUERY_DEV_CAP command)
2721 2714 */
2722 2715 hca_attr->hca_local_ack_delay = state->hs_devlim.ca_ack_delay;
2723 2716
2724 2717 /* Determine max SGID table and PKey table sizes */
2725 2718 val = ((uint64_t)1 << state->hs_cfg_profile->cp_log_max_gidtbl);
2726 2719 hca_attr->hca_max_port_sgid_tbl_sz = (uint_t)val;
2727 2720 val = ((uint64_t)1 << state->hs_cfg_profile->cp_log_max_pkeytbl);
2728 2721 hca_attr->hca_max_port_pkey_tbl_sz = (uint16_t)val;
2729 2722
2730 2723 /* Determine max number of PDs */
2731 2724 maxval = ((uint64_t)1 << state->hs_devlim.log_max_pd);
2732 2725 val = ((uint64_t)1 << state->hs_cfg_profile->cp_log_num_pd);
2733 2726 if (val > maxval) {
2734 2727 kmem_free(hca_attr, sizeof (ibt_hca_attr_t));
2735 2728 HERMON_ATTACH_MSG(state->hs_attach_buf,
2736 2729 "soft_state_init_toomanypd_fail");
2737 2730 return (DDI_FAILURE);
2738 2731 }
2739 2732 hca_attr->hca_max_pd = (uint_t)val;
2740 2733
2741 2734 /* Determine max number of Address Handles (NOT IN ARBEL or HERMON) */
2742 2735 hca_attr->hca_max_ah = 0;
2743 2736
2744 2737 /* No RDDs or EECs (since Reliable Datagram is not supported) */
2745 2738 hca_attr->hca_max_rdd = 0;
2746 2739 hca_attr->hca_max_eec = 0;
2747 2740
2748 2741 /* Initialize lock for reserved UAR page access */
2749 2742 mutex_init(&state->hs_uar_lock, NULL, MUTEX_DRIVER,
2750 2743 DDI_INTR_PRI(state->hs_intrmsi_pri));
2751 2744
2752 2745 /* Initialize the flash fields */
2753 2746 state->hs_fw_flashstarted = 0;
2754 2747 mutex_init(&state->hs_fw_flashlock, NULL, MUTEX_DRIVER,
2755 2748 DDI_INTR_PRI(state->hs_intrmsi_pri));
2756 2749
2757 2750 /* Initialize the lock for the info ioctl */
2758 2751 mutex_init(&state->hs_info_lock, NULL, MUTEX_DRIVER,
2759 2752 DDI_INTR_PRI(state->hs_intrmsi_pri));
2760 2753
2761 2754 /* Initialize the AVL tree for QP number support */
2762 2755 hermon_qpn_avl_init(state);
2763 2756
2764 2757 /* Initialize the cq_sched info structure */
2765 2758 status = hermon_cq_sched_init(state);
2766 2759 if (status != DDI_SUCCESS) {
2767 2760 hermon_qpn_avl_fini(state);
2768 2761 mutex_destroy(&state->hs_info_lock);
2769 2762 mutex_destroy(&state->hs_fw_flashlock);
2770 2763 mutex_destroy(&state->hs_uar_lock);
2771 2764 kmem_free(hca_attr, sizeof (ibt_hca_attr_t));
2772 2765 HERMON_ATTACH_MSG(state->hs_attach_buf,
2773 2766 "soft_state_init_cqsched_init_fail");
2774 2767 return (DDI_FAILURE);
2775 2768 }
2776 2769
2777 2770 /* Initialize the fcoib info structure */
2778 2771 status = hermon_fcoib_init(state);
2779 2772 if (status != DDI_SUCCESS) {
2780 2773 hermon_cq_sched_fini(state);
2781 2774 hermon_qpn_avl_fini(state);
2782 2775 mutex_destroy(&state->hs_info_lock);
2783 2776 mutex_destroy(&state->hs_fw_flashlock);
2784 2777 mutex_destroy(&state->hs_uar_lock);
2785 2778 kmem_free(hca_attr, sizeof (ibt_hca_attr_t));
2786 2779 HERMON_ATTACH_MSG(state->hs_attach_buf,
2787 2780 "soft_state_init_fcoibinit_fail");
2788 2781 return (DDI_FAILURE);
2789 2782 }
2790 2783
2791 2784 /* Initialize the kstat info structure */
2792 2785 status = hermon_kstat_init(state);
2793 2786 if (status != DDI_SUCCESS) {
2794 2787 hermon_fcoib_fini(state);
2795 2788 hermon_cq_sched_fini(state);
2796 2789 hermon_qpn_avl_fini(state);
2797 2790 mutex_destroy(&state->hs_info_lock);
2798 2791 mutex_destroy(&state->hs_fw_flashlock);
2799 2792 mutex_destroy(&state->hs_uar_lock);
2800 2793 kmem_free(hca_attr, sizeof (ibt_hca_attr_t));
2801 2794 HERMON_ATTACH_MSG(state->hs_attach_buf,
2802 2795 "soft_state_init_kstatinit_fail");
2803 2796 return (DDI_FAILURE);
2804 2797 }
2805 2798
2806 2799 return (DDI_SUCCESS);
2807 2800 }
2808 2801
2809 2802
2810 2803 /*
2811 2804 * hermon_soft_state_fini()
2812 2805 * Context: Called only from detach() path context
2813 2806 */
2814 2807 static void
2815 2808 hermon_soft_state_fini(hermon_state_t *state)
2816 2809 {
2817 2810
2818 2811 /* Teardown the kstat info */
2819 2812 hermon_kstat_fini(state);
2820 2813
2821 2814 /* Teardown the fcoib info */
2822 2815 hermon_fcoib_fini(state);
2823 2816
2824 2817 /* Teardown the cq_sched info */
2825 2818 hermon_cq_sched_fini(state);
2826 2819
2827 2820 /* Teardown the AVL tree for QP number support */
2828 2821 hermon_qpn_avl_fini(state);
2829 2822
2830 2823 /* Free up info ioctl mutex */
2831 2824 mutex_destroy(&state->hs_info_lock);
2832 2825
2833 2826 /* Free up flash mutex */
2834 2827 mutex_destroy(&state->hs_fw_flashlock);
2835 2828
2836 2829 /* Free up the UAR page access mutex */
2837 2830 mutex_destroy(&state->hs_uar_lock);
2838 2831
2839 2832 /* Free up the hca_attr struct */
2840 2833 kmem_free(state->hs_ibtfinfo.hca_attr, sizeof (ibt_hca_attr_t));
2841 2834
2842 2835 }
2843 2836
2844 2837 /*
2845 2838 * hermon_icm_config_setup()
2846 2839 * Context: Only called from attach() path context
2847 2840 */
2848 2841 static int
2849 2842 hermon_icm_config_setup(hermon_state_t *state,
2850 2843 hermon_hw_initqueryhca_t *inithca)
2851 2844 {
2852 2845 hermon_hw_querydevlim_t *devlim;
2853 2846 hermon_cfg_profile_t *cfg;
2854 2847 hermon_icm_table_t *icm_p[HERMON_NUM_ICM_RESOURCES];
2855 2848 hermon_icm_table_t *icm;
2856 2849 hermon_icm_table_t *tmp;
2857 2850 uint64_t icm_addr;
2858 2851 uint64_t icm_size;
2859 2852 int status, i, j;
2860 2853
2861 2854
2862 2855 /* Bring in local devlims, cfg_profile and hs_icm table list */
2863 2856 devlim = &state->hs_devlim;
2864 2857 cfg = state->hs_cfg_profile;
2865 2858 icm = state->hs_icm;
2866 2859
2867 2860 /*
2868 2861 * Assign each ICM table's entry size from data in the devlims,
2869 2862 * except for RDB and MCG sizes, which are not returned in devlims
2870 2863 * but do have a fixed size, and the UAR context entry size, which
2871 2864 * we determine. For this, we use the "cp_num_pgs_per_uce" value
2872 2865 * from our hs_cfg_profile.
2873 2866 */
2874 2867 icm[HERMON_CMPT].object_size = devlim->cmpt_entry_sz;
2875 2868 icm[HERMON_CMPT_QPC].object_size = devlim->cmpt_entry_sz;
2876 2869 icm[HERMON_CMPT_SRQC].object_size = devlim->cmpt_entry_sz;
2877 2870 icm[HERMON_CMPT_CQC].object_size = devlim->cmpt_entry_sz;
2878 2871 icm[HERMON_CMPT_EQC].object_size = devlim->cmpt_entry_sz;
2879 2872 icm[HERMON_MTT].object_size = devlim->mtt_entry_sz;
2880 2873 icm[HERMON_DMPT].object_size = devlim->dmpt_entry_sz;
2881 2874 icm[HERMON_QPC].object_size = devlim->qpc_entry_sz;
2882 2875 icm[HERMON_CQC].object_size = devlim->cqc_entry_sz;
2883 2876 icm[HERMON_SRQC].object_size = devlim->srq_entry_sz;
2884 2877 icm[HERMON_EQC].object_size = devlim->eqc_entry_sz;
2885 2878 icm[HERMON_RDB].object_size = devlim->rdmardc_entry_sz *
2886 2879 cfg->cp_hca_max_rdma_in_qp;
2887 2880 icm[HERMON_MCG].object_size = HERMON_MCGMEM_SZ(state);
2888 2881 icm[HERMON_ALTC].object_size = devlim->altc_entry_sz;
2889 2882 icm[HERMON_AUXC].object_size = devlim->aux_entry_sz;
2890 2883
2891 2884 /* Assign each ICM table's log2 number of entries */
2892 2885 icm[HERMON_CMPT].log_num_entries = cfg->cp_log_num_cmpt;
2893 2886 icm[HERMON_CMPT_QPC].log_num_entries = cfg->cp_log_num_qp;
2894 2887 icm[HERMON_CMPT_SRQC].log_num_entries = cfg->cp_log_num_srq;
2895 2888 icm[HERMON_CMPT_CQC].log_num_entries = cfg->cp_log_num_cq;
2896 2889 icm[HERMON_CMPT_EQC].log_num_entries = HERMON_NUM_EQ_SHIFT;
2897 2890 icm[HERMON_MTT].log_num_entries = cfg->cp_log_num_mtt;
2898 2891 icm[HERMON_DMPT].log_num_entries = cfg->cp_log_num_dmpt;
2899 2892 icm[HERMON_QPC].log_num_entries = cfg->cp_log_num_qp;
2900 2893 icm[HERMON_SRQC].log_num_entries = cfg->cp_log_num_srq;
2901 2894 icm[HERMON_CQC].log_num_entries = cfg->cp_log_num_cq;
2902 2895 icm[HERMON_EQC].log_num_entries = HERMON_NUM_EQ_SHIFT;
2903 2896 icm[HERMON_RDB].log_num_entries = cfg->cp_log_num_qp;
2904 2897 icm[HERMON_MCG].log_num_entries = cfg->cp_log_num_mcg;
2905 2898 icm[HERMON_ALTC].log_num_entries = cfg->cp_log_num_qp;
2906 2899 icm[HERMON_AUXC].log_num_entries = cfg->cp_log_num_qp;
2907 2900
2908 2901 /* Initialize the ICM tables */
2909 2902 hermon_icm_tables_init(state);
2910 2903
2911 2904 /*
2912 2905 * ICM tables must be aligned on their size in the ICM address
2913 2906 * space. So, here we order the tables from largest total table
2914 2907 * size to the smallest. All tables are a power of 2 in size, so
2915 2908 * this will ensure that all tables are aligned on their own size
2916 2909 * without wasting space in the ICM.
2917 2910 *
2918 2911 * In order to easily set the ICM addresses without needing to
2919 2912 * worry about the ordering of our table indices as relates to
2920 2913 * the hermon_rsrc_type_t enum, we will use a list of pointers
2921 2914 * representing the tables for the sort, then assign ICM addresses
2922 2915 * below using it.
2923 2916 */
2924 2917 for (i = 0; i < HERMON_NUM_ICM_RESOURCES; i++) {
2925 2918 icm_p[i] = &icm[i];
2926 2919 }
2927 2920 for (i = HERMON_NUM_ICM_RESOURCES; i > 0; i--) {
2928 2921 switch (i) {
2929 2922 case HERMON_CMPT_QPC:
2930 2923 case HERMON_CMPT_SRQC:
2931 2924 case HERMON_CMPT_CQC:
2932 2925 case HERMON_CMPT_EQC:
2933 2926 continue;
2934 2927 }
2935 2928 for (j = 1; j < i; j++) {
2936 2929 if (icm_p[j]->table_size > icm_p[j - 1]->table_size) {
2937 2930 tmp = icm_p[j];
2938 2931 icm_p[j] = icm_p[j - 1];
2939 2932 icm_p[j - 1] = tmp;
2940 2933 }
2941 2934 }
2942 2935 }
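	/*
	 * Sketch of why the largest-first placement above needs no padding:
	 * when every table size is a power of two and tables are laid out in
	 * descending size order, each running offset is automatically a
	 * multiple of the next (smaller or equal) table size. The sizes
	 * below are invented for illustration.
	 */
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	uint64_t sizes[] = { 1 << 20, 1 << 18, 1 << 18, 1 << 12 };
	uint64_t addr = 0;
	unsigned i;

	for (i = 0; i < sizeof (sizes) / sizeof (sizes[0]); i++) {
		(void) printf("table %u: base 0x%llx, aligned to 0x%llx: %s\n",
		    i, (unsigned long long)addr, (unsigned long long)sizes[i],
		    (addr % sizes[i]) == 0 ? "yes" : "no");
		addr += sizes[i];
	}
	return (0);
}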
2943 2936
2944 2937 /* Initialize the ICM address and ICM size */
2945 2938 icm_addr = icm_size = 0;
2946 2939
2947 2940 /*
2948 2941 * Set the ICM base address of each table, using our sorted
2949 2942 * list of pointers from above.
2950 2943 */
2951 2944 for (i = 0; i < HERMON_NUM_ICM_RESOURCES; i++) {
2952 2945 j = icm_p[i]->icm_type;
2953 2946 switch (j) {
2954 2947 case HERMON_CMPT_QPC:
2955 2948 case HERMON_CMPT_SRQC:
2956 2949 case HERMON_CMPT_CQC:
2957 2950 case HERMON_CMPT_EQC:
2958 2951 continue;
2959 2952 }
2960 2953 if (icm[j].table_size) {
2961 2954 /*
2962 2955 * Set the ICM base address in the table, save the
2963 2956 * ICM offset in the rsrc pool and increment the
2964 2957 * total ICM allocation.
2965 2958 */
2966 2959 icm[j].icm_baseaddr = icm_addr;
2967 2960 if (hermon_verbose) {
2968 2961 IBTF_DPRINTF_L2("ICMADDR", "rsrc %x @ %p"
2969 2962 " size %llx", j, icm[j].icm_baseaddr,
2970 2963 icm[j].table_size);
2971 2964 }
2972 2965 icm_size += icm[j].table_size;
2973 2966 }
2974 2967
2975 2968 /* Verify that we don't exceed maximum ICM size */
2976 2969 if (icm_size > devlim->max_icm_size) {
2977 2970 /* free the ICM table memory resources */
2978 2971 hermon_icm_tables_fini(state);
2979 2972 cmn_err(CE_WARN, "ICM configuration exceeds maximum "
2980 2973 "configuration: max (0x%lx) requested (0x%lx)\n",
2981 2974 (ulong_t)devlim->max_icm_size, (ulong_t)icm_size);
2982 2975 HERMON_ATTACH_MSG(state->hs_attach_buf,
2983 2976 "icm_config_toobig_fail");
2984 2977 return (DDI_FAILURE);
2985 2978 }
2986 2979
2987 2980 /* assign address to the 4 pieces of the CMPT */
2988 2981 if (j == HERMON_CMPT) {
2989 2982 uint64_t cmpt_size = icm[j].table_size >> 2;
2990 2983 #define init_cmpt_icm_baseaddr(rsrc, indx) \
2991 2984 icm[rsrc].icm_baseaddr = icm_addr + (indx * cmpt_size);
2992 2985 init_cmpt_icm_baseaddr(HERMON_CMPT_QPC, 0);
2993 2986 init_cmpt_icm_baseaddr(HERMON_CMPT_SRQC, 1);
2994 2987 init_cmpt_icm_baseaddr(HERMON_CMPT_CQC, 2);
2995 2988 init_cmpt_icm_baseaddr(HERMON_CMPT_EQC, 3);
2996 2989 }
2997 2990
2998 2991 /* Increment the ICM address for the next table */
2999 2992 icm_addr += icm[j].table_size;
3000 2993 }
3001 2994
3002 2995 /* Populate the structure for the INIT_HCA command */
3003 2996 hermon_inithca_set(state, inithca);
3004 2997
3005 2998 /*
3006 2999 * Prior to invoking INIT_HCA, we must have ICM memory in place
3007 3000 * for the reserved objects in each table. We will allocate and map
3008 3001 * this initial ICM memory here. Note that given the assignment
3009 3002 * of span_size above, tables that are smaller or equal in total
3010 3003 * size to the default span_size will be mapped in full.
3011 3004 */
3012 3005 status = hermon_icm_dma_init(state);
3013 3006 if (status != DDI_SUCCESS) {
3014 3007 /* free the ICM table memory resources */
3015 3008 hermon_icm_tables_fini(state);
3016 3009 HERMON_WARNING(state, "Failed to allocate initial ICM");
3017 3010 HERMON_ATTACH_MSG(state->hs_attach_buf,
3018 3011 "icm_config_dma_init_fail");
3019 3012 return (DDI_FAILURE);
3020 3013 }
3021 3014
3022 3015 return (DDI_SUCCESS);
3023 3016 }
3024 3017
3025 3018 /*
3026 3019 * hermon_inithca_set()
3027 3020 * Context: Only called from attach() path context
3028 3021 */
3029 3022 static void
3030 3023 hermon_inithca_set(hermon_state_t *state, hermon_hw_initqueryhca_t *inithca)
3031 3024 {
3032 3025 hermon_cfg_profile_t *cfg;
3033 3026 hermon_icm_table_t *icm;
3034 3027 int i;
3035 3028
3036 3029
3037 3030 /* Populate the INIT_HCA structure */
3038 3031 icm = state->hs_icm;
3039 3032 cfg = state->hs_cfg_profile;
3040 3033
3041 3034 /* set version */
3042 3035 inithca->version = 0x02; /* PRM 0.36 */
3043 3036 /* set cacheline - log2 in 16-byte chunks */
3044 3037 inithca->log2_cacheline = 0x2; /* optimized for 64 byte cache */
3045 3038
3046 3039 	/* we need to update the inithca info with the UAR info too */
3047 3040 inithca->uar.log_max_uars = highbit(cfg->cp_log_num_uar);
3048 3041 inithca->uar.uar_pg_sz = PAGESHIFT - HERMON_PAGESHIFT;
3049 3042
3050 3043 	/* Set endianness */
3051 3044 #ifdef _LITTLE_ENDIAN
3052 3045 inithca->big_endian = 0;
3053 3046 #else
3054 3047 inithca->big_endian = 1;
3055 3048 #endif
3056 3049
3057 3050 /* Port Checking is on by default */
3058 3051 inithca->udav_port_chk = HERMON_UDAV_PORTCHK_ENABLED;
3059 3052
3060 3053 /* Enable IPoIB checksum */
3061 3054 if (state->hs_devlim.ipoib_cksm)
3062 3055 inithca->chsum_en = 1;
3063 3056
3064 3057 /* Set each ICM table's attributes */
3065 3058 for (i = 0; i < HERMON_NUM_ICM_RESOURCES; i++) {
3066 3059 switch (icm[i].icm_type) {
3067 3060 case HERMON_CMPT:
3068 3061 inithca->tpt.cmpt_baseaddr = icm[i].icm_baseaddr;
3069 3062 break;
3070 3063
3071 3064 case HERMON_MTT:
3072 3065 inithca->tpt.mtt_baseaddr = icm[i].icm_baseaddr;
3073 3066 break;
3074 3067
3075 3068 case HERMON_DMPT:
3076 3069 inithca->tpt.dmpt_baseaddr = icm[i].icm_baseaddr;
3077 3070 inithca->tpt.log_dmpt_sz = icm[i].log_num_entries;
3078 3071 inithca->tpt.pgfault_rnr_to = 0; /* just in case */
3079 3072 break;
3080 3073
3081 3074 case HERMON_QPC:
3082 3075 inithca->context.log_num_qp = icm[i].log_num_entries;
3083 3076 inithca->context.qpc_baseaddr_h =
3084 3077 icm[i].icm_baseaddr >> 32;
3085 3078 inithca->context.qpc_baseaddr_l =
3086 3079 (icm[i].icm_baseaddr & 0xFFFFFFFF) >> 5;
3087 3080 break;
3088 3081
3089 3082 case HERMON_CQC:
3090 3083 inithca->context.log_num_cq = icm[i].log_num_entries;
3091 3084 inithca->context.cqc_baseaddr_h =
3092 3085 icm[i].icm_baseaddr >> 32;
3093 3086 inithca->context.cqc_baseaddr_l =
3094 3087 (icm[i].icm_baseaddr & 0xFFFFFFFF) >> 5;
3095 3088 break;
3096 3089
3097 3090 case HERMON_SRQC:
3098 3091 inithca->context.log_num_srq = icm[i].log_num_entries;
3099 3092 inithca->context.srqc_baseaddr_h =
3100 3093 icm[i].icm_baseaddr >> 32;
3101 3094 inithca->context.srqc_baseaddr_l =
3102 3095 (icm[i].icm_baseaddr & 0xFFFFFFFF) >> 5;
3103 3096 break;
3104 3097
3105 3098 case HERMON_EQC:
3106 3099 inithca->context.log_num_eq = icm[i].log_num_entries;
3107 3100 inithca->context.eqc_baseaddr_h =
3108 3101 icm[i].icm_baseaddr >> 32;
3109 3102 inithca->context.eqc_baseaddr_l =
3110 3103 (icm[i].icm_baseaddr & 0xFFFFFFFF) >> 5;
3111 3104 break;
3112 3105
3113 3106 case HERMON_RDB:
3114 3107 inithca->context.rdmardc_baseaddr_h =
3115 3108 icm[i].icm_baseaddr >> 32;
3116 3109 inithca->context.rdmardc_baseaddr_l =
3117 3110 (icm[i].icm_baseaddr & 0xFFFFFFFF) >> 5;
3118 3111 inithca->context.log_num_rdmardc =
3119 3112 cfg->cp_log_num_rdb - cfg->cp_log_num_qp;
3120 3113 break;
3121 3114
3122 3115 case HERMON_MCG:
3123 3116 inithca->multi.mc_baseaddr = icm[i].icm_baseaddr;
3124 3117 inithca->multi.log_mc_tbl_sz = icm[i].log_num_entries;
3125 3118 inithca->multi.log_mc_tbl_ent =
3126 3119 highbit(HERMON_MCGMEM_SZ(state)) - 1;
3127 3120 inithca->multi.log_mc_tbl_hash_sz =
3128 3121 cfg->cp_log_num_mcg_hash;
3129 3122 inithca->multi.mc_hash_fn = HERMON_MCG_DEFAULT_HASH_FN;
3130 3123 break;
3131 3124
3132 3125 case HERMON_ALTC:
3133 3126 inithca->context.altc_baseaddr = icm[i].icm_baseaddr;
3134 3127 break;
3135 3128
3136 3129 case HERMON_AUXC:
3137 3130 inithca->context.auxc_baseaddr = icm[i].icm_baseaddr;
3138 3131 break;
3139 3132
3140 3133 default:
3141 3134 break;
3142 3135
3143 3136 }
3144 3137 }
3145 3138
3146 3139 }
3147 3140
3148 3141 /*
3149 3142 * hermon_icm_tables_init()
3150 3143 * Context: Only called from attach() path context
3151 3144 *
3152 3145 * Dynamic ICM breaks the various ICM tables into "span_size" chunks
3153 3146 * to enable allocation of backing memory on demand. Arbel used a
3154 3147 * fixed size ARBEL_ICM_SPAN_SIZE (initially was 512KB) as the
3155 3148 * span_size for all ICM chunks. Hermon has other considerations,
3156 3149 * so the span_size used differs from Arbel.
3157 3150 *
3158 3151 * The basic considerations for why Hermon differs are:
3159 3152 *
3160 3153 * 1) ICM memory is in units of HERMON pages.
3161 3154 *
3162 3155 * 2) The AUXC table is approximately 1 byte per QP.
3163 3156 *
3164 3157 * 3) ICM memory for AUXC, ALTC, and RDB is allocated when
3165 3158 * the ICM memory for the corresponding QPC is allocated.
3166 3159 *
3167 3160 * 4) ICM memory for the CMPT corresponding to the various primary
3168 3161 * resources (QPC, SRQC, CQC, and EQC) is allocated when the ICM
3169 3162 * memory for the primary resource is allocated.
3170 3163 *
3171 3164 * One HERMON page (4KB) would typically map 4K QPs worth of AUXC.
3172 3165 * So, the minimum chunk for the various QPC related ICM memory should
3173 3166 * all be allocated to support the 4K QPs. Currently, this means the
3174 3167 * amount of memory for the various QP chunks is:
3175 3168 *
3176 3169 * QPC 256*4K bytes
3177 3170 * RDB 128*4K bytes
3178 3171 * CMPT 64*4K bytes
3179 3172 * ALTC 64*4K bytes
3180 3173 * AUXC 1*4K bytes
3181 3174 *
3182 3175 * The span_size chosen for the QP resource is 4KB of AUXC entries,
3183 3176 * or 1 HERMON_PAGESIZE worth, which is the minimum ICM mapping size.
3184 3177 *
3185 3178 * Other ICM resources can have their span_size be more arbitrary.
3186 3179 * This is 4K (HERMON_ICM_SPAN), except for MTTs because they are tiny.
3187 3180 */
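To make the span arithmetic above concrete, a small worked example follows; it takes HERMON_ICM_SPAN as 4096 (per the comment above) and assumes the usual one-4KB-page-per-MTT mapping, which is an illustrative assumption rather than something this function enforces.

	/*
	 * Example: with HERMON_ICM_SPAN = 4096, the MTT span set below is
	 * 4096 * 16 = 65536 MTT entries.  If each MTT maps one 4KB page,
	 * a single span then covers 65536 * 4KB = 256MB, matching the
	 * "map 256MB" note where the MTT span is assigned.
	 */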
3188 3181
3189 3182 /* macro to make the code below cleaner */
3190 3183 #define init_dependent(rsrc, dep) \
3191 3184 icm[dep].span = icm[rsrc].span; \
3192 3185 icm[dep].num_spans = icm[rsrc].num_spans; \
3193 3186 icm[dep].split_shift = icm[rsrc].split_shift; \
3194 3187 icm[dep].span_mask = icm[rsrc].span_mask; \
3195 3188 icm[dep].span_shift = icm[rsrc].span_shift; \
3196 3189 icm[dep].rsrc_mask = icm[rsrc].rsrc_mask; \
3197 3190 if (hermon_verbose) { \
3198 3191 IBTF_DPRINTF_L2("hermon", "tables_init: " \
3199 3192 "rsrc (0x%x) size (0x%lx) span (0x%x) " \
3200 3193 "num_spans (0x%x)", dep, icm[dep].table_size, \
3201 3194 icm[dep].span, icm[dep].num_spans); \
3202 3195 IBTF_DPRINTF_L2("hermon", "tables_init: " \
3203 3196 "span_shift (0x%x) split_shift (0x%x)", \
3204 3197 icm[dep].span_shift, icm[dep].split_shift); \
3205 3198 IBTF_DPRINTF_L2("hermon", "tables_init: " \
3206 3199 "span_mask (0x%x) rsrc_mask (0x%x)", \
3207 3200 icm[dep].span_mask, icm[dep].rsrc_mask); \
3208 3201 }
3209 3202
3210 3203 static void
3211 3204 hermon_icm_tables_init(hermon_state_t *state)
3212 3205 {
3213 3206 hermon_icm_table_t *icm;
3214 3207 int i, k;
3215 3208 uint32_t per_split;
3216 3209
3217 3210
3218 3211 icm = state->hs_icm;
3219 3212
3220 3213 for (i = 0; i < HERMON_NUM_ICM_RESOURCES; i++) {
3221 3214 icm[i].icm_type = i;
3222 3215 icm[i].num_entries = 1 << icm[i].log_num_entries;
3223 3216 icm[i].log_object_size = highbit(icm[i].object_size) - 1;
3224 3217 icm[i].table_size = icm[i].num_entries <<
3225 3218 icm[i].log_object_size;
3226 3219
3227 3220 /* deal with "dependent" resource types */
3228 3221 switch (i) {
3229 3222 case HERMON_AUXC:
3230 3223 #ifdef HERMON_FW_WORKAROUND
3231 3224 icm[i].table_size = 0x80000000ull;
3232 3225 /* FALLTHROUGH */
3233 3226 #endif
3234 3227 case HERMON_CMPT_QPC:
3235 3228 case HERMON_RDB:
3236 3229 case HERMON_ALTC:
3237 3230 init_dependent(HERMON_QPC, i);
3238 3231 continue;
3239 3232 case HERMON_CMPT_SRQC:
3240 3233 init_dependent(HERMON_SRQC, i);
3241 3234 continue;
3242 3235 case HERMON_CMPT_CQC:
3243 3236 init_dependent(HERMON_CQC, i);
3244 3237 continue;
3245 3238 case HERMON_CMPT_EQC:
3246 3239 init_dependent(HERMON_EQC, i);
3247 3240 continue;
3248 3241 }
3249 3242
3250 3243 icm[i].span = HERMON_ICM_SPAN; /* default #rsrc's in 1 span */
3251 3244 if (i == HERMON_MTT) /* Alloc enough MTTs to map 256MB */
3252 3245 icm[i].span = HERMON_ICM_SPAN * 16;
3253 3246 icm[i].num_spans = icm[i].num_entries / icm[i].span;
3254 3247 if (icm[i].num_spans == 0) {
3255 3248 icm[i].span = icm[i].num_entries;
3256 3249 per_split = 1;
3257 3250 icm[i].num_spans = icm[i].num_entries / icm[i].span;
3258 3251 } else {
3259 3252 per_split = icm[i].num_spans / HERMON_ICM_SPLIT;
3260 3253 if (per_split == 0) {
3261 3254 per_split = 1;
3262 3255 }
3263 3256 }
3264 3257 if (hermon_verbose)
3265 3258 IBTF_DPRINTF_L2("ICM", "rsrc %x span %x num_spans %x",
3266 3259 i, icm[i].span, icm[i].num_spans);
3267 3260
3268 3261 /*
3269 3262 * Ensure a minimum table size of an ICM page, and a
3270 3263 * maximum span size of the ICM table size. This ensures
3271 3264 * that we don't have less than an ICM page to map, which is
3272 3265 * impossible, and that we will map an entire table at
3273 3266 	 * once if its total size is less than the span size.
3274 3267 */
3275 3268 icm[i].table_size = max(icm[i].table_size, HERMON_PAGESIZE);
3276 3269
3277 3270 icm[i].span_shift = 0;
3278 3271 for (k = icm[i].span; k != 1; k >>= 1)
3279 3272 icm[i].span_shift++;
3280 3273 icm[i].split_shift = icm[i].span_shift;
3281 3274 for (k = per_split; k != 1; k >>= 1)
3282 3275 icm[i].split_shift++;
3283 3276 icm[i].span_mask = (1 << icm[i].split_shift) -
3284 3277 (1 << icm[i].span_shift);
3285 3278 icm[i].rsrc_mask = (1 << icm[i].span_shift) - 1;
3286 3279
3287 3280
3288 3281 /* Initialize the table lock */
3289 3282 mutex_init(&icm[i].icm_table_lock, NULL, MUTEX_DRIVER,
3290 3283 DDI_INTR_PRI(state->hs_intrmsi_pri));
3291 3284 cv_init(&icm[i].icm_table_cv, NULL, CV_DRIVER, NULL);
3292 3285
3293 3286 if (hermon_verbose) {
3294 3287 IBTF_DPRINTF_L2("hermon", "tables_init: "
3295 3288 "rsrc (0x%x) size (0x%lx)", i, icm[i].table_size);
3296 3289 IBTF_DPRINTF_L2("hermon", "tables_init: "
3297 3290 "span (0x%x) num_spans (0x%x)",
3298 3291 icm[i].span, icm[i].num_spans);
3299 3292 IBTF_DPRINTF_L2("hermon", "tables_init: "
3300 3293 "span_shift (0x%x) split_shift (0x%x)",
3301 3294 icm[i].span_shift, icm[i].split_shift);
3302 3295 IBTF_DPRINTF_L2("hermon", "tables_init: "
3303 3296 "span_mask (0x%x) rsrc_mask (0x%x)",
3304 3297 icm[i].span_mask, icm[i].rsrc_mask);
3305 3298 }
3306 3299 }
3307 3300
3308 3301 }
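The shift and mask values computed above decompose a resource index into three fields; the sketch below uses assumed values (span = 4096, per_split = 4) purely for illustration.

	/*
	 * With span = 4096 (span_shift = 12) and per_split = 4
	 * (split_shift = 14):
	 *	rsrc_mask = 0x0FFF   offset of the object within its span
	 *	span_mask = 0x3000   which span within the current split
	 *	bits >= split_shift  which of the HERMON_ICM_SPLIT splits
	 * For example, index 0x5ABC decomposes into split 1, span 1 and
	 * offset 0xABC.
	 */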
3309 3302
3310 3303 /*
3311 3304 * hermon_icm_tables_fini()
3312 3305 * Context: Only called from attach() path context
3313 3306 *
3314 3307 * Clean up all icm_tables. Free the bitmap and dma_info arrays.
3315 3308 */
3316 3309 static void
3317 3310 hermon_icm_tables_fini(hermon_state_t *state)
3318 3311 {
3319 3312 hermon_icm_table_t *icm;
3320 3313 int nspans;
3321 3314 int i, j;
3322 3315
3323 3316
3324 3317 icm = state->hs_icm;
3325 3318
3326 3319 for (i = 0; i < HERMON_NUM_ICM_RESOURCES; i++) {
3327 3320
3328 3321 mutex_enter(&icm[i].icm_table_lock);
3329 3322 nspans = icm[i].num_spans;
3330 3323
3331 3324 for (j = 0; j < HERMON_ICM_SPLIT; j++) {
3332 3325 if (icm[i].icm_dma[j])
3333 3326 /* Free the ICM DMA slots */
3334 3327 kmem_free(icm[i].icm_dma[j],
3335 3328 nspans * sizeof (hermon_dma_info_t));
3336 3329
3337 3330 if (icm[i].icm_bitmap[j])
3338 3331 /* Free the table bitmap */
3339 3332 kmem_free(icm[i].icm_bitmap[j],
3340 3333 (nspans + 7) / 8);
3341 3334 }
3342 3335 /* Destroy the table lock */
3343 3336 cv_destroy(&icm[i].icm_table_cv);
3344 3337 mutex_exit(&icm[i].icm_table_lock);
3345 3338 mutex_destroy(&icm[i].icm_table_lock);
3346 3339 }
3347 3340
3348 3341 }
3349 3342
3350 3343 /*
3351 3344 * hermon_icm_dma_init()
3352 3345 * Context: Only called from attach() path context
3353 3346 */
3354 3347 static int
3355 3348 hermon_icm_dma_init(hermon_state_t *state)
3356 3349 {
3357 3350 hermon_icm_table_t *icm;
3358 3351 hermon_rsrc_type_t type;
3359 3352 int status;
3360 3353
3361 3354
3362 3355 /*
3363 3356 * This routine will allocate initial ICM DMA resources for ICM
3364 3357 * tables that have reserved ICM objects. This is the only routine
3365 3358 * where we should have to allocate ICM outside of hermon_rsrc_alloc().
3366 3359 * We need to allocate ICM here explicitly, rather than in
3367 3360 * hermon_rsrc_alloc(), because we've not yet completed the resource
3368 3361 * pool initialization. When the resource pools are initialized
3369 3362 * (in hermon_rsrc_init_phase2(), see hermon_rsrc.c for more
3370 3363 * information), resource preallocations will be invoked to match
3371 3364 * the ICM allocations seen here. We will then be able to use the
3372 3365 * normal allocation path. Note we don't need to set a refcnt on
3373 3366 * these initial allocations because that will be done in the calls
3374 3367 * to hermon_rsrc_alloc() from hermon_hw_entries_init() for the
3375 3368 * "prealloc" objects (see hermon_rsrc.c for more information).
3376 3369 */
3377 3370 for (type = 0; type < HERMON_NUM_ICM_RESOURCES; type++) {
3378 3371
3379 3372 /* ICM for these is allocated within hermon_icm_alloc() */
3380 3373 switch (type) {
3381 3374 case HERMON_CMPT:
3382 3375 case HERMON_CMPT_QPC:
3383 3376 case HERMON_CMPT_SRQC:
3384 3377 case HERMON_CMPT_CQC:
3385 3378 case HERMON_CMPT_EQC:
3386 3379 case HERMON_AUXC:
3387 3380 case HERMON_ALTC:
3388 3381 case HERMON_RDB:
3389 3382 continue;
3390 3383 }
3391 3384
3392 3385 icm = &state->hs_icm[type];
3393 3386
3394 3387 mutex_enter(&icm->icm_table_lock);
3395 3388 status = hermon_icm_alloc(state, type, 0, 0);
3396 3389 mutex_exit(&icm->icm_table_lock);
3397 3390 if (status != DDI_SUCCESS) {
3398 3391 while (type--) {
3399 3392 icm = &state->hs_icm[type];
3400 3393 mutex_enter(&icm->icm_table_lock);
3401 3394 hermon_icm_free(state, type, 0, 0);
3402 3395 mutex_exit(&icm->icm_table_lock);
3403 3396 }
3404 3397 return (DDI_FAILURE);
3405 3398 }
3406 3399
3407 3400 if (hermon_verbose) {
3408 3401 IBTF_DPRINTF_L2("hermon", "hermon_icm_dma_init: "
3409 3402 "table (0x%x) index (0x%x) allocated", type, 0);
3410 3403 }
3411 3404 }
3412 3405
3413 3406 return (DDI_SUCCESS);
3414 3407 }
3415 3408
3416 3409 /*
3417 3410 * hermon_icm_dma_fini()
3418 3411 * Context: Only called from attach() path context
3419 3412 *
3420 3413 * ICM has been completely unmapped. We just free the memory here.
3421 3414 */
3422 3415 static void
3423 3416 hermon_icm_dma_fini(hermon_state_t *state)
3424 3417 {
3425 3418 hermon_icm_table_t *icm;
3426 3419 hermon_dma_info_t *dma_info;
3427 3420 hermon_rsrc_type_t type;
3428 3421 int index1, index2;
3429 3422
3430 3423
3431 3424 for (type = 0; type < HERMON_NUM_ICM_RESOURCES; type++) {
3432 3425 icm = &state->hs_icm[type];
3433 3426 for (index1 = 0; index1 < HERMON_ICM_SPLIT; index1++) {
3434 3427 dma_info = icm->icm_dma[index1];
3435 3428 if (dma_info == NULL)
3436 3429 continue;
3437 3430 for (index2 = 0; index2 < icm->num_spans; index2++) {
3438 3431 if (dma_info[index2].dma_hdl)
3439 3432 hermon_dma_free(&dma_info[index2]);
3440 3433 dma_info[index2].dma_hdl = NULL;
3441 3434 }
3442 3435 }
3443 3436 }
3444 3437
3445 3438 }
3446 3439
3447 3440 /*
3448 3441 * hermon_hca_port_init()
3449 3442 * Context: Only called from attach() path context
3450 3443 */
3451 3444 static int
3452 3445 hermon_hca_port_init(hermon_state_t *state)
3453 3446 {
3454 3447 hermon_hw_set_port_t *portinits, *initport;
3455 3448 hermon_cfg_profile_t *cfgprof;
3456 3449 uint_t num_ports;
3457 3450 int i = 0, status;
3458 3451 uint64_t maxval, val;
3459 3452 uint64_t sysimgguid, nodeguid, portguid;
3460 3453
3461 3454
3462 3455 cfgprof = state->hs_cfg_profile;
3463 3456
3464 3457 /* Get number of HCA ports */
3465 3458 num_ports = cfgprof->cp_num_ports;
3466 3459
3467 3460 /* Allocate space for Hermon set port struct(s) */
3468 3461 portinits = (hermon_hw_set_port_t *)kmem_zalloc(num_ports *
3469 3462 sizeof (hermon_hw_set_port_t), KM_SLEEP);
3470 3463
3471 3464
3472 3465
3473 3466 /* Post commands to initialize each Hermon HCA port */
3474 3467 /*
3475 3468 * In Hermon, the process is different than in previous HCAs.
3476 3469 * Here, you have to:
3477 3470 * QUERY_PORT - to get basic information from the HCA
3478 3471 * set the fields accordingly
3479 3472 * SET_PORT - to change/set everything as desired
3480 3473 * INIT_PORT - to bring the port up
3481 3474 *
3482 3475 * Needs to be done for each port in turn
3483 3476 */
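Condensed, the per-port logic below has the following shape (a sketch only; the second SET_PORT workaround and all error handling are omitted).

	/*
	 * for (port = 1; port <= num_ports; port++) {
	 *	QUERY_PORT(port)    read the current limits (MTU, width, VLs)
	 *	build hermon_hw_set_port_t from the config profile, failing
	 *	    if any configured value exceeds a queried maximum
	 *	SET_PORT(port)      push the chosen parameters
	 * }
	 * for (port = 1; port <= num_ports; port++)
	 *	INIT_PORT(port)     bring the port up
	 */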
3484 3477
3485 3478 for (i = 0; i < num_ports; i++) {
3486 3479 bzero(&state->hs_queryport, sizeof (hermon_hw_query_port_t));
3487 3480 status = hermon_cmn_query_cmd_post(state, QUERY_PORT, 0,
3488 3481 (i + 1), &state->hs_queryport,
3489 3482 sizeof (hermon_hw_query_port_t), HERMON_CMD_NOSLEEP_SPIN);
3490 3483 if (status != HERMON_CMD_SUCCESS) {
3491 3484 cmn_err(CE_CONT, "Hermon: QUERY_PORT (port %02d) "
3492 3485 "command failed: %08x\n", i + 1, status);
3493 3486 goto init_ports_fail;
3494 3487 }
3495 3488 initport = &portinits[i];
3496 3489 state->hs_initport = &portinits[i];
3497 3490
3498 3491 		bzero(initport, sizeof (hermon_hw_set_port_t));
3499 3492
3500 3493 /*
3501 3494 * Determine whether we need to override the firmware's
3502 3495 * default SystemImageGUID setting.
3503 3496 */
3504 3497 sysimgguid = cfgprof->cp_sysimgguid;
3505 3498 if (sysimgguid != 0) {
3506 3499 initport->sig = 1;
3507 3500 initport->sys_img_guid = sysimgguid;
3508 3501 }
3509 3502
3510 3503 /*
3511 3504 * Determine whether we need to override the firmware's
3512 3505 * default NodeGUID setting.
3513 3506 */
3514 3507 nodeguid = cfgprof->cp_nodeguid;
3515 3508 if (nodeguid != 0) {
3516 3509 initport->ng = 1;
3517 3510 initport->node_guid = nodeguid;
3518 3511 }
3519 3512
3520 3513 /*
3521 3514 * Determine whether we need to override the firmware's
3522 3515 * default PortGUID setting.
3523 3516 */
3524 3517 portguid = cfgprof->cp_portguid[i];
3525 3518 if (portguid != 0) {
3526 3519 initport->g0 = 1;
3527 3520 initport->guid0 = portguid;
3528 3521 }
3529 3522
3530 3523 /* Validate max MTU size */
3531 3524 maxval = state->hs_queryport.ib_mtu;
3532 3525 val = cfgprof->cp_max_mtu;
3533 3526 if (val > maxval) {
3534 3527 goto init_ports_fail;
3535 3528 }
3536 3529
3537 3530 /* Set mtu_cap to 4096 bytes */
3538 3531 initport->mmc = 1; /* set the change bit */
3539 3532 initport->mtu_cap = 5; /* for 4096 bytes */
3540 3533
3541 3534 /* Validate the max port width */
3542 3535 maxval = state->hs_queryport.ib_port_wid;
3543 3536 val = cfgprof->cp_max_port_width;
3544 3537 if (val > maxval) {
3545 3538 goto init_ports_fail;
3546 3539 }
3547 3540
3548 3541 /* Validate max VL cap size */
3549 3542 maxval = state->hs_queryport.max_vl;
3550 3543 val = cfgprof->cp_max_vlcap;
3551 3544 if (val > maxval) {
3552 3545 goto init_ports_fail;
3553 3546 }
3554 3547
3555 3548 /* Since we're doing mtu_cap, cut vl_cap down */
3556 3549 initport->mvc = 1; /* set this change bit */
3557 3550 initport->vl_cap = 3; /* 3 means vl0-vl3, 4 total */
3558 3551
3559 3552 /* Validate max GID table size */
3560 3553 maxval = ((uint64_t)1 << state->hs_queryport.log_max_gid);
3561 3554 val = ((uint64_t)1 << cfgprof->cp_log_max_gidtbl);
3562 3555 if (val > maxval) {
3563 3556 goto init_ports_fail;
3564 3557 }
3565 3558 initport->max_gid = (uint16_t)val;
3566 3559 initport->mg = 1;
3567 3560
3568 3561 /* Validate max PKey table size */
3569 3562 maxval = ((uint64_t)1 << state->hs_queryport.log_max_pkey);
3570 3563 val = ((uint64_t)1 << cfgprof->cp_log_max_pkeytbl);
3571 3564 if (val > maxval) {
3572 3565 goto init_ports_fail;
3573 3566 }
3574 3567 initport->max_pkey = (uint16_t)val;
3575 3568 initport->mp = 1;
3576 3569 /*
3577 3570 * Post the SET_PORT cmd to Hermon firmware. This sets
3578 3571 * the parameters of the port.
3579 3572 */
3580 3573 status = hermon_set_port_cmd_post(state, initport, i + 1,
3581 3574 HERMON_CMD_NOSLEEP_SPIN);
3582 3575 if (status != HERMON_CMD_SUCCESS) {
3583 3576 cmn_err(CE_CONT, "Hermon: SET_PORT (port %02d) command "
3584 3577 "failed: %08x\n", i + 1, status);
3585 3578 goto init_ports_fail;
3586 3579 }
3587 3580 /* issue another SET_PORT cmd - performance fix/workaround */
3588 3581 /* XXX - need to discuss with Mellanox */
3589 3582 		bzero(initport, sizeof (hermon_hw_set_port_t));
3590 3583 initport->cap_mask = 0x02500868;
3591 3584 status = hermon_set_port_cmd_post(state, initport, i + 1,
3592 3585 HERMON_CMD_NOSLEEP_SPIN);
3593 3586 if (status != HERMON_CMD_SUCCESS) {
3594 3587 cmn_err(CE_CONT, "Hermon: SET_PORT (port %02d) command "
3595 3588 "failed: %08x\n", i + 1, status);
3596 3589 goto init_ports_fail;
3597 3590 }
3598 3591 }
3599 3592
3600 3593 /*
3601 3594 * Finally, do the INIT_PORT for each port in turn
3602 3595 * When this command completes, the corresponding Hermon port
3603 3596 * will be physically "Up" and initialized.
3604 3597 */
3605 3598 for (i = 0; i < num_ports; i++) {
3606 3599 status = hermon_init_port_cmd_post(state, i + 1,
3607 3600 HERMON_CMD_NOSLEEP_SPIN);
3608 3601 if (status != HERMON_CMD_SUCCESS) {
3609 3602 cmn_err(CE_CONT, "Hermon: INIT_PORT (port %02d) "
3610 3603 			    "command failed: %08x\n", i + 1, status);
3611 3604 goto init_ports_fail;
3612 3605 }
3613 3606 }
3614 3607
3615 3608 /* Free up the memory for Hermon port init struct(s), return success */
3616 3609 kmem_free(portinits, num_ports * sizeof (hermon_hw_set_port_t));
3617 3610 return (DDI_SUCCESS);
3618 3611
3619 3612 init_ports_fail:
3620 3613 /*
3621 3614 * Free up the memory for Hermon port init struct(s), shutdown any
3622 3615 * successfully initialized ports, and return failure
3623 3616 */
3624 3617 kmem_free(portinits, num_ports * sizeof (hermon_hw_set_port_t));
3625 3618 (void) hermon_hca_ports_shutdown(state, i);
3626 3619
3627 3620 return (DDI_FAILURE);
3628 3621 }
3629 3622
3630 3623
3631 3624 /*
3632 3625 * hermon_hca_ports_shutdown()
3633 3626 * Context: Only called from attach() and/or detach() path contexts
3634 3627 */
3635 3628 static int
3636 3629 hermon_hca_ports_shutdown(hermon_state_t *state, uint_t num_init)
3637 3630 {
3638 3631 int i, status;
3639 3632
3640 3633 /*
3641 3634 * Post commands to shutdown all init'd Hermon HCA ports. Note: if
3642 3635 * any of these commands fail for any reason, it would be entirely
3643 3636 	 * unexpected and probably indicative of a serious problem (HW or SW).
3644 3637 	 * Although the caller typically ignores this function's return value,
3645 3638 	 * this type of failure should not go unreported; hence the warning.
3646 3639 */
3647 3640 for (i = 0; i < num_init; i++) {
3648 3641 status = hermon_close_port_cmd_post(state, i + 1,
3649 3642 HERMON_CMD_NOSLEEP_SPIN);
3650 3643 if (status != HERMON_CMD_SUCCESS) {
3651 3644 HERMON_WARNING(state, "failed to shutdown HCA port");
3652 3645 return (status);
3653 3646 }
3654 3647 }
3655 3648 return (HERMON_CMD_SUCCESS);
3656 3649 }
3657 3650
3658 3651
3659 3652 /*
3660 3653 * hermon_internal_uarpg_init
3661 3654 * Context: Only called from attach() path context
3662 3655 */
3663 3656 static int
3664 3657 hermon_internal_uarpg_init(hermon_state_t *state)
3665 3658 {
3666 3659 int status;
3667 3660 hermon_dbr_info_t *info;
3668 3661
3669 3662 /*
3670 3663 * Allocate the UAR page for kernel use. This UAR page is
3671 3664 * the privileged UAR page through which all kernel generated
3672 3665 * doorbells will be rung. There are a number of UAR pages
3673 3666 * reserved by hardware at the front of the UAR BAR, indicated
3674 3667 * by DEVCAP.num_rsvd_uar, which we have already allocated. So,
3675 3668 * the kernel page, or UAR page index num_rsvd_uar, will be
3676 3669 * allocated here for kernel use.
3677 3670 */
3678 3671
3679 3672 status = hermon_rsrc_alloc(state, HERMON_UARPG, 1, HERMON_SLEEP,
3680 3673 &state->hs_uarkpg_rsrc);
3681 3674 if (status != DDI_SUCCESS) {
3682 3675 return (DDI_FAILURE);
3683 3676 }
3684 3677
3685 3678 /* Setup pointer to kernel UAR page */
3686 3679 state->hs_uar = (hermon_hw_uar_t *)state->hs_uarkpg_rsrc->hr_addr;
3687 3680
3688 3681 /* need to set up DBr tracking as well */
3689 3682 status = hermon_dbr_page_alloc(state, &info);
3690 3683 if (status != DDI_SUCCESS) {
3691 3684 return (DDI_FAILURE);
3692 3685 }
3693 3686 state->hs_kern_dbr = info;
3694 3687 return (DDI_SUCCESS);
3695 3688 }
3696 3689
3697 3690
3698 3691 /*
3699 3692 * hermon_internal_uarpg_fini
3700 3693 * Context: Only called from attach() and/or detach() path contexts
3701 3694 */
3702 3695 static void
3703 3696 hermon_internal_uarpg_fini(hermon_state_t *state)
3704 3697 {
3705 3698 /* Free up Hermon UAR page #1 (kernel driver doorbells) */
3706 3699 hermon_rsrc_free(state, &state->hs_uarkpg_rsrc);
3707 3700 }
3708 3701
3709 3702
3710 3703 /*
3711 3704 * hermon_special_qp_contexts_reserve()
3712 3705 * Context: Only called from attach() path context
3713 3706 */
3714 3707 static int
3715 3708 hermon_special_qp_contexts_reserve(hermon_state_t *state)
3716 3709 {
3717 3710 hermon_rsrc_t *qp0_rsrc, *qp1_rsrc, *qp_resvd;
3718 3711 int status;
3719 3712
3720 3713 /* Initialize the lock used for special QP rsrc management */
3721 3714 mutex_init(&state->hs_spec_qplock, NULL, MUTEX_DRIVER,
3722 3715 DDI_INTR_PRI(state->hs_intrmsi_pri));
3723 3716
3724 3717 /*
3725 3718 * Reserve contexts for QP0. These QP contexts will be setup to
3726 3719 * act as aliases for the real QP0. Note: We are required to grab
3727 3720 * two QPs (one per port) even if we are operating in single-port
3728 3721 * mode.
3729 3722 */
3730 3723 status = hermon_rsrc_alloc(state, HERMON_QPC, 2,
3731 3724 HERMON_SLEEP, &qp0_rsrc);
3732 3725 if (status != DDI_SUCCESS) {
3733 3726 mutex_destroy(&state->hs_spec_qplock);
3734 3727 return (DDI_FAILURE);
3735 3728 }
3736 3729 state->hs_spec_qp0 = qp0_rsrc;
3737 3730
3738 3731 /*
3739 3732 * Reserve contexts for QP1. These QP contexts will be setup to
3740 3733 * act as aliases for the real QP1. Note: We are required to grab
3741 3734 * two QPs (one per port) even if we are operating in single-port
3742 3735 * mode.
3743 3736 */
3744 3737 status = hermon_rsrc_alloc(state, HERMON_QPC, 2,
3745 3738 HERMON_SLEEP, &qp1_rsrc);
3746 3739 if (status != DDI_SUCCESS) {
3747 3740 hermon_rsrc_free(state, &qp0_rsrc);
3748 3741 mutex_destroy(&state->hs_spec_qplock);
3749 3742 return (DDI_FAILURE);
3750 3743 }
3751 3744 state->hs_spec_qp1 = qp1_rsrc;
3752 3745
3753 3746 status = hermon_rsrc_alloc(state, HERMON_QPC, 4,
3754 3747 HERMON_SLEEP, &qp_resvd);
3755 3748 if (status != DDI_SUCCESS) {
3756 3749 hermon_rsrc_free(state, &qp1_rsrc);
3757 3750 hermon_rsrc_free(state, &qp0_rsrc);
3758 3751 mutex_destroy(&state->hs_spec_qplock);
3759 3752 return (DDI_FAILURE);
3760 3753 }
3761 3754 state->hs_spec_qp_unused = qp_resvd;
3762 3755
3763 3756 return (DDI_SUCCESS);
3764 3757 }
3765 3758
3766 3759
3767 3760 /*
3768 3761 * hermon_special_qp_contexts_unreserve()
3769 3762 * Context: Only called from attach() and/or detach() path contexts
3770 3763 */
3771 3764 static void
3772 3765 hermon_special_qp_contexts_unreserve(hermon_state_t *state)
3773 3766 {
3774 3767
3775 3768 /* Unreserve contexts for spec_qp_unused */
3776 3769 hermon_rsrc_free(state, &state->hs_spec_qp_unused);
3777 3770
3778 3771 /* Unreserve contexts for QP1 */
3779 3772 hermon_rsrc_free(state, &state->hs_spec_qp1);
3780 3773
3781 3774 /* Unreserve contexts for QP0 */
3782 3775 hermon_rsrc_free(state, &state->hs_spec_qp0);
3783 3776
3784 3777 /* Destroy the lock used for special QP rsrc management */
3785 3778 mutex_destroy(&state->hs_spec_qplock);
3786 3779
3787 3780 }
3788 3781
3789 3782
3790 3783 /*
3791 3784 * hermon_sw_reset()
3792 3785 * Context: Currently called only from attach() path context
3793 3786 */
3794 3787 static int
3795 3788 hermon_sw_reset(hermon_state_t *state)
3796 3789 {
3797 3790 ddi_acc_handle_t hdl = hermon_get_pcihdl(state);
3798 3791 ddi_acc_handle_t cmdhdl = hermon_get_cmdhdl(state);
3799 3792 uint32_t reset_delay;
3800 3793 int status, i;
3801 3794 uint32_t sem;
3802 3795 uint_t offset;
3803 3796 uint32_t data32; /* for devctl & linkctl */
3804 3797 int loopcnt;
3805 3798
3806 3799 /* initialize the FMA retry loop */
3807 3800 hermon_pio_init(fm_loop_cnt, fm_status, fm_test);
3808 3801 hermon_pio_init(fm_loop_cnt2, fm_status2, fm_test2);
3809 3802
3810 3803 /*
3811 3804 * If the configured software reset delay is set to zero, then we
3812 3805 * will not attempt a software reset of the Hermon device.
3813 3806 */
3814 3807 reset_delay = state->hs_cfg_profile->cp_sw_reset_delay;
3815 3808 if (reset_delay == 0) {
3816 3809 return (DDI_SUCCESS);
3817 3810 }
3818 3811
3819 3812 /* the FMA retry loop starts. */
3820 3813 hermon_pio_start(state, cmdhdl, pio_error, fm_loop_cnt, fm_status,
3821 3814 fm_test);
3822 3815 hermon_pio_start(state, hdl, pio_error2, fm_loop_cnt2, fm_status2,
3823 3816 fm_test2);
3824 3817
3825 3818 /* Query the PCI capabilities of the HCA device */
3826 3819 /* but don't process the VPD until after reset */
3827 3820 status = hermon_pci_capability_list(state, hdl);
3828 3821 if (status != DDI_SUCCESS) {
3829 3822 cmn_err(CE_NOTE, "failed to get pci capabilities list(0x%x)\n",
3830 3823 status);
3831 3824 return (DDI_FAILURE);
3832 3825 }
3833 3826
3834 3827 /*
3835 3828 * Read all PCI config info (reg0...reg63). Note: According to the
3836 3829 * Hermon software reset application note, we should not read or
3837 3830 * restore the values in reg22 and reg23.
3838 3831 * NOTE: For Hermon (and Arbel too) it says to restore the command
3839 3832 * register LAST, and technically, you need to restore the
3840 3833 * PCIE Capability "device control" and "link control" (word-sized,
3841 3834 	 * at offsets 0x08 and 0x10 from the capability ID respectively).
3842 3835 * We hold off restoring the command register - offset 0x4 - till last
3843 3836 */
3844 3837
3845 3838 	/* 1st, wait for the semaphore to assure accessibility - per PRM */
3846 3839 status = -1;
3847 3840 for (i = 0; i < NANOSEC/MICROSEC /* 1sec timeout */; i++) {
3848 3841 sem = ddi_get32(cmdhdl, state->hs_cmd_regs.sw_semaphore);
3849 3842 if (sem == 0) {
3850 3843 status = 0;
3851 3844 break;
3852 3845 }
3853 3846 drv_usecwait(1);
3854 3847 }
3855 3848
3856 3849 /* Check if timeout happens */
3857 3850 if (status == -1) {
3858 3851 /*
3859 3852 * Remove this acc handle from Hermon, then log
3860 3853 * the error.
3861 3854 */
3862 3855 hermon_pci_config_teardown(state, &hdl);
3863 3856
3864 3857 cmn_err(CE_WARN, "hermon_sw_reset timeout: "
3865 3858 "failed to get the semaphore(0x%p)\n",
3866 3859 (void *)state->hs_cmd_regs.sw_semaphore);
3867 3860
3868 3861 hermon_fm_ereport(state, HCA_IBA_ERR, HCA_ERR_NON_FATAL);
3869 3862 return (DDI_FAILURE);
3870 3863 }
3871 3864
3872 3865 for (i = 0; i < HERMON_SW_RESET_NUMREGS; i++) {
3873 3866 if ((i != HERMON_SW_RESET_REG22_RSVD) &&
3874 3867 (i != HERMON_SW_RESET_REG23_RSVD)) {
3875 3868 state->hs_cfg_data[i] = pci_config_get32(hdl, i << 2);
3876 3869 }
3877 3870 }
3878 3871
3879 3872 /*
3880 3873 * Perform the software reset (by writing 1 at offset 0xF0010)
3881 3874 */
3882 3875 ddi_put32(cmdhdl, state->hs_cmd_regs.sw_reset, HERMON_SW_RESET_START);
3883 3876
3884 3877 /*
3885 3878 * This delay is required so as not to cause a panic here. If the
3886 3879 * device is accessed too soon after reset it will not respond to
3887 3880 * config cycles, causing a Master Abort and panic.
3888 3881 */
3889 3882 drv_usecwait(reset_delay);
3890 3883
3891 3884 /*
3892 3885 * Poll waiting for the device to finish resetting.
3893 3886 */
3894 3887 loopcnt = 100; /* 100 times @ 100 usec - total delay 10 msec */
3895 3888 while ((pci_config_get32(hdl, 0) & 0x0000FFFF) != PCI_VENID_MLX) {
3896 3889 drv_usecwait(HERMON_SW_RESET_POLL_DELAY);
3897 3890 if (--loopcnt == 0)
3898 3891 break; /* just in case, break and go on */
3899 3892 }
3900 3893 if (loopcnt == 0)
3901 3894 cmn_err(CE_CONT, "!Never see VEND_ID - read == %X",
3902 3895 pci_config_get32(hdl, 0));
3903 3896
3904 3897 /*
3905 3898 * Restore the config info
3906 3899 */
3907 3900 for (i = 0; i < HERMON_SW_RESET_NUMREGS; i++) {
3908 3901 if (i == 1) continue; /* skip the status/ctrl reg */
3909 3902 if ((i != HERMON_SW_RESET_REG22_RSVD) &&
3910 3903 (i != HERMON_SW_RESET_REG23_RSVD)) {
3911 3904 pci_config_put32(hdl, i << 2, state->hs_cfg_data[i]);
3912 3905 }
3913 3906 }
3914 3907
3915 3908 /*
3916 3909 	 * PCI Express Capability - we saved these during the capability list
3917 3910 	 * walk, and we'll restore them here.
3918 3911 */
3919 3912 offset = state->hs_pci_cap_offset;
3920 3913 data32 = state->hs_pci_cap_devctl;
3921 3914 pci_config_put32(hdl, offset + HERMON_PCI_CAP_DEV_OFFS, data32);
3922 3915 data32 = state->hs_pci_cap_lnkctl;
3923 3916 pci_config_put32(hdl, offset + HERMON_PCI_CAP_LNK_OFFS, data32);
3924 3917
3925 3918 pci_config_put32(hdl, 0x04, (state->hs_cfg_data[1] | 0x0006));
3926 3919
3927 3920 /* the FMA retry loop ends. */
3928 3921 hermon_pio_end(state, hdl, pio_error2, fm_loop_cnt2, fm_status2,
3929 3922 fm_test2);
3930 3923 hermon_pio_end(state, cmdhdl, pio_error, fm_loop_cnt, fm_status,
3931 3924 fm_test);
3932 3925
3933 3926 return (DDI_SUCCESS);
3934 3927
3935 3928 pio_error2:
3936 3929 /* fall through */
3937 3930 pio_error:
3938 3931 hermon_fm_ereport(state, HCA_SYS_ERR, HCA_ERR_NON_FATAL);
3939 3932 return (DDI_FAILURE);
3940 3933 }
3941 3934
3942 3935
3943 3936 /*
3944 3937 * hermon_mcg_init()
3945 3938 * Context: Only called from attach() path context
3946 3939 */
3947 3940 static int
3948 3941 hermon_mcg_init(hermon_state_t *state)
3949 3942 {
3950 3943 uint_t mcg_tmp_sz;
3951 3944
3952 3945
3953 3946 /*
3954 3947 * Allocate space for the MCG temporary copy buffer. This is
3955 3948 * used by the Attach/Detach Multicast Group code
3956 3949 */
3957 3950 mcg_tmp_sz = HERMON_MCGMEM_SZ(state);
3958 3951 state->hs_mcgtmp = kmem_zalloc(mcg_tmp_sz, KM_SLEEP);
3959 3952
3960 3953 /*
3961 3954 * Initialize the multicast group mutex. This ensures atomic
3962 3955 * access to add, modify, and remove entries in the multicast
3963 3956 * group hash lists.
3964 3957 */
3965 3958 mutex_init(&state->hs_mcglock, NULL, MUTEX_DRIVER,
3966 3959 DDI_INTR_PRI(state->hs_intrmsi_pri));
3967 3960
3968 3961 return (DDI_SUCCESS);
3969 3962 }
3970 3963
3971 3964
3972 3965 /*
3973 3966 * hermon_mcg_fini()
3974 3967 * Context: Only called from attach() and/or detach() path contexts
3975 3968 */
3976 3969 static void
3977 3970 hermon_mcg_fini(hermon_state_t *state)
3978 3971 {
3979 3972 uint_t mcg_tmp_sz;
3980 3973
3981 3974
3982 3975 /* Free up the space used for the MCG temporary copy buffer */
3983 3976 mcg_tmp_sz = HERMON_MCGMEM_SZ(state);
3984 3977 kmem_free(state->hs_mcgtmp, mcg_tmp_sz);
3985 3978
3986 3979 /* Destroy the multicast group mutex */
3987 3980 mutex_destroy(&state->hs_mcglock);
3988 3981
3989 3982 }
3990 3983
3991 3984
3992 3985 /*
3993 3986 * hermon_fw_version_check()
3994 3987 * Context: Only called from attach() path context
3995 3988 */
3996 3989 static int
3997 3990 hermon_fw_version_check(hermon_state_t *state)
3998 3991 {
3999 3992
4000 3993 uint_t hermon_fw_ver_major;
4001 3994 uint_t hermon_fw_ver_minor;
4002 3995 uint_t hermon_fw_ver_subminor;
4003 3996
4004 3997 #ifdef FMA_TEST
4005 3998 if (hermon_test_num == -1) {
4006 3999 return (DDI_FAILURE);
4007 4000 }
4008 4001 #endif
4009 4002
4010 4003 /*
4011 4004 * Depending on which version of driver we have attached, and which
4012 4005 * HCA we've attached, the firmware version checks will be different.
4013 4006 	 * We set up the comparison values for the Hermon HCA.
4014 4007 */
4015 4008 switch (state->hs_operational_mode) {
4016 4009 case HERMON_HCA_MODE:
4017 4010 hermon_fw_ver_major = HERMON_FW_VER_MAJOR;
4018 4011 hermon_fw_ver_minor = HERMON_FW_VER_MINOR;
4019 4012 hermon_fw_ver_subminor = HERMON_FW_VER_SUBMINOR;
4020 4013 break;
4021 4014
4022 4015 default:
4023 4016 return (DDI_FAILURE);
4024 4017 }
4025 4018
4026 4019 /*
4027 4020 * If FW revision major number is less than acceptable,
4028 4021 * return failure, else if greater return success. If
4029 4022 	 * the major numbers are equal then check the minor number
4030 4023 */
4031 4024 if (state->hs_fw.fw_rev_major < hermon_fw_ver_major) {
4032 4025 return (DDI_FAILURE);
4033 4026 } else if (state->hs_fw.fw_rev_major > hermon_fw_ver_major) {
4034 4027 return (DDI_SUCCESS);
4035 4028 }
4036 4029
4037 4030 /*
4038 4031 * Do the same check as above, except for minor revision numbers
4039 4032 	 * If the minor numbers are equal then check the subminor number
4040 4033 */
4041 4034 if (state->hs_fw.fw_rev_minor < hermon_fw_ver_minor) {
4042 4035 return (DDI_FAILURE);
4043 4036 } else if (state->hs_fw.fw_rev_minor > hermon_fw_ver_minor) {
4044 4037 return (DDI_SUCCESS);
4045 4038 }
4046 4039
4047 4040 /*
4048 4041 * Once again we do the same check as above, except for the subminor
4049 4042 * revision number. If the subminor numbers are equal here, then
4050 4043 * these are the same firmware version, return success
4051 4044 */
4052 4045 if (state->hs_fw.fw_rev_subminor < hermon_fw_ver_subminor) {
4053 4046 return (DDI_FAILURE);
4054 4047 } else if (state->hs_fw.fw_rev_subminor > hermon_fw_ver_subminor) {
4055 4048 return (DDI_SUCCESS);
4056 4049 }
4057 4050
4058 4051 return (DDI_SUCCESS);
4059 4052 }
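Stated compactly, the cascaded checks above implement a lexicographic comparison.

	/*
	 * The check passes exactly when the installed (fw_rev_major,
	 * fw_rev_minor, fw_rev_subminor) triple compares lexicographically
	 * greater than or equal to the required (hermon_fw_ver_major,
	 * hermon_fw_ver_minor, hermon_fw_ver_subminor) triple.
	 */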
4060 4053
4061 4054
4062 4055 /*
4063 4056 * hermon_device_info_report()
4064 4057 * Context: Only called from attach() path context
4065 4058 */
4066 4059 static void
4067 4060 hermon_device_info_report(hermon_state_t *state)
4068 4061 {
4069 4062
4070 4063 cmn_err(CE_CONT, "?hermon%d: FW ver: %04d.%04d.%04d, "
4071 4064 "HW rev: %02d\n", state->hs_instance, state->hs_fw.fw_rev_major,
4072 4065 state->hs_fw.fw_rev_minor, state->hs_fw.fw_rev_subminor,
4073 4066 state->hs_revision_id);
4074 4067 cmn_err(CE_CONT, "?hermon%d: %64s (0x%016" PRIx64 ")\n",
4075 4068 state->hs_instance, state->hs_nodedesc, state->hs_nodeguid);
4076 4069
4077 4070 }
4078 4071
4079 4072
4080 4073 /*
4081 4074 * hermon_pci_capability_list()
4082 4075 * Context: Only called from attach() path context
4083 4076 */
4084 4077 static int
4085 4078 hermon_pci_capability_list(hermon_state_t *state, ddi_acc_handle_t hdl)
4086 4079 {
4087 4080 uint_t offset, data;
4088 4081 uint32_t data32;
4089 4082
4090 4083 state->hs_pci_cap_offset = 0; /* make sure it's cleared */
4091 4084
4092 4085 /*
4093 4086 * Check for the "PCI Capabilities" bit in the "Status Register".
4094 4087 * Bit 4 in this register indicates the presence of a "PCI
4095 4088 * Capabilities" list.
4096 4089 *
4097 4090 * PCI-Express requires this bit to be set to 1.
4098 4091 */
4099 4092 data = pci_config_get16(hdl, 0x06);
4100 4093 if ((data & 0x10) == 0) {
4101 4094 return (DDI_FAILURE);
4102 4095 }
4103 4096
4104 4097 /*
4105 4098 * Starting from offset 0x34 in PCI config space, find the
4106 4099 * head of "PCI capabilities" list, and walk the list. If
4107 4100 * capabilities of a known type are encountered (e.g.
4108 4101 * "PCI-X Capability"), then call the appropriate handler
4109 4102 * function.
4110 4103 */
4111 4104 offset = pci_config_get8(hdl, 0x34);
4112 4105 while (offset != 0x0) {
4113 4106 data = pci_config_get8(hdl, offset);
4114 4107 /*
4115 4108 * Check for known capability types. Hermon has the
4116 4109 * following:
4117 4110 		 *    o Power Mgmt         (0x01)
4118 4111 * o VPD Capability (0x03)
4119 4112 * o PCI-E Capability (0x10)
4120 4113 * o MSIX Capability (0x11)
4121 4114 */
4122 4115 switch (data) {
4123 4116 case 0x01:
4124 4117 /* power mgmt handling */
4125 4118 break;
4126 4119 case 0x03:
4127 4120
4128 4121 /*
4129 4122  * Reading the PCIe VPD is inconsistent - that is, it sometimes causes
4130 4123  * problems on (mostly) X64, though we've also seen problems w/ Sparc
4131 4124  * and Tavor.  So, for now, until it is root caused, don't try to
4132 4125  * read it.
4133 4126 */
4134 4127 #ifdef HERMON_VPD_WORKS
4135 4128 hermon_pci_capability_vpd(state, hdl, offset);
4136 4129 #else
4137 4130 delay(100);
4138 4131 hermon_pci_capability_vpd(state, hdl, offset);
4139 4132 #endif
4140 4133 break;
4141 4134 case 0x10:
4142 4135 /*
4143 4136 * PCI Express Capability - save offset & contents
4144 4137 * for later in reset
4145 4138 */
4146 4139 state->hs_pci_cap_offset = offset;
4147 4140 data32 = pci_config_get32(hdl,
4148 4141 offset + HERMON_PCI_CAP_DEV_OFFS);
4149 4142 state->hs_pci_cap_devctl = data32;
4150 4143 data32 = pci_config_get32(hdl,
4151 4144 offset + HERMON_PCI_CAP_LNK_OFFS);
4152 4145 state->hs_pci_cap_lnkctl = data32;
4153 4146 break;
4154 4147 case 0x11:
4155 4148 /*
4156 4149 * MSIX support - nothing to do, taken care of in the
4157 4150 			 * MSI/MSIX interrupt framework
4158 4151 */
4159 4152 break;
4160 4153 default:
4161 4154 /* just go on to the next */
4162 4155 break;
4163 4156 }
4164 4157
4165 4158 /* Get offset of next entry in list */
4166 4159 offset = pci_config_get8(hdl, offset + 1);
4167 4160 }
4168 4161
4169 4162 return (DDI_SUCCESS);
4170 4163 }
4171 4164
4172 4165 /*
4173 4166 * hermon_pci_read_vpd()
4174 4167 * Context: Only called from attach() path context
4175 4168 * utility routine for hermon_pci_capability_vpd()
4176 4169 */
4177 4170 static int
4178 4171 hermon_pci_read_vpd(ddi_acc_handle_t hdl, uint_t offset, uint32_t addr,
4179 4172 uint32_t *data)
4180 4173 {
4181 4174 int retry = 40; /* retry counter for EEPROM poll */
4182 4175 uint32_t val;
4183 4176 int vpd_addr = offset + 2;
4184 4177 int vpd_data = offset + 4;
4185 4178
4186 4179 /*
4187 4180 * In order to read a 32-bit value from VPD, we are to write down
4188 4181 * the address (offset in the VPD itself) to the address register.
4189 4182 * To signal the read, we also clear bit 31. We then poll on bit 31
4190 4183 * and when it is set, we can then read our 4 bytes from the data
4191 4184 * register.
4192 4185 */
4193 4186 (void) pci_config_put32(hdl, offset, addr << 16);
4194 4187 do {
4195 4188 drv_usecwait(1000);
4196 4189 val = pci_config_get16(hdl, vpd_addr);
4197 4190 if (val & 0x8000) { /* flag bit set */
4198 4191 *data = pci_config_get32(hdl, vpd_data);
4199 4192 return (DDI_SUCCESS);
4200 4193 }
4201 4194 } while (--retry);
4202 4195 	/* read of flag failed; log one message but count the failures */
4203 4196 if (debug_vpd == 0)
4204 4197 cmn_err(CE_NOTE,
4205 4198 "!Failed to see flag bit after VPD addr write\n");
4206 4199 debug_vpd++;
4207 4200
4208 4201
4209 4202 vpd_read_fail:
4210 4203 return (DDI_FAILURE);
4211 4204 }
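For reference, the register layout relied on above is the standard PCI VPD capability; this note summarizes that layout and is not Hermon-specific.

	/*
	 *	cap + 0x0   capability ID / next-capability pointer
	 *	cap + 0x2   VPD Address register; bit 15 is the completion
	 *	            flag (the same bit is bit 31 of the 32-bit write
	 *	            to cap + 0x0)
	 *	cap + 0x4   VPD Data register (32 bits)
	 * Writing (addr << 16) to cap + 0x0 loads the address with the flag
	 * cleared; hardware sets the flag once the data register is valid.
	 */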
4212 4205
4213 4206
4214 4207
4215 4208 /*
4216 4209 * hermon_pci_capability_vpd()
4217 4210 * Context: Only called from attach() path context
4218 4211 */
4219 4212 static void
4220 4213 hermon_pci_capability_vpd(hermon_state_t *state, ddi_acc_handle_t hdl,
4221 4214 uint_t offset)
4222 4215 {
4223 4216 uint8_t name_length;
4224 4217 uint8_t pn_length;
4225 4218 int i, err = 0;
4226 4219 int vpd_str_id = 0;
4227 4220 int vpd_ro_desc;
4228 4221 int vpd_ro_pn_desc;
4229 4222 #ifdef _BIG_ENDIAN
4230 4223 uint32_t data32;
4231 4224 #endif /* _BIG_ENDIAN */
4232 4225 union {
4233 4226 uint32_t vpd_int[HERMON_VPD_HDR_DWSIZE];
4234 4227 uchar_t vpd_char[HERMON_VPD_HDR_BSIZE];
4235 4228 } vpd;
4236 4229
4237 4230
4238 4231 /*
4239 4232 * Read in the Vital Product Data (VPD) to the extend needed
4240 4233 * by the fwflash utility
4241 4234 */
4242 4235 for (i = 0; i < HERMON_VPD_HDR_DWSIZE; i++) {
4243 4236 err = hermon_pci_read_vpd(hdl, offset, i << 2, &vpd.vpd_int[i]);
4244 4237 if (err != DDI_SUCCESS) {
4245 4238 cmn_err(CE_NOTE, "!VPD read failed\n");
4246 4239 goto out;
4247 4240 }
4248 4241 }
4249 4242
4250 4243 #ifdef _BIG_ENDIAN
4251 4244 /* Need to swap bytes for big endian. */
4252 4245 for (i = 0; i < HERMON_VPD_HDR_DWSIZE; i++) {
4253 4246 data32 = vpd.vpd_int[i];
4254 4247 vpd.vpd_char[(i << 2) + 3] =
4255 4248 (uchar_t)((data32 & 0xFF000000) >> 24);
4256 4249 vpd.vpd_char[(i << 2) + 2] =
4257 4250 (uchar_t)((data32 & 0x00FF0000) >> 16);
4258 4251 vpd.vpd_char[(i << 2) + 1] =
4259 4252 (uchar_t)((data32 & 0x0000FF00) >> 8);
4260 4253 vpd.vpd_char[i << 2] = (uchar_t)(data32 & 0x000000FF);
4261 4254 }
4262 4255 #endif /* _BIG_ENDIAN */
4263 4256
4264 4257 /* Check for VPD String ID Tag */
4265 4258 if (vpd.vpd_char[vpd_str_id] == 0x82) {
4266 4259 /* get the product name */
4267 4260 name_length = (uint8_t)vpd.vpd_char[vpd_str_id + 1];
4268 4261 if (name_length > sizeof (state->hs_hca_name)) {
4269 4262 cmn_err(CE_NOTE, "!VPD name too large (0x%x)\n",
4270 4263 name_length);
4271 4264 goto out;
4272 4265 }
4273 4266 (void) memcpy(state->hs_hca_name, &vpd.vpd_char[vpd_str_id + 3],
4274 4267 name_length);
4275 4268 state->hs_hca_name[name_length] = 0;
4276 4269
4277 4270 /* get the part number */
4278 4271 vpd_ro_desc = name_length + 3; /* read-only tag location */
4279 4272 vpd_ro_pn_desc = vpd_ro_desc + 3; /* P/N keyword location */
4280 4273
4281 4274 /* Verify read-only tag and Part Number keyword. */
4282 4275 if (vpd.vpd_char[vpd_ro_desc] != 0x90 ||
4283 4276 		    (vpd.vpd_char[vpd_ro_pn_desc] != 'P' ||
4284 4277 vpd.vpd_char[vpd_ro_pn_desc + 1] != 'N')) {
4285 4278 cmn_err(CE_NOTE, "!VPD Part Number not found\n");
4286 4279 goto out;
4287 4280 }
4288 4281
4289 4282 pn_length = (uint8_t)vpd.vpd_char[vpd_ro_pn_desc + 2];
4290 4283 if (pn_length > sizeof (state->hs_hca_pn)) {
4291 4284 cmn_err(CE_NOTE, "!VPD part number too large (0x%x)\n",
4292 4285 			    pn_length);
4293 4286 goto out;
4294 4287 }
4295 4288 (void) memcpy(state->hs_hca_pn,
4296 4289 &vpd.vpd_char[vpd_ro_pn_desc + 3],
4297 4290 pn_length);
4298 4291 state->hs_hca_pn[pn_length] = 0;
4299 4292 state->hs_hca_pn_len = pn_length;
4300 4293 cmn_err(CE_CONT, "!vpd %s\n", state->hs_hca_pn);
4301 4294 } else {
4302 4295 /* Wrong VPD String ID Tag */
4303 4296 cmn_err(CE_NOTE, "!VPD String ID Tag not found, tag: %02x\n",
4304 4297 vpd.vpd_char[0]);
4305 4298 goto out;
4306 4299 }
4307 4300 return;
4308 4301 out:
4309 4302 state->hs_hca_pn_len = 0;
4310 4303 }
4311 4304
4312 4305
4313 4306
4314 4307 /*
4315 4308 * hermon_intr_or_msi_init()
4316 4309 * Context: Only called from attach() path context
4317 4310 */
4318 4311 static int
4319 4312 hermon_intr_or_msi_init(hermon_state_t *state)
4320 4313 {
4321 4314 int status;
4322 4315
4323 4316 /* Query for the list of supported interrupt event types */
4324 4317 status = ddi_intr_get_supported_types(state->hs_dip,
4325 4318 &state->hs_intr_types_avail);
4326 4319 if (status != DDI_SUCCESS) {
4327 4320 return (DDI_FAILURE);
4328 4321 }
4329 4322
4330 4323 /*
4331 4324 * If Hermon supports MSI-X in this system (and, if it
4332 4325 * hasn't been overridden by a configuration variable), then
4333 4326 * the default behavior is to use a single MSI-X. Otherwise,
4334 4327 * fallback to using legacy interrupts. Also, if MSI-X is chosen,
4335 4328 * but fails for whatever reasons, then next try MSI
4336 4329 */
4337 4330 if ((state->hs_cfg_profile->cp_use_msi_if_avail != 0) &&
4338 4331 (state->hs_intr_types_avail & DDI_INTR_TYPE_MSIX)) {
4339 4332 status = hermon_add_intrs(state, DDI_INTR_TYPE_MSIX);
4340 4333 if (status == DDI_SUCCESS) {
4341 4334 state->hs_intr_type_chosen = DDI_INTR_TYPE_MSIX;
4342 4335 return (DDI_SUCCESS);
4343 4336 }
4344 4337 }
4345 4338
4346 4339 /*
4347 4340 * If Hermon supports MSI in this system (and, if it
4348 4341 * hasn't been overridden by a configuration variable), then
4349 4342 	 * the default behavior is to use a single MSI. Otherwise,
4350 4343 * fallback to using legacy interrupts. Also, if MSI is chosen,
4351 4344 * but fails for whatever reasons, then fallback to using legacy
4352 4345 * interrupts.
4353 4346 */
4354 4347 if ((state->hs_cfg_profile->cp_use_msi_if_avail != 0) &&
4355 4348 (state->hs_intr_types_avail & DDI_INTR_TYPE_MSI)) {
4356 4349 status = hermon_add_intrs(state, DDI_INTR_TYPE_MSI);
4357 4350 if (status == DDI_SUCCESS) {
4358 4351 state->hs_intr_type_chosen = DDI_INTR_TYPE_MSI;
4359 4352 return (DDI_SUCCESS);
4360 4353 }
4361 4354 }
4362 4355
4363 4356 /*
4364 4357 * MSI interrupt allocation failed, or was not available. Fallback to
4365 4358 * legacy interrupt support.
4366 4359 */
4367 4360 if (state->hs_intr_types_avail & DDI_INTR_TYPE_FIXED) {
4368 4361 status = hermon_add_intrs(state, DDI_INTR_TYPE_FIXED);
4369 4362 if (status == DDI_SUCCESS) {
4370 4363 state->hs_intr_type_chosen = DDI_INTR_TYPE_FIXED;
4371 4364 return (DDI_SUCCESS);
4372 4365 }
4373 4366 }
4374 4367
4375 4368 /*
4376 4369 * None of MSI, MSI-X, nor legacy interrupts were successful.
4377 4370 * Return failure.
4378 4371 */
4379 4372 return (DDI_FAILURE);
4380 4373 }
4381 4374
4382 4375 /* ARGSUSED */
4383 4376 static int
4384 4377 hermon_intr_cb_handler(dev_info_t *dip, ddi_cb_action_t action, void *cbarg,
4385 4378 void *arg1, void *arg2)
4386 4379 {
4387 4380 hermon_state_t *state = (hermon_state_t *)arg1;
4388 4381
4389 4382 IBTF_DPRINTF_L2("hermon", "interrupt callback: instance %d, "
4390 4383 "action %d, cbarg %d\n", state->hs_instance, action,
4391 4384 (uint32_t)(uintptr_t)cbarg);
4392 4385 return (DDI_SUCCESS);
4393 4386 }
4394 4387
4395 4388 /*
4396 4389 * hermon_add_intrs()
4397 4390  *    Context: Only called from attach() path context
4398 4391 */
4399 4392 static int
4400 4393 hermon_add_intrs(hermon_state_t *state, int intr_type)
4401 4394 {
4402 4395 int status;
4403 4396
4404 4397 if (state->hs_intr_cb_hdl == NULL) {
4405 4398 status = ddi_cb_register(state->hs_dip, DDI_CB_FLAG_INTR,
4406 4399 hermon_intr_cb_handler, state, NULL,
4407 4400 &state->hs_intr_cb_hdl);
4408 4401 if (status != DDI_SUCCESS) {
4409 4402 cmn_err(CE_CONT, "ddi_cb_register failed: 0x%x\n",
4410 4403 status);
4411 4404 state->hs_intr_cb_hdl = NULL;
4412 4405 return (DDI_FAILURE);
4413 4406 }
4414 4407 }
4415 4408
4416 4409 /* Get number of interrupts/MSI supported */
4417 4410 status = ddi_intr_get_nintrs(state->hs_dip, intr_type,
4418 4411 &state->hs_intrmsi_count);
4419 4412 if (status != DDI_SUCCESS) {
4420 4413 (void) ddi_cb_unregister(state->hs_intr_cb_hdl);
4421 4414 state->hs_intr_cb_hdl = NULL;
4422 4415 return (DDI_FAILURE);
4423 4416 }
4424 4417
4425 4418 /* Get number of available interrupts/MSI */
4426 4419 status = ddi_intr_get_navail(state->hs_dip, intr_type,
4427 4420 &state->hs_intrmsi_avail);
4428 4421 if (status != DDI_SUCCESS) {
4429 4422 (void) ddi_cb_unregister(state->hs_intr_cb_hdl);
4430 4423 state->hs_intr_cb_hdl = NULL;
4431 4424 return (DDI_FAILURE);
4432 4425 }
4433 4426
4434 4427 /* Ensure that we have at least one (1) usable MSI or interrupt */
4435 4428 if ((state->hs_intrmsi_avail < 1) || (state->hs_intrmsi_count < 1)) {
4436 4429 (void) ddi_cb_unregister(state->hs_intr_cb_hdl);
4437 4430 state->hs_intr_cb_hdl = NULL;
4438 4431 return (DDI_FAILURE);
4439 4432 }
4440 4433
4441 4434 /*
4442 4435 * Allocate the #interrupt/MSI handles.
4443 4436 * The number we request is the minimum of these three values:
4444 4437 * HERMON_MSIX_MAX driver maximum (array size)
4445 4438 * hermon_msix_max /etc/system override to...
4446 4439 * HERMON_MSIX_MAX
4447 4440 * state->hs_intrmsi_avail Maximum the ddi provides.
4448 4441 */
4449 4442 status = ddi_intr_alloc(state->hs_dip, &state->hs_intrmsi_hdl[0],
4450 4443 intr_type, 0, min(min(HERMON_MSIX_MAX, state->hs_intrmsi_avail),
4451 4444 hermon_msix_max), &state->hs_intrmsi_allocd, DDI_INTR_ALLOC_NORMAL);
4452 4445 if (status != DDI_SUCCESS) {
4453 4446 (void) ddi_cb_unregister(state->hs_intr_cb_hdl);
4454 4447 state->hs_intr_cb_hdl = NULL;
4455 4448 return (DDI_FAILURE);
4456 4449 }
4457 4450
4458 4451 /* Ensure that we have allocated at least one (1) MSI or interrupt */
4459 4452 if (state->hs_intrmsi_allocd < 1) {
4460 4453 (void) ddi_cb_unregister(state->hs_intr_cb_hdl);
4461 4454 state->hs_intr_cb_hdl = NULL;
4462 4455 return (DDI_FAILURE);
4463 4456 }
4464 4457
4465 4458 /*
4466 4459 * Extract the priority for the allocated interrupt/MSI. This
4467 4460 * will be used later when initializing certain mutexes.
4468 4461 */
4469 4462 status = ddi_intr_get_pri(state->hs_intrmsi_hdl[0],
4470 4463 &state->hs_intrmsi_pri);
4471 4464 if (status != DDI_SUCCESS) {
4472 4465 /* Free the allocated interrupt/MSI handle */
4473 4466 (void) ddi_intr_free(state->hs_intrmsi_hdl[0]);
4474 4467
4475 4468 (void) ddi_cb_unregister(state->hs_intr_cb_hdl);
4476 4469 state->hs_intr_cb_hdl = NULL;
4477 4470 return (DDI_FAILURE);
4478 4471 }
4479 4472
4480 4473 /* Make sure the interrupt/MSI priority is below 'high level' */
4481 4474 if (state->hs_intrmsi_pri >= ddi_intr_get_hilevel_pri()) {
4482 4475 /* Free the allocated interrupt/MSI handle */
4483 4476 (void) ddi_intr_free(state->hs_intrmsi_hdl[0]);
4484 4477
4485 4478 return (DDI_FAILURE);
4486 4479 }
4487 4480
4488 4481 /* Get add'l capability information regarding interrupt/MSI */
4489 4482 status = ddi_intr_get_cap(state->hs_intrmsi_hdl[0],
4490 4483 &state->hs_intrmsi_cap);
4491 4484 if (status != DDI_SUCCESS) {
4492 4485 /* Free the allocated interrupt/MSI handle */
4493 4486 (void) ddi_intr_free(state->hs_intrmsi_hdl[0]);
4494 4487
4495 4488 return (DDI_FAILURE);
4496 4489 }
4497 4490
4498 4491 return (DDI_SUCCESS);
4499 4492 }
4500 4493
4501 4494
4502 4495 /*
4503 4496 * hermon_intr_or_msi_fini()
4504 4497 * Context: Only called from attach() and/or detach() path contexts
4505 4498 */
4506 4499 static int
4507 4500 hermon_intr_or_msi_fini(hermon_state_t *state)
4508 4501 {
4509 4502 int status;
4510 4503 int intr;
4511 4504
4512 4505 for (intr = 0; intr < state->hs_intrmsi_allocd; intr++) {
4513 4506
4514 4507 /* Free the allocated interrupt/MSI handle */
4515 4508 status = ddi_intr_free(state->hs_intrmsi_hdl[intr]);
4516 4509 if (status != DDI_SUCCESS) {
4517 4510 return (DDI_FAILURE);
4518 4511 }
4519 4512 }
4520 4513 if (state->hs_intr_cb_hdl) {
4521 4514 (void) ddi_cb_unregister(state->hs_intr_cb_hdl);
4522 4515 state->hs_intr_cb_hdl = NULL;
4523 4516 }
4524 4517 return (DDI_SUCCESS);
4525 4518 }
4526 4519
4527 4520
4528 4521 /*ARGSUSED*/
4529 4522 void
4530 4523 hermon_pci_capability_msix(hermon_state_t *state, ddi_acc_handle_t hdl,
4531 4524 uint_t offset)
4532 4525 {
4533 4526 uint32_t msix_data;
4534 4527 uint16_t msg_cntr;
4535 4528 uint32_t t_offset; /* table offset */
4536 4529 uint32_t t_bir;
4537 4530 uint32_t p_offset; /* pba */
4538 4531 uint32_t p_bir;
4539 4532 int t_size; /* size in entries - each is 4 dwords */
4540 4533
4541 4534 /* come in with offset pointing at the capability structure */
4542 4535
4543 4536 msix_data = pci_config_get32(hdl, offset);
4544 4537 cmn_err(CE_CONT, "Full cap structure dword = %X\n", msix_data);
4545 4538 msg_cntr = pci_config_get16(hdl, offset+2);
4546 4539 cmn_err(CE_CONT, "MSIX msg_control = %X\n", msg_cntr);
4547 4540 offset += 4;
4548 4541 msix_data = pci_config_get32(hdl, offset); /* table info */
4549 4542 t_offset = (msix_data & 0xFFF8) >> 3;
4550 4543 t_bir = msix_data & 0x07;
4551 4544 offset += 4;
4552 4545 cmn_err(CE_CONT, " table %X --offset = %X, bir(bar) = %X\n",
4553 4546 msix_data, t_offset, t_bir);
4554 4547 msix_data = pci_config_get32(hdl, offset); /* PBA info */
4555 4548 p_offset = (msix_data & 0xFFF8) >> 3;
4556 4549 p_bir = msix_data & 0x07;
4557 4550
4558 4551 cmn_err(CE_CONT, " PBA %X --offset = %X, bir(bar) = %X\n",
4559 4552 msix_data, p_offset, p_bir);
4560 4553 t_size = msg_cntr & 0x7FF; /* low eleven bits */
4561 4554 cmn_err(CE_CONT, " table size = %X entries\n", t_size);
4562 4555
4563 4556 offset = t_offset; /* reuse this for offset from BAR */
4564 4557 #ifdef HERMON_SUPPORTS_MSIX_BAR
4565 4558 cmn_err(CE_CONT, "First 2 table entries behind BAR2 \n");
4566 4559 for (i = 0; i < 2; i++) {
4567 4560 for (j = 0; j < 4; j++, offset += 4) {
4568 4561 msix_data = ddi_get32(state->hs_reg_msihdl,
4569 4562 (uint32_t *)((uintptr_t)state->hs_reg_msi_baseaddr
4570 4563 + offset));
4571 4564 cmn_err(CE_CONT, "MSI table entry %d, dword %d == %X\n",
4572 4565 i, j, msix_data);
4573 4566 }
4574 4567 }
4575 4568 #endif
4576 4569
4577 4570 }
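
For reference, the field extraction above follows the PCI MSI-X capability layout: bits 10:0 of Message Control hold the table size minus one, bit 15 is MSI-X Enable, and the low three bits of the Table and PBA dwords select the BAR (BIR) while the remaining bits carry the byte offset; note that the debug routine above prints that offset in 8-byte units. A minimal standalone sketch of the decode on plain integers, with illustrative names and made-up register values (not code from this driver):

#include <stdio.h>
#include <stdint.h>

/* Decode the MSI-X capability dwords; illustrative only, no device access. */
static void
decode_msix_cap(uint16_t msg_ctrl, uint32_t tbl_dword, uint32_t pba_dword)
{
	unsigned nvec = (msg_ctrl & 0x7FF) + 1;		/* field holds N - 1 */
	int enable = (msg_ctrl >> 15) & 1;		/* MSI-X Enable */
	int fmask = (msg_ctrl >> 14) & 1;		/* Function Mask */
	unsigned t_bir = tbl_dword & 0x7;		/* table BAR indicator */
	unsigned t_off = tbl_dword & ~0x7U;		/* table byte offset */
	unsigned p_bir = pba_dword & 0x7;		/* PBA BAR indicator */
	unsigned p_off = pba_dword & ~0x7U;		/* PBA byte offset */

	(void) printf("%u vectors, enable=%d mask=%d, "
	    "table BIR %u offset 0x%X, PBA BIR %u offset 0x%X\n",
	    nvec, enable, fmask, t_bir, t_off, p_bir, p_off);
}

int
main(void)
{
	/* made-up values: 128 vectors, table in BAR2 at 0x2000, PBA at 0x3000 */
	decode_msix_cap(0x007F, 0x00002002, 0x00003002);
	return (0);
}
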
4578 4571
4579 4572 /*
4580 4573 * X86 fastreboot support functions.
4581 4574 * These functions are used to save/restore MSI-X table/PBA and also
4582 4575 * to disable MSI-X interrupts in hermon_quiesce().
4583 4576 */
4584 4577
4585 4578 /* Return the message control for MSI-X */
4586 4579 static ushort_t
4587 4580 get_msix_ctrl(dev_info_t *dip)
4588 4581 {
4589 4582 ushort_t msix_ctrl = 0, caps_ctrl = 0;
4590 4583 hermon_state_t *state = ddi_get_soft_state(hermon_statep,
4591 4584 DEVI(dip)->devi_instance);
4592 4585 ddi_acc_handle_t pci_cfg_hdl = hermon_get_pcihdl(state);
4593 4586 ASSERT(pci_cfg_hdl != NULL);
4594 4587
4595 4588 if ((PCI_CAP_LOCATE(pci_cfg_hdl,
4596 4589 PCI_CAP_ID_MSI_X, &caps_ctrl) == DDI_SUCCESS)) {
4597 4590 if ((msix_ctrl = PCI_CAP_GET16(pci_cfg_hdl, NULL, caps_ctrl,
4598 4591 PCI_MSIX_CTRL)) == PCI_CAP_EINVAL16)
4599 4592 return (0);
4600 4593 }
4601 4594 ASSERT(msix_ctrl != 0);
4602 4595
4603 4596 return (msix_ctrl);
4604 4597 }
4605 4598
4606 4599 /* Return the MSI-X table size */
4607 4600 static size_t
4608 4601 get_msix_tbl_size(dev_info_t *dip)
4609 4602 {
4610 4603 ushort_t msix_ctrl = get_msix_ctrl(dip);
4611 4604 ASSERT(msix_ctrl != 0);
4612 4605
4613 4606 return (((msix_ctrl & PCI_MSIX_TBL_SIZE_MASK) + 1) *
4614 4607 PCI_MSIX_VECTOR_SIZE);
4615 4608 }
4616 4609
4617 4610 /* Return the MSI-X PBA size */
4618 4611 static size_t
4619 4612 get_msix_pba_size(dev_info_t *dip)
4620 4613 {
4621 4614 ushort_t msix_ctrl = get_msix_ctrl(dip);
4622 4615 ASSERT(msix_ctrl != 0);
4623 4616
4624 4617 return (((msix_ctrl & PCI_MSIX_TBL_SIZE_MASK) + 64) / 64 * 8);
4625 4618 }
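
As a worked example of the two formulas, a table-size field of 127 means 128 vectors: the table occupies 128 * 16 = 2048 bytes, and the pending bits need (127 + 64) / 64 = 2 quadwords, or 16 bytes. The same arithmetic as a standalone sketch (illustrative names, not code from this driver):

#include <stdio.h>
#include <stddef.h>

/* tbl_size_field stands in for (msix_ctrl & PCI_MSIX_TBL_SIZE_MASK). */
static size_t
msix_table_bytes(unsigned tbl_size_field)
{
	/* the field holds N - 1; each vector entry is 16 bytes (4 dwords) */
	return ((size_t)(tbl_size_field + 1) * 16);
}

static size_t
msix_pba_bytes(unsigned tbl_size_field)
{
	/* round N pending bits up to whole 64-bit quadwords, 8 bytes each */
	return ((size_t)(tbl_size_field + 64) / 64 * 8);
}

int
main(void)
{
	/* 128 vectors: expect a 2048-byte table and a 16-byte PBA */
	(void) printf("%zu %zu\n", msix_table_bytes(127), msix_pba_bytes(127));
	return (0);
}
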
4626 4619
4627 4620 /* Set up the MSI-X table/PBA save area */
4628 4621 static void
4629 4622 hermon_set_msix_info(hermon_state_t *state)
4630 4623 {
4631 4624 uint_t rnumber, breg, nregs;
4632 4625 ushort_t caps_ctrl, msix_ctrl;
4633 4626 pci_regspec_t *rp;
4634 4627 int reg_size, addr_space, offset, *regs_list, i;
4635 4628
4636 4629 /*
4637 4630 * MSI-X BIR Index Table:
4638 4631 	 * BAR indicator register (BIR) value to Base Address Register offset.
4639 4632 */
4640 4633 uchar_t pci_msix_bir_index[8] = {0x10, 0x14, 0x18, 0x1c,
4641 4634 0x20, 0x24, 0xff, 0xff};
4642 4635
4643 4636 /* Fastreboot data access attribute */
4644 4637 ddi_device_acc_attr_t dev_attr = {
4645 4638 0, /* version */
4646 4639 DDI_STRUCTURE_LE_ACC,
4647 4640 DDI_STRICTORDER_ACC, /* attr access */
4648 4641 0
4649 4642 };
4650 4643
4651 4644 ddi_acc_handle_t pci_cfg_hdl = hermon_get_pcihdl(state);
4652 4645 ASSERT(pci_cfg_hdl != NULL);
4653 4646
4654 4647 if ((PCI_CAP_LOCATE(pci_cfg_hdl,
4655 4648 PCI_CAP_ID_MSI_X, &caps_ctrl) == DDI_SUCCESS)) {
4656 4649 if ((msix_ctrl = PCI_CAP_GET16(pci_cfg_hdl, NULL, caps_ctrl,
4657 4650 PCI_MSIX_CTRL)) == PCI_CAP_EINVAL16)
4658 4651 return;
4659 4652 }
4660 4653 ASSERT(msix_ctrl != 0);
4661 4654
4662 4655 state->hs_msix_tbl_offset = PCI_CAP_GET32(pci_cfg_hdl, NULL, caps_ctrl,
4663 4656 PCI_MSIX_TBL_OFFSET);
4664 4657
4665 4658 /* Get the BIR for MSI-X table */
4666 4659 breg = pci_msix_bir_index[state->hs_msix_tbl_offset &
4667 4660 PCI_MSIX_TBL_BIR_MASK];
4668 4661 ASSERT(breg != 0xFF);
4669 4662
4670 4663 /* Set the MSI-X table offset */
4671 4664 state->hs_msix_tbl_offset = state->hs_msix_tbl_offset &
4672 4665 ~PCI_MSIX_TBL_BIR_MASK;
4673 4666
4674 4667 /* Set the MSI-X table size */
4675 4668 state->hs_msix_tbl_size = ((msix_ctrl & PCI_MSIX_TBL_SIZE_MASK) + 1) *
4676 4669 PCI_MSIX_VECTOR_SIZE;
4677 4670
4678 4671 if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, state->hs_dip,
4679 4672 	    DDI_PROP_DONTPASS, "reg", (int **)&regs_list, &nregs) !=
4680 4673 DDI_PROP_SUCCESS) {
4681 4674 return;
4682 4675 }
4683 4676 reg_size = sizeof (pci_regspec_t) / sizeof (int);
4684 4677
4685 4678 /* Check the register number for MSI-X table */
4686 4679 for (i = 1, rnumber = 0; i < nregs/reg_size; i++) {
4687 4680 		rp = (pci_regspec_t *)&regs_list[i * reg_size];
4688 4681 addr_space = rp->pci_phys_hi & PCI_ADDR_MASK;
4689 4682 offset = PCI_REG_REG_G(rp->pci_phys_hi);
4690 4683
4691 4684 if ((offset == breg) && ((addr_space == PCI_ADDR_MEM32) ||
4692 4685 (addr_space == PCI_ADDR_MEM64))) {
4693 4686 rnumber = i;
4694 4687 break;
4695 4688 }
4696 4689 }
4697 4690 ASSERT(rnumber != 0);
4698 4691 state->hs_msix_tbl_rnumber = rnumber;
4699 4692
4700 4693 /* Set device attribute version and access according to Hermon FM */
4701 4694 dev_attr.devacc_attr_version = hermon_devacc_attr_version(state);
4702 4695 dev_attr.devacc_attr_access = hermon_devacc_attr_access(state);
4703 4696
4704 4697 /* Map the entire MSI-X vector table */
4705 4698 if (hermon_regs_map_setup(state, state->hs_msix_tbl_rnumber,
4706 4699 (caddr_t *)&state->hs_msix_tbl_addr, state->hs_msix_tbl_offset,
4707 4700 state->hs_msix_tbl_size, &dev_attr,
4708 4701 &state->hs_fm_msix_tblhdl) != DDI_SUCCESS) {
4709 4702 return;
4710 4703 }
4711 4704
4712 4705 state->hs_msix_pba_offset = PCI_CAP_GET32(pci_cfg_hdl, NULL, caps_ctrl,
4713 4706 PCI_MSIX_PBA_OFFSET);
4714 4707
4715 4708 /* Get the BIR for MSI-X PBA */
4716 4709 breg = pci_msix_bir_index[state->hs_msix_pba_offset &
4717 4710 PCI_MSIX_PBA_BIR_MASK];
4718 4711 ASSERT(breg != 0xFF);
4719 4712
4720 4713 /* Set the MSI-X PBA offset */
4721 4714 state->hs_msix_pba_offset = state->hs_msix_pba_offset &
4722 4715 ~PCI_MSIX_PBA_BIR_MASK;
4723 4716
4724 4717 /* Set the MSI-X PBA size */
4725 4718 state->hs_msix_pba_size =
4726 4719 ((msix_ctrl & PCI_MSIX_TBL_SIZE_MASK) + 64) / 64 * 8;
4727 4720
4728 4721 /* Check the register number for MSI-X PBA */
4729 4722 for (i = 1, rnumber = 0; i < nregs/reg_size; i++) {
4730 4723 		rp = (pci_regspec_t *)&regs_list[i * reg_size];
4731 4724 addr_space = rp->pci_phys_hi & PCI_ADDR_MASK;
4732 4725 offset = PCI_REG_REG_G(rp->pci_phys_hi);
4733 4726
4734 4727 if ((offset == breg) && ((addr_space == PCI_ADDR_MEM32) ||
4735 4728 (addr_space == PCI_ADDR_MEM64))) {
4736 4729 rnumber = i;
4737 4730 break;
4738 4731 }
4739 4732 }
4740 4733 ASSERT(rnumber != 0);
4741 4734 state->hs_msix_pba_rnumber = rnumber;
4742 4735 ddi_prop_free(regs_list);
4743 4736
4744 4737 /* Map in the MSI-X Pending Bit Array */
4745 4738 if (hermon_regs_map_setup(state, state->hs_msix_pba_rnumber,
4746 4739 (caddr_t *)&state->hs_msix_pba_addr, state->hs_msix_pba_offset,
4747 4740 state->hs_msix_pba_size, &dev_attr,
4748 4741 &state->hs_fm_msix_pbahdl) != DDI_SUCCESS) {
4749 4742 hermon_regs_map_free(state, &state->hs_fm_msix_tblhdl);
4750 4743 state->hs_fm_msix_tblhdl = NULL;
4751 4744 return;
4752 4745 }
4753 4746
4754 4747 /* Set the MSI-X table save area */
4755 4748 state->hs_msix_tbl_entries = kmem_alloc(state->hs_msix_tbl_size,
4756 4749 KM_SLEEP);
4757 4750
4758 4751 /* Set the MSI-X PBA save area */
4759 4752 state->hs_msix_pba_entries = kmem_alloc(state->hs_msix_pba_size,
4760 4753 KM_SLEEP);
4761 4754 }
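
The fixed pci_msix_bir_index[] array above maps a BIR value of 0 through 5 to the config-space offset of the matching BAR (0x10 through 0x24), with 0xff marking BIR values the driver does not expect; the ASSERT(breg != 0xFF) checks exactly that. A standalone sketch of the same lookup (illustrative names, not code from this driver):

#include <stdio.h>
#include <stdint.h>

/* Map a BAR indicator (BIR) to its config-space offset; -1 if unsupported. */
static int
bir_to_bar_offset(unsigned bir)
{
	/* BAR0..BAR5 live at config offsets 0x10, 0x14, ... 0x24 */
	static const uint8_t bar_offset[8] = {
		0x10, 0x14, 0x18, 0x1c, 0x20, 0x24, 0xff, 0xff
	};

	if (bir > 7 || bar_offset[bir] == 0xff)
		return (-1);
	return (bar_offset[bir]);
}

int
main(void)
{
	/* BIR 2 -> 0x18, i.e. BAR2 */
	(void) printf("0x%X\n", (unsigned)bir_to_bar_offset(2));
	return (0);
}
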
4762 4755
4763 4756 /* Disable Hermon interrupts */
4764 4757 static int
4765 4758 hermon_intr_disable(hermon_state_t *state)
4766 4759 {
4767 4760 ushort_t msix_ctrl = 0, caps_ctrl = 0;
4768 4761 ddi_acc_handle_t pci_cfg_hdl = hermon_get_pcihdl(state);
4769 4762 ddi_acc_handle_t msix_tblhdl = hermon_get_msix_tblhdl(state);
4770 4763 int i, j;
4771 4764 ASSERT(pci_cfg_hdl != NULL && msix_tblhdl != NULL);
4772 4765 ASSERT(state->hs_intr_types_avail &
4773 4766 (DDI_INTR_TYPE_FIXED | DDI_INTR_TYPE_MSI | DDI_INTR_TYPE_MSIX));
4774 4767
4775 4768 /*
4776 4769 	 * Check if MSI-X interrupts are used. If so, disable MSI-X interrupts.
4777 4770 	 * If not, then since Hermon doesn't support MSI interrupts, assume the
4778 4771 	 * legacy interrupt is in use instead and disable it.
4779 4772 */
4780 4773 if ((state->hs_cfg_profile->cp_use_msi_if_avail != 0) &&
4781 4774 (state->hs_intr_types_avail & DDI_INTR_TYPE_MSIX)) {
4782 4775
4783 4776 if ((PCI_CAP_LOCATE(pci_cfg_hdl,
4784 4777 PCI_CAP_ID_MSI_X, &caps_ctrl) == DDI_SUCCESS)) {
4785 4778 if ((msix_ctrl = PCI_CAP_GET16(pci_cfg_hdl, NULL,
4786 4779 caps_ctrl, PCI_MSIX_CTRL)) == PCI_CAP_EINVAL16)
4787 4780 return (DDI_FAILURE);
4788 4781 }
4789 4782 ASSERT(msix_ctrl != 0);
4790 4783
4791 4784 if (!(msix_ctrl & PCI_MSIX_ENABLE_BIT))
4792 4785 return (DDI_SUCCESS);
4793 4786
4794 4787 /* Clear all inums in MSI-X table */
4795 4788 for (i = 0; i < get_msix_tbl_size(state->hs_dip);
4796 4789 i += PCI_MSIX_VECTOR_SIZE) {
4797 4790 for (j = 0; j < PCI_MSIX_VECTOR_SIZE; j += 4) {
4798 4791 char *addr = state->hs_msix_tbl_addr + i + j;
4799 4792 ddi_put32(msix_tblhdl,
4800 4793 (uint32_t *)(uintptr_t)addr, 0x0);
4801 4794 }
4802 4795 }
4803 4796
4804 4797 /* Disable MSI-X interrupts */
4805 4798 msix_ctrl &= ~PCI_MSIX_ENABLE_BIT;
4806 4799 PCI_CAP_PUT16(pci_cfg_hdl, NULL, caps_ctrl, PCI_MSIX_CTRL,
4807 4800 msix_ctrl);
4808 4801
4809 4802 } else {
4810 4803 uint16_t cmdreg = pci_config_get16(pci_cfg_hdl, PCI_CONF_COMM);
4811 4804 ASSERT(state->hs_intr_types_avail & DDI_INTR_TYPE_FIXED);
4812 4805
4813 4806 /* Disable the legacy interrupts */
4814 4807 cmdreg |= PCI_COMM_INTX_DISABLE;
4815 4808 pci_config_put16(pci_cfg_hdl, PCI_CONF_COMM, cmdreg);
4816 4809 }
4817 4810
4818 4811 return (DDI_SUCCESS);
4819 4812 }
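
Stripped of the DDI access routines, the two disable paths above come down to clearing MSI-X Enable (bit 15 of Message Control) or setting INTx Disable (bit 10 of the PCI command register). A minimal sketch of just that bit manipulation, assuming those standard bit positions (illustrative, no device access, not code from this driver):

#include <stdio.h>
#include <stdint.h>

#define	MSIX_ENABLE_BIT		0x8000	/* bit 15 of MSI-X Message Control */
#define	CMD_INTX_DISABLE	0x0400	/* bit 10 of the PCI command register */

/* Return the Message Control value with MSI-X delivery turned off. */
static uint16_t
msix_ctrl_disabled(uint16_t msix_ctrl)
{
	return (msix_ctrl & ~MSIX_ENABLE_BIT);
}

/* Return the command register value with legacy INTx delivery turned off. */
static uint16_t
cmd_intx_disabled(uint16_t cmdreg)
{
	return (cmdreg | CMD_INTX_DISABLE);
}

int
main(void)
{
	(void) printf("0x%04X 0x%04X\n",
	    (unsigned)msix_ctrl_disabled(0x807F),
	    (unsigned)cmd_intx_disabled(0x0146));
	return (0);
}
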
4820 4813
4821 4814 /* Hermon quiesce(9F) entry */
4822 4815 static int
4823 4816 hermon_quiesce(dev_info_t *dip)
4824 4817 {
4825 4818 hermon_state_t *state = ddi_get_soft_state(hermon_statep,
4826 4819 DEVI(dip)->devi_instance);
4827 4820 ddi_acc_handle_t pcihdl = hermon_get_pcihdl(state);
4828 4821 ddi_acc_handle_t cmdhdl = hermon_get_cmdhdl(state);
4829 4822 ddi_acc_handle_t msix_tbl_hdl = hermon_get_msix_tblhdl(state);
4830 4823 ddi_acc_handle_t msix_pba_hdl = hermon_get_msix_pbahdl(state);
4831 4824 uint32_t sem, reset_delay = state->hs_cfg_profile->cp_sw_reset_delay;
4832 4825 uint64_t data64;
4833 4826 uint32_t data32;
4834 4827 int status, i, j, loopcnt;
4835 4828 uint_t offset;
4836 4829
4837 4830 ASSERT(state != NULL);
4838 4831
4839 4832 /* start fastreboot */
4840 4833 state->hs_quiescing = B_TRUE;
4841 4834
4842 4835 /* If it's in maintenance mode, do nothing but return with SUCCESS */
4843 4836 if (!HERMON_IS_OPERATIONAL(state->hs_operational_mode)) {
4844 4837 return (DDI_SUCCESS);
4845 4838 }
4846 4839
4847 4840 /* suppress Hermon FM ereports */
4848 4841 if (hermon_get_state(state) & HCA_EREPORT_FM) {
4849 4842 hermon_clr_state_nolock(state, HCA_EREPORT_FM);
4850 4843 }
4851 4844
4852 4845 /* Shutdown HCA ports */
4853 4846 if (hermon_hca_ports_shutdown(state,
4854 4847 state->hs_cfg_profile->cp_num_ports) != HERMON_CMD_SUCCESS) {
4855 4848 state->hs_quiescing = B_FALSE;
4856 4849 return (DDI_FAILURE);
4857 4850 }
4858 4851
4859 4852 /* Close HCA */
4860 4853 if (hermon_close_hca_cmd_post(state, HERMON_CMD_NOSLEEP_SPIN) !=
4861 4854 HERMON_CMD_SUCCESS) {
4862 4855 state->hs_quiescing = B_FALSE;
4863 4856 return (DDI_FAILURE);
4864 4857 }
4865 4858
4866 4859 /* Disable interrupts */
4867 4860 if (hermon_intr_disable(state) != DDI_SUCCESS) {
4868 4861 state->hs_quiescing = B_FALSE;
4869 4862 return (DDI_FAILURE);
4870 4863 }
4871 4864
4872 4865 /*
4873 4866 * Query the PCI capabilities of the HCA device, but don't process
4874 4867 * the VPD until after reset.
4875 4868 */
4876 4869 if (hermon_pci_capability_list(state, pcihdl) != DDI_SUCCESS) {
4877 4870 state->hs_quiescing = B_FALSE;
4878 4871 return (DDI_FAILURE);
4879 4872 }
4880 4873
4881 4874 /*
4882 4875 * Read all PCI config info (reg0...reg63). Note: According to the
4883 4876 * Hermon software reset application note, we should not read or
4884 4877 * restore the values in reg22 and reg23.
4885 4878 * NOTE: For Hermon (and Arbel too) it says to restore the command
4886 4879 * register LAST, and technically, you need to restore the
4887 4880 * PCIE Capability "device control" and "link control" (word-sized,
4888 4881 	 * at offsets 0x08 and 0x10 from the capability ID respectively).
4889 4882 * We hold off restoring the command register - offset 0x4 - till last
4890 4883 */
4891 4884
4892 4885 	/* 1st, wait for the semaphore to assure accessibility - per PRM */
4893 4886 status = -1;
4894 4887 	for (i = 0; i < NANOSEC/MICROSEC /* 1000 tries, ~1 usec apart */; i++) {
4895 4888 sem = ddi_get32(cmdhdl, state->hs_cmd_regs.sw_semaphore);
4896 4889 if (sem == 0) {
4897 4890 status = 0;
4898 4891 break;
4899 4892 }
4900 4893 drv_usecwait(1);
4901 4894 }
4902 4895
4903 4896 /* Check if timeout happens */
4904 4897 if (status == -1) {
4905 4898 state->hs_quiescing = B_FALSE;
4906 4899 return (DDI_FAILURE);
4907 4900 }
4908 4901
4909 4902 	/* If MSI-X interrupts are used, save the MSI-X table */
4910 4903 if (msix_tbl_hdl && msix_pba_hdl) {
4911 4904 /* save MSI-X table */
4912 4905 for (i = 0; i < get_msix_tbl_size(state->hs_dip);
4913 4906 i += PCI_MSIX_VECTOR_SIZE) {
4914 4907 for (j = 0; j < PCI_MSIX_VECTOR_SIZE; j += 4) {
4915 4908 char *addr = state->hs_msix_tbl_addr + i + j;
4916 4909 data32 = ddi_get32(msix_tbl_hdl,
4917 4910 (uint32_t *)(uintptr_t)addr);
4918 4911 *(uint32_t *)(uintptr_t)(state->
4919 4912 hs_msix_tbl_entries + i + j) = data32;
4920 4913 }
4921 4914 }
4922 4915 /* save MSI-X PBA */
4923 4916 for (i = 0; i < get_msix_pba_size(state->hs_dip); i += 8) {
4924 4917 char *addr = state->hs_msix_pba_addr + i;
4925 4918 data64 = ddi_get64(msix_pba_hdl,
4926 4919 (uint64_t *)(uintptr_t)addr);
4927 4920 *(uint64_t *)(uintptr_t)(state->
4928 4921 hs_msix_pba_entries + i) = data64;
4929 4922 }
4930 4923 }
4931 4924
4932 4925 /* save PCI config space */
4933 4926 for (i = 0; i < HERMON_SW_RESET_NUMREGS; i++) {
4934 4927 if ((i != HERMON_SW_RESET_REG22_RSVD) &&
4935 4928 (i != HERMON_SW_RESET_REG23_RSVD)) {
4936 4929 state->hs_cfg_data[i] =
4937 4930 pci_config_get32(pcihdl, i << 2);
4938 4931 }
4939 4932 }
4940 4933
4941 4934 /* SW-reset HCA */
4942 4935 ddi_put32(cmdhdl, state->hs_cmd_regs.sw_reset, HERMON_SW_RESET_START);
4943 4936
4944 4937 /*
4945 4938 * This delay is required so as not to cause a panic here. If the
4946 4939 * device is accessed too soon after reset it will not respond to
4947 4940 * config cycles, causing a Master Abort and panic.
4948 4941 */
4949 4942 drv_usecwait(reset_delay);
4950 4943
4951 4944 /* Poll waiting for the device to finish resetting */
4952 4945 loopcnt = 100; /* 100 times @ 100 usec - total delay 10 msec */
4953 4946 while ((pci_config_get32(pcihdl, 0) & 0x0000FFFF) != PCI_VENID_MLX) {
4954 4947 drv_usecwait(HERMON_SW_RESET_POLL_DELAY);
4955 4948 if (--loopcnt == 0)
4956 4949 break; /* just in case, break and go on */
4957 4950 }
4958 4951 if (loopcnt == 0) {
4959 4952 state->hs_quiescing = B_FALSE;
4960 4953 return (DDI_FAILURE);
4961 4954 }
4962 4955
4963 4956 /* Restore the config info */
4964 4957 for (i = 0; i < HERMON_SW_RESET_NUMREGS; i++) {
4965 4958 if (i == 1) continue; /* skip the status/ctrl reg */
4966 4959 if ((i != HERMON_SW_RESET_REG22_RSVD) &&
4967 4960 (i != HERMON_SW_RESET_REG23_RSVD)) {
4968 4961 pci_config_put32(pcihdl, i << 2, state->hs_cfg_data[i]);
4969 4962 }
4970 4963 }
4971 4964
4972 4965 /* If MSI-X interrupts are used, restore the MSI-X table */
4973 4966 if (msix_tbl_hdl && msix_pba_hdl) {
4974 4967 /* restore MSI-X PBA */
4975 4968 for (i = 0; i < get_msix_pba_size(state->hs_dip); i += 8) {
4976 4969 char *addr = state->hs_msix_pba_addr + i;
4977 4970 data64 = *(uint64_t *)(uintptr_t)
4978 4971 (state->hs_msix_pba_entries + i);
4979 4972 ddi_put64(msix_pba_hdl,
4980 4973 (uint64_t *)(uintptr_t)addr, data64);
4981 4974 }
4982 4975 /* restore MSI-X table */
4983 4976 for (i = 0; i < get_msix_tbl_size(state->hs_dip);
4984 4977 i += PCI_MSIX_VECTOR_SIZE) {
4985 4978 for (j = 0; j < PCI_MSIX_VECTOR_SIZE; j += 4) {
4986 4979 char *addr = state->hs_msix_tbl_addr + i + j;
4987 4980 data32 = *(uint32_t *)(uintptr_t)
4988 4981 (state->hs_msix_tbl_entries + i + j);
4989 4982 ddi_put32(msix_tbl_hdl,
4990 4983 (uint32_t *)(uintptr_t)addr, data32);
4991 4984 }
4992 4985 }
4993 4986 }
4994 4987
4995 4988 /*
4996 4989 	 * PCI Express Capability - we saved the device control and link
4997 4990 	 * control values during capability list processing; restore them here.
4998 4991 */
4999 4992 offset = state->hs_pci_cap_offset;
5000 4993 data32 = state->hs_pci_cap_devctl;
5001 4994 pci_config_put32(pcihdl, offset + HERMON_PCI_CAP_DEV_OFFS, data32);
5002 4995 data32 = state->hs_pci_cap_lnkctl;
5003 4996 pci_config_put32(pcihdl, offset + HERMON_PCI_CAP_LNK_OFFS, data32);
5004 4997
5005 4998 /* restore the command register */
5006 4999 pci_config_put32(pcihdl, 0x04, (state->hs_cfg_data[1] | 0x0006));
5007 5000
5008 5001 return (DDI_SUCCESS);
5009 5002 }
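
Both waits in hermon_quiesce(), the semaphore check before the save and the vendor-ID poll after the reset, follow the same bounded poll-and-delay shape. A generic standalone sketch of that pattern, with a caller-supplied predicate standing in for the register read and usleep() standing in for drv_usecwait() (illustrative only, not code from this driver):

#include <stdio.h>
#include <unistd.h>

/*
 * Poll until check(arg) returns nonzero or the attempt budget runs out.
 * Returns 0 on success, -1 on timeout.
 */
static int
poll_until(int (*check)(void *), void *arg, unsigned attempts,
    unsigned delay_us)
{
	while (attempts-- > 0) {
		if (check(arg) != 0)
			return (0);
		(void) usleep(delay_us);
	}
	return (-1);
}

/* Trivial predicate so the sketch runs: the "device" is ready immediately. */
static int
always_ready(void *arg)
{
	(void) arg;
	return (1);
}

int
main(void)
{
	/* e.g. 100 tries, 100 usec apart, mirroring the post-reset poll */
	(void) printf("%d\n", poll_until(always_ready, NULL, 100, 100));
	return (0);
}
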
3946 lines elided