Print this page
8368 remove warlock leftovers from usr/src/uts
Split |
Close |
Expand all |
Collapse all |
--- old/usr/src/uts/common/io/ib/adapters/hermon/hermon_event.c
+++ new/usr/src/uts/common/io/ib/adapters/hermon/hermon_event.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21
22 22 /*
23 23 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
24 24 */
25 25
26 26 /*
27 27 * hermon_event.c
28 28 * Hermon Interrupt and Event Processing Routines
29 29 *
30 30 * Implements all the routines necessary for allocating, freeing, and
31 31 * handling all of the various event types that the Hermon hardware can
32 32 * generate.
33 33 * These routines include the main Hermon interrupt service routine
34 34 * (hermon_isr()) as well as all the code necessary to setup and handle
35 35 * events from each of the many event queues used by the Hermon device.
36 36 */
37 37
38 38 #include <sys/types.h>
39 39 #include <sys/conf.h>
40 40 #include <sys/ddi.h>
41 41 #include <sys/sunddi.h>
42 42 #include <sys/modctl.h>
43 43
44 44 #include <sys/ib/adapters/hermon/hermon.h>
45 45
46 46 static void hermon_eq_poll(hermon_state_t *state, hermon_eqhdl_t eq);
47 47 static void hermon_eq_catastrophic(hermon_state_t *state);
48 48 static int hermon_eq_alloc(hermon_state_t *state, uint32_t log_eq_size,
49 49 uint_t intr, hermon_eqhdl_t *eqhdl);
50 50 static int hermon_eq_free(hermon_state_t *state, hermon_eqhdl_t *eqhdl);
51 51 static int hermon_eq_handler_init(hermon_state_t *state, hermon_eqhdl_t eq,
52 52 uint_t evt_type_mask, int (*eqfunc)(hermon_state_t *state,
53 53 hermon_eqhdl_t eq, hermon_hw_eqe_t *eqe));
54 54 static int hermon_eq_handler_fini(hermon_state_t *state, hermon_eqhdl_t eq);
55 55 static int hermon_port_state_change_handler(hermon_state_t *state,
56 56 hermon_eqhdl_t eq, hermon_hw_eqe_t *eqe);
57 57 static int hermon_comm_estbl_handler(hermon_state_t *state,
58 58 hermon_eqhdl_t eq, hermon_hw_eqe_t *eqe);
59 59 static int hermon_local_wq_cat_err_handler(hermon_state_t *state,
60 60 hermon_eqhdl_t eq, hermon_hw_eqe_t *eqe);
61 61 static int hermon_invreq_local_wq_err_handler(hermon_state_t *state,
62 62 hermon_eqhdl_t eq, hermon_hw_eqe_t *eqe);
63 63 static int hermon_local_acc_vio_wq_err_handler(hermon_state_t *state,
64 64 hermon_eqhdl_t eq, hermon_hw_eqe_t *eqe);
65 65 static int hermon_sendq_drained_handler(hermon_state_t *state,
66 66 hermon_eqhdl_t eq, hermon_hw_eqe_t *eqe);
67 67 static int hermon_path_mig_handler(hermon_state_t *state,
68 68 hermon_eqhdl_t eq, hermon_hw_eqe_t *eqe);
69 69 static int hermon_path_mig_err_handler(hermon_state_t *state,
70 70 hermon_eqhdl_t eq, hermon_hw_eqe_t *eqe);
71 71 static int hermon_catastrophic_handler(hermon_state_t *state,
72 72 hermon_eqhdl_t eq, hermon_hw_eqe_t *eqe);
73 73 static int hermon_srq_last_wqe_reached_handler(hermon_state_t *state,
74 74 hermon_eqhdl_t eq, hermon_hw_eqe_t *eqe);
75 75 static int hermon_fexch_error_handler(hermon_state_t *state,
76 76 hermon_eqhdl_t eq, hermon_hw_eqe_t *eqe);
77 77 static int hermon_no_eqhandler(hermon_state_t *state, hermon_eqhdl_t eq,
78 78 hermon_hw_eqe_t *eqe);
79 79 static int hermon_eq_demux(hermon_state_t *state, hermon_eqhdl_t eq,
80 80 hermon_hw_eqe_t *eqe);
81 81
/*
 * hermon_eq_init_all
 *    Context: Only called from attach() path context
 *
 *    Allocates and initializes every Event Queue (EQ) used by the driver,
 *    maps each event class to its handler function, and writes an initial
 *    Consumer Index of zero to each EQ doorbell.  On any failure, all
 *    partially-completed setup is unwound before returning.
 *
 *    Returns DDI_SUCCESS, or DDI_FAILURE (resource/PIO failure) / an
 *    ibc failure code propagated from the EQ setup helpers.
 */
int
hermon_eq_init_all(hermon_state_t *state)
{
	uint_t log_eq_size, intr_num;
	uint_t num_eq, num_eq_init, num_eq_unmap, num_eq_rsvd;
	uint32_t event_mask; /* used for multiple event types */
	int status, i, num_extra;
	struct hermon_sw_eq_s **eq;
	ddi_acc_handle_t uarhdl = hermon_get_uarhdl(state);

	/* initialize the FMA retry loop */
	hermon_pio_init(fm_loop_cnt, fm_status, fm_test);

	/*
	 * For now, all Event Queues default to the same size (pulled from
	 * the current configuration profile) and are all assigned to the
	 * same interrupt or MSI.  In the future we may support assigning
	 * EQs to specific interrupts or MSIs XXX
	 */
	log_eq_size = state->hs_cfg_profile->cp_log_eq_sz;

	/*
	 * Total number of supported EQs is fixed.  Hermon hardware
	 * supports up to 512 EQs, though in theory they will one day be
	 * alloc'd to virtual HCA's.  We are currently using only 47 of them
	 * - that is, in Arbel and Tavor, before HERMON, where
	 * we had set aside the first 32 for use with Completion Queues (CQ)
	 * and reserved a few of the other 32 for each specific class of event
	 *
	 * However, with the coming of virtualization, we'll have only 4 per
	 * potential guest - so, we'll try alloc'ing them differently
	 * (see below for more details).
	 */
	num_eq = HERMON_NUM_EQ_USED;
	num_eq_rsvd = state->hs_rsvd_eqs;
	/* "eq" indexes past the reserved EQs for the rest of this routine */
	eq = &state->hs_eqhdl[num_eq_rsvd];

	/*
	 * If MSI is to be used, then set intr_num to the MSI number.
	 * Otherwise, for fixed (i.e. 'legacy') interrupts,
	 * it is what the card tells us in 'inta_pin'.
	 */
	if (state->hs_intr_type_chosen == DDI_INTR_TYPE_FIXED) {
		intr_num = state->hs_adapter.inta_pin;
		num_extra = 0;
	} else {
		/*
		 * If we have more than one MSI-X vector, init them.
		 * One extra EQ (dedicated to completion events) is created
		 * per additional vector; the last vector (index i, below)
		 * is reserved for the "main" EQs allocated after this loop.
		 */
		for (i = 0; i + 1 < state->hs_intrmsi_allocd; i++) {
			status = hermon_eq_alloc(state, log_eq_size, i, &eq[i]);
			if (status != DDI_SUCCESS) {
				/* unwind the EQs allocated so far */
				while (--i >= 0) {
					(void) hermon_eq_handler_fini(state,
					    eq[i]);
					(void) hermon_eq_free(state, &eq[i]);
				}
				return (DDI_FAILURE);
			}

			(void) hermon_eq_handler_init(state, eq[i],
			    HERMON_EVT_NO_MASK, hermon_cq_handler);
		}
		intr_num = i;
		num_extra = i;
	}

	/*
	 * Allocate and initialize the rest of the Event Queues to be used.
	 * If any of these EQ allocations fail then jump to the end, cleanup
	 * what had been successfully initialized, and return an error.
	 */
	for (i = 0; i < num_eq; i++) {
		status = hermon_eq_alloc(state, log_eq_size, intr_num,
		    &eq[num_extra + i]);
		if (status != DDI_SUCCESS) {
			num_eq_init = i;
			goto all_eq_init_fail;
		}
	}
	num_eq_init = num_eq;
	/*
	 * The "num_eq_unmap" variable is used in any possible failure
	 * cleanup (below) to indicate which events queues might require
	 * possible event class unmapping.
	 */
	num_eq_unmap = 0;

	/*
	 * Setup EQ0 (first avail) for use with Completion Queues. Note: We can
	 * cast the return value to void here because, when we use the
	 * HERMON_EVT_NO_MASK flag, it is not possible for
	 * hermon_eq_handler_init() to return an error.
	 */
	(void) hermon_eq_handler_init(state, eq[num_eq_unmap + num_extra],
	    HERMON_EVT_NO_MASK, hermon_cq_handler);

	num_eq_unmap++;

	/*
	 * Setup EQ1 for handling Completion Queue Error Events.
	 *
	 * These events include things like CQ overflow or CQ access
	 * violation errors.  If this setup fails for any reason (which, in
	 * general, it really never should), then jump to the end, cleanup
	 * everything that has been successfully initialized, and return an
	 * error.
	 */
	status = hermon_eq_handler_init(state, eq[num_eq_unmap + num_extra],
	    HERMON_EVT_MSK_CQ_ERRORS, hermon_cq_err_handler);
	if (status != DDI_SUCCESS) {
		goto all_eq_init_fail;
	}
	/* remember the absolute EQ number used for CQ error events */
	state->hs_cq_erreqnum = num_eq_unmap + num_extra + num_eq_rsvd;
	num_eq_unmap++;

	/*
	 * Setup EQ2 for handling most other things including:
	 *
	 * Port State Change Events
	 *   These events include things like Port Up and Port Down events.
	 *
	 * Communication Established Events
	 *   These events correspond to the IB affiliated asynchronous events
	 *   that are used for connection management
	 *
	 * Path Migration Succeeded Events
	 *   These events correspond to the IB affiliated asynchronous events
	 *   that are used to indicate successful completion of a
	 *   Path Migration.
	 *
	 * Command Completion Events
	 *   These events correspond to the Arbel generated events that are used
	 *   to indicate Arbel firmware command completion.
	 *
	 * Local WQ Catastrophic Error Events
	 * Invalid Req Local WQ Error Events
	 * Local Access Violation WQ Error Events
	 * SRQ Catastrophic Error Events
	 * SRQ Last WQE Reached Events
	 * ECC error detection events
	 *   These events also correspond to the similarly-named IB affiliated
	 *   asynchronous error type.
	 *
	 * Send Queue Drained Events
	 *   These events correspond to the IB affiliated asynchronous events
	 *   that are used to indicate completion of a Send Queue Drained QP
	 *   state transition.
	 *
	 * Path Migration Failed Events
	 *   These events correspond to the IB affiliated asynchronous events
	 *   that are used to indicate that path migration was not successful.
	 *
	 * Fibre Channel Error Event
	 *   This event is affiliated with an Fexch QP.
	 *
	 * NOTE: When an event fires on this EQ, it will demux the type and
	 * 	send it to the right specific handler routine
	 *
	 */
	event_mask =
	    HERMON_EVT_MSK_PORT_STATE_CHANGE |
	    HERMON_EVT_MSK_COMM_ESTABLISHED |
	    HERMON_EVT_MSK_COMMAND_INTF_COMP |
	    HERMON_EVT_MSK_LOCAL_WQ_CAT_ERROR |
	    HERMON_EVT_MSK_INV_REQ_LOCAL_WQ_ERROR |
	    HERMON_EVT_MSK_LOCAL_ACC_VIO_WQ_ERROR |
	    HERMON_EVT_MSK_SEND_QUEUE_DRAINED |
	    HERMON_EVT_MSK_PATH_MIGRATED |
	    HERMON_EVT_MSK_PATH_MIGRATE_FAILED |
	    HERMON_EVT_MSK_SRQ_CATASTROPHIC_ERROR |
	    HERMON_EVT_MSK_SRQ_LAST_WQE_REACHED |
	    HERMON_EVT_MSK_FEXCH_ERROR;

	status = hermon_eq_handler_init(state, eq[num_eq_unmap + num_extra],
	    event_mask, hermon_eq_demux);
	if (status != DDI_SUCCESS) {
		goto all_eq_init_fail;
	}
	num_eq_unmap++;

	/*
	 * Setup EQ3 to catch all other types of events.  Specifically, we
	 * do not catch the "Local EEC Catastrophic Error Event" because we
	 * should have no EEC (the Arbel driver does not support RD).  We also
	 * choose not to handle any of the address translation page fault
	 * event types.  Since we are not doing any page fault handling (and
	 * since the Arbel firmware does not currently support any such
	 * handling), we allow these events to go to the catch-all handler.
	 */
	status = hermon_eq_handler_init(state, eq[num_eq_unmap + num_extra],
	    HERMON_EVT_CATCHALL_MASK, hermon_no_eqhandler);
	if (status != DDI_SUCCESS) {
		goto all_eq_init_fail;
	}
	num_eq_unmap++;

	/* the FMA retry loop starts. */
	hermon_pio_start(state, uarhdl, all_eq_init_fail, fm_loop_cnt,
	    fm_status, fm_test);

	/*
	 * Run through and initialize the Consumer Index for each EQC.
	 */
	for (i = 0; i < num_eq + num_extra; i++) {
		ddi_put32(uarhdl, eq[i]->eq_doorbell, 0x0);
	}

	/* the FMA retry loop ends. */
	hermon_pio_end(state, uarhdl, all_eq_init_fail, fm_loop_cnt,
	    fm_status, fm_test);

	return (DDI_SUCCESS);

all_eq_init_fail:

	/* Unmap any of the partially mapped EQs from above */
	for (i = 0; i < num_eq_unmap + num_extra; i++) {
		(void) hermon_eq_handler_fini(state, eq[i]);
	}

	/* Free up any of the partially allocated EQs from above */
	for (i = 0; i < num_eq_init + num_extra; i++) {
		(void) hermon_eq_free(state, &eq[i]);
	}

	/* If a HW error happened during ddi_pio, return DDI_FAILURE */
	if (fm_status == HCA_PIO_PERSISTENT) {
		hermon_fm_ereport(state, HCA_SYS_ERR, HCA_ERR_NON_FATAL);
		status = DDI_FAILURE;
	}

	return (status);
}
318 318
319 319
320 320 /*
321 321 * hermon_eq_fini_all
322 322 * Context: Only called from attach() and/or detach() path contexts
323 323 */
324 324 int
325 325 hermon_eq_fini_all(hermon_state_t *state)
326 326 {
327 327 uint_t num_eq, num_eq_rsvd;
328 328 int status, i;
329 329 struct hermon_sw_eq_s **eq;
330 330
331 331 /*
332 332 * Grab the total number of supported EQs again. This is the same
333 333 * hardcoded value that was used above (during the event queue
334 334 * initialization.)
335 335 */
336 336 num_eq = HERMON_NUM_EQ_USED + state->hs_intrmsi_allocd - 1;
337 337 num_eq_rsvd = state->hs_rsvd_eqs;
338 338 eq = &state->hs_eqhdl[num_eq_rsvd];
339 339
340 340 /*
341 341 * For each of the event queues that we initialized and mapped
342 342 * earlier, attempt to unmap the events from the EQ.
343 343 */
344 344 for (i = 0; i < num_eq; i++) {
345 345 status = hermon_eq_handler_fini(state, eq[i]);
346 346 if (status != DDI_SUCCESS) {
347 347 return (DDI_FAILURE);
348 348 }
349 349 }
350 350
351 351 /*
352 352 * Teardown and free up all the Event Queues that were allocated
353 353 * earlier.
354 354 */
355 355 for (i = 0; i < num_eq; i++) {
356 356 status = hermon_eq_free(state, &eq[i]);
357 357 if (status != DDI_SUCCESS) {
358 358 return (DDI_FAILURE);
359 359 }
360 360 }
361 361
362 362 return (DDI_SUCCESS);
363 363 }
364 364
365 365
366 366 /*
367 367 * hermon_eq_reset_uar_baseaddr
368 368 * Context: Only called from attach()
369 369 */
370 370 void
371 371 hermon_eq_reset_uar_baseaddr(hermon_state_t *state)
372 372 {
373 373 int i, num_eq;
374 374 hermon_eqhdl_t eq, *eqh;
375 375
376 376 num_eq = HERMON_NUM_EQ_USED + state->hs_intrmsi_allocd - 1;
377 377 eqh = &state->hs_eqhdl[state->hs_rsvd_eqs];
378 378 for (i = 0; i < num_eq; i++) {
379 379 eq = eqh[i];
380 380 eq->eq_doorbell = (uint32_t *)
381 381 ((uintptr_t)state->hs_reg_uar_baseaddr +
382 382 (uint32_t)ARM_EQ_INDEX(eq->eq_eqnum));
383 383 }
384 384 }
385 385
386 386
/*
 * hermon_eq_arm_all
 *    Context: Only called from attach() and/or detach() path contexts
 *
 *    Arms every EQ by writing its current Consumer Index with the ARM
 *    bit set to the EQ's arm-doorbell register in the UAR.  Returns
 *    DDI_SUCCESS, or DDI_FAILURE if the PIO access fails persistently.
 */
int
hermon_eq_arm_all(hermon_state_t *state)
{
	uint_t num_eq, num_eq_rsvd;
	uint64_t offset;
	hermon_eqhdl_t eq;
	uint32_t eq_ci;
	int i;
	ddi_acc_handle_t uarhdl = hermon_get_uarhdl(state);

	/* initialize the FMA retry loop */
	hermon_pio_init(fm_loop_cnt, fm_status, fm_test);

	/* all EQs in use: the fixed set plus any extra per-MSI-X EQs */
	num_eq = HERMON_NUM_EQ_USED + state->hs_intrmsi_allocd - 1;
	num_eq_rsvd = state->hs_rsvd_eqs;

	/* the FMA retry loop starts. */
	hermon_pio_start(state, uarhdl, pio_error, fm_loop_cnt, fm_status,
	    fm_test);

	for (i = 0; i < num_eq; i++) {
		/* doorbell offset for this EQ within the UAR page */
		offset = ARM_EQ_INDEX(i + num_eq_rsvd);
		eq = state->hs_eqhdl[i + num_eq_rsvd];
		/* current CI with the arm bit set re-enables event reporting */
		eq_ci = (eq->eq_consindx & HERMON_EQ_CI_MASK) | EQ_ARM_BIT;
		ddi_put32(uarhdl,
		    (uint32_t *)((uintptr_t)state->hs_reg_uar_baseaddr +
		    (uint32_t)offset), eq_ci);
	}

	/* the FMA retry loop ends. */
	hermon_pio_end(state, uarhdl, pio_error, fm_loop_cnt, fm_status,
	    fm_test);

	return (DDI_SUCCESS);

pio_error:
	hermon_fm_ereport(state, HCA_SYS_ERR, HCA_ERR_NON_FATAL);
	return (DDI_FAILURE);
}
430 430
431 431
/*
 * hermon_isr()
 *    Context: Only called from interrupt context (and during panic)
 *
 *    Main interrupt service routine.  arg1 is the hermon_state_t pointer
 *    and arg2 encodes the interrupt vector number.  For fixed interrupts
 *    the interrupt is cleared explicitly; for MSI-X the framework does
 *    that.  Returns DDI_INTR_CLAIMED, or DDI_INTR_UNCLAIMED on a fatal
 *    PIO error while clearing the interrupt.
 */
uint_t
hermon_isr(caddr_t arg1, caddr_t arg2)
{
	hermon_state_t	*state;
	int		i, r;
	int		intr;

	/*
	 * Grab the Hermon softstate pointer from the input parameter
	 */
	state = (hermon_state_t *)(void *)arg1;

	/* Get the interrupt number */
	intr = (int)(uintptr_t)arg2;

	/*
	 * Clear the interrupt.  Note: This is only needed for
	 * fixed interrupts as the framework does what is needed for
	 * MSI-X interrupts.
	 */
	if (state->hs_intr_type_chosen == DDI_INTR_TYPE_FIXED) {
		ddi_acc_handle_t cmdhdl = hermon_get_cmdhdl(state);

		/* initialize the FMA retry loop */
		hermon_pio_init(fm_loop_cnt, fm_status, fm_test);

		/* the FMA retry loop starts. */
		hermon_pio_start(state, cmdhdl, pio_error, fm_loop_cnt,
		    fm_status, fm_test);

		/* write the bit for our INTA pin to the clear register */
		ddi_put64(cmdhdl, state->hs_cmd_regs.clr_intr,
		    (uint64_t)1 << state->hs_adapter.inta_pin);

		/* the FMA retry loop ends. */
		hermon_pio_end(state, cmdhdl, pio_error, fm_loop_cnt, fm_status,
		    fm_test);
	}

	/*
	 * Loop through all the EQs looking for ones that have "fired".
	 * To determine if an EQ is fired, the ownership will be the SW
	 * (the HW will set the owner appropriately). Update the Consumer Index
	 * of the Event Queue Entry (EQE) and pass it to HW by writing it
	 * to the respective Set CI DB Register.
	 *
	 * The "else" case handles the extra EQs used only for completion
	 * events, whereas the "if" case deals with the required interrupt
	 * vector that is used for all classes of events.
	 */
	r = state->hs_rsvd_eqs;

	if (intr + 1 == state->hs_intrmsi_allocd) {	/* last intr */
		/* skip past the per-vector completion-only EQs */
		r += state->hs_intrmsi_allocd - 1;
		for (i = 0; i < HERMON_NUM_EQ_USED; i++) {
			hermon_eq_poll(state, state->hs_eqhdl[i + r]);
		}
	} else {	/* only poll the one EQ */
		hermon_eq_poll(state, state->hs_eqhdl[intr + r]);
	}

	return (DDI_INTR_CLAIMED);

pio_error:
	hermon_fm_ereport(state, HCA_SYS_ERR, HCA_ERR_FATAL);
	return (DDI_INTR_UNCLAIMED);
}
502 502
503 503
/*
 * hermon_eq_poll
 *    Context: Only called from interrupt context (and during panic)
 *
 *    Drains all software-owned entries from the given EQ, dispatching
 *    each to the EQ's handler function (except during panic), then
 *    writes the updated Consumer Index (with the arm bit) to the EQ
 *    doorbell.  The outer loop repeats until a full pass finds no new
 *    entries, closing the race with entries that arrive while the
 *    doorbell is being written.
 */
static void
hermon_eq_poll(hermon_state_t *state, hermon_eqhdl_t eq)
{
	hermon_hw_eqe_t	*eqe;
	int		polled_some;
	uint32_t	cons_indx, wrap_around_mask, shift;
	int (*eqfunction)(hermon_state_t *state, hermon_eqhdl_t eq,
	    hermon_hw_eqe_t *eqe);
	ddi_acc_handle_t uarhdl = hermon_get_uarhdl(state);

	/* initialize the FMA retry loop */
	hermon_pio_init(fm_loop_cnt, fm_status, fm_test);

	/* Get the consumer pointer index */
	cons_indx = eq->eq_consindx;
	/* shift used by the ownership check to derive the owner bit */
	shift = eq->eq_log_eqsz - HERMON_EQE_OWNER_SHIFT;

	/*
	 * Calculate the wrap around mask.  Note: This operation only works
	 * because all Hermon event queues have power-of-2 sizes
	 */
	wrap_around_mask = (eq->eq_bufsz - 1);

	/* Calculate the pointer to the first EQ entry */
	eqe = &eq->eq_buf[(cons_indx & wrap_around_mask)];


	/*
	 * Pull the handler function for this EQ from the Hermon Event Queue
	 * handle
	 */
	eqfunction = eq->eq_func;

	for (;;) {
		polled_some = 0;
		while (HERMON_EQE_OWNER_IS_SW(eq, eqe, cons_indx, shift)) {

			/*
			 * Call the EQ handler function.  But only call if we
			 * are not in polled I/O mode (i.e. not processing
			 * because of a system panic).  Note: We don't call
			 * the EQ handling functions from a system panic
			 * because we are primarily concerned only with
			 * ensuring that the event queues do not overflow (or,
			 * more specifically, the event queue associated with
			 * the CQ that is being used in the sync/dump process).
			 * Also, we don't want to make any upcalls (to the
			 * IBTF) because we can't guarantee when/if those
			 * calls would ever return.  And, if we're in panic,
			 * then we reached here through a PollCQ() call (from
			 * hermon_cq_poll()), and we need to ensure that we
			 * successfully return any work completions to the
			 * caller.
			 */
			if (ddi_in_panic() == 0) {
				eqfunction(state, eq, eqe);
			}

			/* Reset to hardware ownership is implicit */

			/* Increment the consumer index */
			cons_indx++;

			/* Update the pointer to the next EQ entry */
			eqe = &eq->eq_buf[(cons_indx & wrap_around_mask)];

			polled_some = 1;
		}

		/*
		 * write consumer index via EQ set CI Doorbell, to keep overflow
		 * from occurring during poll
		 */

		eq->eq_consindx = cons_indx;

		/* the FMA retry loop starts. */
		hermon_pio_start(state, uarhdl, pio_error, fm_loop_cnt,
		    fm_status, fm_test);

		ddi_put32(uarhdl, eq->eq_doorbell,
		    (cons_indx & HERMON_EQ_CI_MASK) | EQ_ARM_BIT);

		/* the FMA retry loop ends. */
		hermon_pio_end(state, uarhdl, pio_error, fm_loop_cnt,
		    fm_status, fm_test);

		/* no entries seen this pass: the EQ is drained and re-armed */
		if (polled_some == 0)
			break;
	};
	return;

pio_error:
	hermon_fm_ereport(state, HCA_SYS_ERR, HCA_ERR_FATAL);
}
607 603
608 604
/*
 * hermon_eq_catastrophic
 *    Context: Only called from interrupt context (and during panic)
 *
 *    Handles a catastrophic firmware/hardware error: decodes the error
 *    type from the first word of the firmware error buffer, logs the
 *    entire error buffer, and (if attached to the IBTF) delivers an
 *    IBT_ERROR_LOCAL_CATASTROPHIC async event.
 */
static void
hermon_eq_catastrophic(hermon_state_t *state)
{
	ddi_acc_handle_t	cmdhdl = hermon_get_cmdhdl(state);
	ibt_async_code_t	type;
	ibc_async_event_t	event;
	uint32_t		*base_addr;
	uint32_t		buf_size;
	uint32_t		word;
	uint8_t			err_type;
	uint32_t		err_buf;
	int			i;

	/* initialize the FMA retry loop */
	hermon_pio_init(fm_loop_cnt, fm_status, fm_test);

	bzero(&event, sizeof (ibc_async_event_t));
	base_addr = state->hs_cmd_regs.fw_err_buf;

	buf_size = state->hs_fw.error_buf_sz;	/* in #dwords */

	/* the FMA retry loop starts. */
	hermon_pio_start(state, cmdhdl, pio_error, fm_loop_cnt, fm_status,
	    fm_test);

	/* first word of the error buffer encodes the error type */
	word = ddi_get32(cmdhdl, base_addr);

	/* the FMA retry loop ends. */
	hermon_pio_end(state, cmdhdl, pio_error, fm_loop_cnt, fm_status,
	    fm_test);

	err_type = (word & 0xFF000000) >> 24;
	type = IBT_ERROR_LOCAL_CATASTROPHIC;

	switch (err_type) {
	case HERMON_CATASTROPHIC_INTERNAL_ERROR:
		cmn_err(CE_WARN, "Catastrophic Internal Error: 0x%02x",
		    err_type);

		break;

	case HERMON_CATASTROPHIC_UPLINK_BUS_ERROR:
		cmn_err(CE_WARN, "Catastrophic Uplink Bus Error: 0x%02x",
		    err_type);

		break;

	case HERMON_CATASTROPHIC_DDR_DATA_ERROR:
		cmn_err(CE_WARN, "Catastrophic DDR Data Error: 0x%02x",
		    err_type);

		break;

	case HERMON_CATASTROPHIC_INTERNAL_PARITY_ERROR:
		cmn_err(CE_WARN, "Catastrophic Internal Parity Error: 0x%02x",
		    err_type);

		break;

	default:
		/* Unknown type of Catastrophic error */
		cmn_err(CE_WARN, "Catastrophic Unknown Error: 0x%02x",
		    err_type);

		break;
	}

	/* the FMA retry loop starts. */
	hermon_pio_start(state, cmdhdl, pio_error, fm_loop_cnt, fm_status,
	    fm_test);

	/*
	 * Read in the catastrophic error buffer from the hardware.
	 */
	for (i = 0; i < buf_size; i++) {
		base_addr =
		    (state->hs_cmd_regs.fw_err_buf + i);
		err_buf = ddi_get32(cmdhdl, base_addr);
		cmn_err(CE_NOTE, "hermon%d: catastrophic_error[%02x]: %08X",
		    state->hs_instance, i, err_buf);
	}

	/* the FMA retry loop ends. */
	hermon_pio_end(state, cmdhdl, pio_error, fm_loop_cnt, fm_status,
	    fm_test);

	/*
	 * We also call the IBTF here to inform it of the catastrophic error.
	 * Note: Since no event information (i.e. QP handles, CQ handles,
	 * etc.) is necessary, we pass a NULL pointer instead of a pointer to
	 * an empty ibc_async_event_t struct.
	 *
	 * But we also check if "hs_ibtfpriv" is NULL.  If it is then it
	 * means that we've have either received this event before we
	 * finished attaching to the IBTF or we've received it while we
	 * are in the process of detaching.
	 */
	if (state->hs_ibtfpriv != NULL) {
		HERMON_DO_IBTF_ASYNC_CALLB(state, type, &event);
	}

	/*
	 * NOTE(review): the success path above deliberately falls through
	 * to this label, so a non-fatal ereport is always posted -- this
	 * appears intentional given the function only runs on catastrophic
	 * events, but confirm against the FMA design for this driver.
	 */
pio_error:
	/* ignore these errors but log them because they're harmless. */
	hermon_fm_ereport(state, HCA_SYS_ERR, HCA_ERR_NON_FATAL);
}
718 714
719 715
/*
 * hermon_eq_alloc()
 *    Context: Only called from attach() path context
 *
 *    Allocates one Event Queue: an EQC hardware context entry, a software
 *    EQ handle, the EQ buffer memory, and a memory registration for that
 *    buffer; then passes ownership of the EQC to the hardware via the
 *    SW2HW_EQ firmware command.  On success the filled-in EQ handle is
 *    returned through "eqhdl".  On failure, everything allocated so far
 *    is unwound via the eqalloc_fail* labels.
 */
static int
hermon_eq_alloc(hermon_state_t *state, uint32_t log_eq_size, uint_t intr,
    hermon_eqhdl_t *eqhdl)
{
	hermon_rsrc_t		*eqc, *rsrc;
	hermon_hw_eqc_t		eqc_entry;
	hermon_eqhdl_t		eq;
	ibt_mr_attr_t		mr_attr;
	hermon_mr_options_t	op;
	hermon_pdhdl_t		pd;
	hermon_mrhdl_t		mr;
	hermon_hw_eqe_t		*buf;
	int			status;

	/* Use the internal protection domain (PD) for setting up EQs */
	pd = state->hs_pdhdl_internal;

	/* Increment the reference count on the protection domain (PD) */
	hermon_pd_refcnt_inc(pd);

	/*
	 * Allocate an EQ context entry.  This will be filled in with all
	 * the necessary parameters to define the Event Queue.  And then
	 * ownership will be passed to the hardware in the final step
	 * below.  If we fail here, we must undo the protection domain
	 * reference count.
	 */
	status = hermon_rsrc_alloc(state, HERMON_EQC, 1, HERMON_SLEEP, &eqc);
	if (status != DDI_SUCCESS) {
		status = DDI_FAILURE;
		goto eqalloc_fail1;
	}

	/*
	 * Allocate the software structure for tracking the event queue (i.e.
	 * the Hermon Event Queue handle).  If we fail here, we must undo the
	 * protection domain reference count and the previous resource
	 * allocation.
	 */
	status = hermon_rsrc_alloc(state, HERMON_EQHDL, 1, HERMON_SLEEP, &rsrc);
	if (status != DDI_SUCCESS) {
		status = DDI_FAILURE;
		goto eqalloc_fail2;
	}

	eq = (hermon_eqhdl_t)rsrc->hr_addr;

	/*
	 * Allocate the memory for Event Queue.
	 */
	eq->eq_eqinfo.qa_size = (1 << log_eq_size) * sizeof (hermon_hw_eqe_t);
	eq->eq_eqinfo.qa_alloc_align = eq->eq_eqinfo.qa_bind_align = PAGESIZE;

	eq->eq_eqinfo.qa_location = HERMON_QUEUE_LOCATION_NORMAL;
	status = hermon_queue_alloc(state, &eq->eq_eqinfo, HERMON_SLEEP);
	if (status != DDI_SUCCESS) {
		status = DDI_FAILURE;
		goto eqalloc_fail3;
	}

	buf = (hermon_hw_eqe_t *)eq->eq_eqinfo.qa_buf_aligned;
	/*
	 * Initializing each of the Event Queue Entries (EQE) by setting their
	 * ownership to hardware ("owner" bit set to HW) is now done by HW
	 * when the transfer of ownership (below) of the
	 * EQ context itself is done.
	 */

	/*
	 * Register the memory for the EQ.
	 *
	 * Because we are in the attach path we use NOSLEEP here so that we
	 * SPIN in the HCR since the event queues are not setup yet, and we
	 * cannot NOSPIN at this point in time.
	 */

	mr_attr.mr_vaddr = (uint64_t)(uintptr_t)buf;
	mr_attr.mr_len = eq->eq_eqinfo.qa_size;
	mr_attr.mr_as = NULL;
	mr_attr.mr_flags = IBT_MR_NOSLEEP | IBT_MR_ENABLE_LOCAL_WRITE;
	op.mro_bind_type = state->hs_cfg_profile->cp_iommu_bypass;
	op.mro_bind_dmahdl = eq->eq_eqinfo.qa_dmahdl;
	op.mro_bind_override_addr = 0;
	status = hermon_mr_register(state, pd, &mr_attr, &mr, &op,
	    HERMON_EQ_CMPT);
	if (status != DDI_SUCCESS) {
		status = DDI_FAILURE;
		goto eqalloc_fail4;
	}

	/*
	 * Fill in the EQC entry.  This is the final step before passing
	 * ownership of the EQC entry to the Hermon hardware.  We use all of
	 * the information collected/calculated above to fill in the
	 * requisite portions of the EQC.  Note: We create all EQs in the
	 * "fired" state.  We will arm them later (after our interrupt
	 * routine had been registered.)
	 *
	 * NOTE(review): the comment above says "fired" but the state is
	 * set to HERMON_EQ_ARMED below -- confirm which is intended.
	 */
	bzero(&eqc_entry, sizeof (hermon_hw_eqc_t));
	eqc_entry.state		= HERMON_EQ_ARMED;
	eqc_entry.log_eq_sz	= log_eq_size;
	eqc_entry.intr		= intr;
	eqc_entry.log2_pgsz	= mr->mr_log2_pgsz;
	eqc_entry.pg_offs	= eq->eq_eqinfo.qa_pgoffs >> 5;
	eqc_entry.mtt_base_addrh = (uint32_t)((mr->mr_mttaddr >> 32) & 0xFF);
	eqc_entry.mtt_base_addrl = mr->mr_mttaddr >> 3;
	eqc_entry.cons_indx	= 0x0;
	eqc_entry.prod_indx	= 0x0;

	/*
	 * Write the EQC entry to hardware.  Lastly, we pass ownership of
	 * the entry to the hardware (using the Hermon SW2HW_EQ firmware
	 * command).  Note: in general, this operation shouldn't fail.  But
	 * if it does, we have to undo everything we've done above before
	 * returning error.
	 */
	status = hermon_cmn_ownership_cmd_post(state, SW2HW_EQ, &eqc_entry,
	    sizeof (hermon_hw_eqc_t), eqc->hr_indx, HERMON_CMD_NOSLEEP_SPIN);
	if (status != HERMON_CMD_SUCCESS) {
		cmn_err(CE_NOTE, "hermon%d: SW2HW_EQ command failed: %08x\n",
		    state->hs_instance, status);
		if (status == HERMON_CMD_INVALID_STATUS) {
			hermon_fm_ereport(state, HCA_SYS_ERR, HCA_ERR_SRV_LOST);
		}
		status = ibc_get_ci_failure(0);
		goto eqalloc_fail5;
	}

	/*
	 * Fill in the rest of the Hermon Event Queue handle.  Having
	 * successfully transferred ownership of the EQC, we can update the
	 * following fields for use in further operations on the EQ.
	 */
	eq->eq_eqcrsrcp	 = eqc;
	eq->eq_rsrcp	 = rsrc;
	eq->eq_consindx	 = 0;
	eq->eq_eqnum	 = eqc->hr_indx;
	eq->eq_buf	 = buf;
	eq->eq_bufsz	 = (1 << log_eq_size);
	eq->eq_log_eqsz	 = log_eq_size;
	eq->eq_mrhdl	 = mr;
	eq->eq_doorbell	 = (uint32_t *)((uintptr_t)state->hs_reg_uar_baseaddr +
	    (uint32_t)ARM_EQ_INDEX(eq->eq_eqnum));
	*eqhdl		 = eq;

	return (DDI_SUCCESS);

/*
 * The following is cleanup for all possible failure cases in this routine
 */
eqalloc_fail5:
	if (hermon_mr_deregister(state, &mr, HERMON_MR_DEREG_ALL,
	    HERMON_NOSLEEP) != DDI_SUCCESS) {
		HERMON_WARNING(state, "failed to deregister EQ memory");
	}
eqalloc_fail4:
	hermon_queue_free(&eq->eq_eqinfo);
eqalloc_fail3:
	hermon_rsrc_free(state, &rsrc);
eqalloc_fail2:
	hermon_rsrc_free(state, &eqc);
eqalloc_fail1:
	hermon_pd_refcnt_dec(pd);
eqalloc_fail:
	return (status);
}
891 886
892 887
893 888 /*
894 889 * hermon_eq_free()
895 890 * Context: Only called from attach() and/or detach() path contexts
896 891 */
static int
hermon_eq_free(hermon_state_t *state, hermon_eqhdl_t *eqhdl)
{
	hermon_rsrc_t		*eqc, *rsrc;
	hermon_hw_eqc_t		eqc_entry;
	hermon_pdhdl_t		pd;
	hermon_mrhdl_t		mr;
	hermon_eqhdl_t		eq;
	uint32_t		eqnum;
	int			status;

	/*
	 * Pull all the necessary information from the Hermon Event Queue
	 * handle.  This is necessary here because the resource for the
	 * EQ handle is going to be freed up as part of this operation.
	 */
	eq	= *eqhdl;
	eqc	= eq->eq_eqcrsrcp;
	rsrc	= eq->eq_rsrcp;
	pd	= state->hs_pdhdl_internal;
	mr	= eq->eq_mrhdl;
	eqnum	= eq->eq_eqnum;

	/*
	 * Reclaim EQC entry from hardware (using the Hermon HW2SW_EQ
	 * firmware command).  "eqc_entry" is an output buffer that the
	 * command fills in; its prior contents are irrelevant.  If the
	 * ownership transfer fails for any reason, then it is an
	 * indication that something (either in HW or SW) has gone
	 * seriously wrong, so we fail without freeing any resources
	 * (they are still owned by the hardware).
	 */
	status = hermon_cmn_ownership_cmd_post(state, HW2SW_EQ, &eqc_entry,
	    sizeof (hermon_hw_eqc_t), eqnum, HERMON_CMD_NOSLEEP_SPIN);
	if (status != HERMON_CMD_SUCCESS) {
		HERMON_WARNING(state, "failed to reclaim EQC ownership");
		cmn_err(CE_CONT, "Hermon: HW2SW_EQ command failed: %08x\n",
		    status);
		return (DDI_FAILURE);
	}

	/*
	 * Deregister the memory for the Event Queue.  If this fails
	 * for any reason, then it is an indication that something (either
	 * in HW or SW) has gone seriously wrong.  So we print a warning
	 * message and continue (the remaining teardown is still safe to
	 * attempt).
	 */
	status = hermon_mr_deregister(state, &mr, HERMON_MR_DEREG_ALL,
	    HERMON_NOSLEEP);
	if (status != DDI_SUCCESS) {
		HERMON_WARNING(state, "failed to deregister EQ memory");
	}

	/* Free the memory for the EQ */
	hermon_queue_free(&eq->eq_eqinfo);

	/* Free the Hermon Event Queue handle */
	hermon_rsrc_free(state, &rsrc);

	/* Free up the EQC entry resource */
	hermon_rsrc_free(state, &eqc);

	/* Decrement the reference count on the protection domain (PD) */
	hermon_pd_refcnt_dec(pd);

	/* Set the eqhdl pointer to NULL and return success */
	*eqhdl = NULL;

	return (DDI_SUCCESS);
}
964 959
965 960
966 961 /*
967 962 * hermon_eq_handler_init
968 963 * Context: Only called from attach() path context
969 964 */
970 965 static int
971 966 hermon_eq_handler_init(hermon_state_t *state, hermon_eqhdl_t eq,
972 967 uint_t evt_type_mask, int (*eq_func)(hermon_state_t *state,
973 968 hermon_eqhdl_t eq, hermon_hw_eqe_t *eqe))
974 969 {
975 970 int status;
976 971
977 972 /*
978 973 * Save away the EQ handler function and the event type mask. These
979 974 * will be used later during interrupt and event queue processing.
980 975 */
981 976 eq->eq_func = eq_func;
982 977 eq->eq_evttypemask = evt_type_mask;
983 978
984 979 /*
985 980 * Map the EQ to a specific class of event (or events) depending
986 981 * on the mask value passed in. The HERMON_EVT_NO_MASK means not
987 982 * to attempt associating the EQ with any specific class of event.
988 983 * This is particularly useful when initializing the events queues
989 984 * used for CQ events. The mapping is done using the Hermon MAP_EQ
990 985 * firmware command. Note: This command should not, in general, fail.
991 986 * If it does, then something (probably HW related) has gone seriously
992 987 * wrong.
993 988 */
994 989 if (evt_type_mask != HERMON_EVT_NO_MASK) {
995 990 status = hermon_map_eq_cmd_post(state,
996 991 HERMON_CMD_MAP_EQ_EVT_MAP, eq->eq_eqnum, evt_type_mask,
997 992 HERMON_CMD_NOSLEEP_SPIN);
998 993 if (status != HERMON_CMD_SUCCESS) {
999 994 cmn_err(CE_NOTE, "hermon%d: MAP_EQ command failed: "
1000 995 "%08x\n", state->hs_instance, status);
1001 996 return (DDI_FAILURE);
1002 997 }
1003 998 }
1004 999
1005 1000 return (DDI_SUCCESS);
1006 1001 }
1007 1002
1008 1003
1009 1004 /*
1010 1005 * hermon_eq_handler_fini
1011 1006 * Context: Only called from attach() and/or detach() path contexts
1012 1007 */
1013 1008 static int
1014 1009 hermon_eq_handler_fini(hermon_state_t *state, hermon_eqhdl_t eq)
1015 1010 {
1016 1011 int status;
1017 1012
1018 1013 /*
1019 1014 * Unmap the EQ from the event class to which it had been previously
1020 1015 * mapped. The unmapping is done using the Hermon MAP_EQ (in much
1021 1016 * the same way that the initial mapping was done). The difference,
1022 1017 * however, is in the HERMON_EQ_EVT_UNMAP flag that is passed to the
1023 1018 * MAP_EQ firmware command. The HERMON_EVT_NO_MASK (which may have
1024 1019 * been passed in at init time) still means that no association has
1025 1020 * been made between the EQ and any specific class of event (and,
1026 1021 * hence, no unmapping is necessary). Note: This command should not,
1027 1022 * in general, fail. If it does, then something (probably HW related)
1028 1023 * has gone seriously wrong.
1029 1024 */
1030 1025 if (eq->eq_evttypemask != HERMON_EVT_NO_MASK) {
1031 1026 status = hermon_map_eq_cmd_post(state,
1032 1027 HERMON_CMD_MAP_EQ_EVT_UNMAP, eq->eq_eqnum,
1033 1028 eq->eq_evttypemask, HERMON_CMD_NOSLEEP_SPIN);
1034 1029 if (status != HERMON_CMD_SUCCESS) {
1035 1030 cmn_err(CE_NOTE, "hermon%d: MAP_EQ command failed: "
1036 1031 "%08x\n", state->hs_instance, status);
1037 1032 return (DDI_FAILURE);
1038 1033 }
1039 1034 }
1040 1035
1041 1036 return (DDI_SUCCESS);
1042 1037 }
1043 1038
1044 1039
1045 1040 /*
1046 1041 * hermon_eq_demux()
1047 1042 * Context: Called only from interrupt context
1048 1043 * Usage: to demux the various type reported on one EQ
1049 1044 */
static int
hermon_eq_demux(hermon_state_t *state, hermon_eqhdl_t eq,
    hermon_hw_eqe_t *eqe)
{
	uint_t			eqe_evttype;
	int			status = DDI_FAILURE;

	/*
	 * Dispatch the EQE to the handler registered for its event type.
	 * Event types with no case below fall through to "default" and
	 * return the initial DDI_FAILURE status.  The error-class events
	 * additionally log a warning before invoking their handler.
	 */
	eqe_evttype = HERMON_EQE_EVTTYPE_GET(eq, eqe);

	switch (eqe_evttype) {

	case HERMON_EVT_PORT_STATE_CHANGE:
		status = hermon_port_state_change_handler(state, eq, eqe);
		break;

	case HERMON_EVT_COMM_ESTABLISHED:
		status = hermon_comm_estbl_handler(state, eq, eqe);
		break;

	case HERMON_EVT_COMMAND_INTF_COMP:
		status = hermon_cmd_complete_handler(state, eq, eqe);
		break;

	case HERMON_EVT_LOCAL_WQ_CAT_ERROR:
		HERMON_WARNING(state, HERMON_FMA_LOCCAT);
		status = hermon_local_wq_cat_err_handler(state, eq, eqe);
		break;

	case HERMON_EVT_INV_REQ_LOCAL_WQ_ERROR:
		HERMON_WARNING(state, HERMON_FMA_LOCINV);
		status = hermon_invreq_local_wq_err_handler(state, eq, eqe);
		break;

	case HERMON_EVT_LOCAL_ACC_VIO_WQ_ERROR:
		HERMON_WARNING(state, HERMON_FMA_LOCACEQ);
		IBTF_DPRINTF_L2("async", HERMON_FMA_LOCACEQ);
		status = hermon_local_acc_vio_wq_err_handler(state, eq, eqe);
		break;
	case HERMON_EVT_SEND_QUEUE_DRAINED:
		status = hermon_sendq_drained_handler(state, eq, eqe);
		break;

	case HERMON_EVT_PATH_MIGRATED:
		status = hermon_path_mig_handler(state, eq, eqe);
		break;

	case HERMON_EVT_PATH_MIGRATE_FAILED:
		HERMON_WARNING(state, HERMON_FMA_PATHMIG);
		status = hermon_path_mig_err_handler(state, eq, eqe);
		break;

	case HERMON_EVT_SRQ_CATASTROPHIC_ERROR:
		HERMON_WARNING(state, HERMON_FMA_SRQCAT);
		status = hermon_catastrophic_handler(state, eq, eqe);
		break;

	case HERMON_EVT_SRQ_LAST_WQE_REACHED:
		status = hermon_srq_last_wqe_reached_handler(state, eq, eqe);
		break;

	case HERMON_EVT_FEXCH_ERROR:
		status = hermon_fexch_error_handler(state, eq, eqe);
		break;

	default:
		/* Unrecognized event type: report failure to the caller */
		break;
	}
	return (status);
}
1119 1114
1120 1115 /*
1121 1116 * hermon_port_state_change_handler()
1122 1117 * Context: Only called from interrupt context
1123 1118 */
1124 1119 /* ARGSUSED */
1125 1120 static int
1126 1121 hermon_port_state_change_handler(hermon_state_t *state, hermon_eqhdl_t eq,
1127 1122 hermon_hw_eqe_t *eqe)
1128 1123 {
1129 1124 ibc_async_event_t event;
1130 1125 ibt_async_code_t type;
1131 1126 uint_t subtype;
1132 1127 uint8_t port;
1133 1128 char link_msg[24];
1134 1129
1135 1130 /*
1136 1131 * Depending on the type of Port State Change event, pass the
1137 1132 * appropriate asynch event to the IBTF.
1138 1133 */
1139 1134 port = (uint8_t)HERMON_EQE_PORTNUM_GET(eq, eqe);
1140 1135
1141 1136 /* Check for valid port number in event */
1142 1137 if ((port == 0) || (port > state->hs_cfg_profile->cp_num_ports)) {
1143 1138 HERMON_WARNING(state, "Unexpected port number in port state "
1144 1139 "change event");
1145 1140 cmn_err(CE_CONT, " Port number: %02x\n", port);
1146 1141 return (DDI_FAILURE);
1147 1142 }
1148 1143
1149 1144 subtype = HERMON_EQE_EVTSUBTYPE_GET(eq, eqe);
1150 1145 if (subtype == HERMON_PORT_LINK_ACTIVE) {
1151 1146 event.ev_port = port;
1152 1147 type = IBT_EVENT_PORT_UP;
1153 1148
1154 1149 (void) snprintf(link_msg, 23, "port %d up", port);
1155 1150 ddi_dev_report_fault(state->hs_dip, DDI_SERVICE_RESTORED,
1156 1151 DDI_EXTERNAL_FAULT, link_msg);
1157 1152 } else if (subtype == HERMON_PORT_LINK_DOWN) {
1158 1153 event.ev_port = port;
1159 1154 type = IBT_ERROR_PORT_DOWN;
1160 1155
1161 1156 (void) snprintf(link_msg, 23, "port %d down", port);
1162 1157 ddi_dev_report_fault(state->hs_dip, DDI_SERVICE_LOST,
1163 1158 DDI_EXTERNAL_FAULT, link_msg);
1164 1159 } else {
1165 1160 HERMON_WARNING(state, "Unexpected subtype in port state change "
1166 1161 "event");
1167 1162 cmn_err(CE_CONT, " Event type: %02x, subtype: %02x\n",
1168 1163 HERMON_EQE_EVTTYPE_GET(eq, eqe), subtype);
1169 1164 return (DDI_FAILURE);
1170 1165 }
1171 1166
1172 1167 /*
1173 1168 * Deliver the event to the IBTF. Note: If "hs_ibtfpriv" is NULL,
1174 1169 * then we have either received this event before we finished
1175 1170 * attaching to the IBTF or we've received it while we are in the
1176 1171 * process of detaching.
1177 1172 */
1178 1173 if (state->hs_ibtfpriv != NULL) {
1179 1174 HERMON_DO_IBTF_ASYNC_CALLB(state, type, &event);
1180 1175 }
1181 1176
1182 1177 return (DDI_SUCCESS);
1183 1178 }
1184 1179
1185 1180
1186 1181 /*
1187 1182 * hermon_comm_estbl_handler()
1188 1183 * Context: Only called from interrupt context
1189 1184 */
1190 1185 /* ARGSUSED */
1191 1186 static int
1192 1187 hermon_comm_estbl_handler(hermon_state_t *state, hermon_eqhdl_t eq,
1193 1188 hermon_hw_eqe_t *eqe)
1194 1189 {
1195 1190 hermon_qphdl_t qp;
1196 1191 uint_t qpnum;
1197 1192 ibc_async_event_t event;
1198 1193 ibt_async_code_t type;
1199 1194
1200 1195 /* Get the QP handle from QP number in event descriptor */
1201 1196 qpnum = HERMON_EQE_QPNUM_GET(eq, eqe);
1202 1197 qp = hermon_qphdl_from_qpnum(state, qpnum);
1203 1198
1204 1199 /*
1205 1200 * If the QP handle is NULL, this is probably an indication
1206 1201 * that the QP has been freed already. In which case, we
1207 1202 * should not deliver this event.
1208 1203 *
1209 1204 * We also check that the QP number in the handle is the
1210 1205 * same as the QP number in the event queue entry. This
1211 1206 * extra check allows us to handle the case where a QP was
1212 1207 * freed and then allocated again in the time it took to
1213 1208 * handle the event queue processing. By constantly incrementing
1214 1209 * the non-constrained portion of the QP number every time
1215 1210 * a new QP is allocated, we mitigate (somewhat) the chance
1216 1211 * that a stale event could be passed to the client's QP
1217 1212 * handler.
1218 1213 *
1219 1214 * Lastly, we check if "hs_ibtfpriv" is NULL. If it is then it
1220 1215 * means that we've have either received this event before we
1221 1216 * finished attaching to the IBTF or we've received it while we
1222 1217 * are in the process of detaching.
1223 1218 */
1224 1219 if ((qp != NULL) && (qp->qp_qpnum == qpnum) &&
1225 1220 (state->hs_ibtfpriv != NULL)) {
1226 1221 event.ev_qp_hdl = (ibtl_qp_hdl_t)qp->qp_hdlrarg;
1227 1222 type = IBT_EVENT_COM_EST_QP;
1228 1223
1229 1224 HERMON_DO_IBTF_ASYNC_CALLB(state, type, &event);
1230 1225 }
1231 1226
1232 1227 return (DDI_SUCCESS);
1233 1228 }
1234 1229
1235 1230
1236 1231 /*
1237 1232 * hermon_local_wq_cat_err_handler()
1238 1233 * Context: Only called from interrupt context
1239 1234 */
1240 1235 /* ARGSUSED */
1241 1236 static int
1242 1237 hermon_local_wq_cat_err_handler(hermon_state_t *state, hermon_eqhdl_t eq,
1243 1238 hermon_hw_eqe_t *eqe)
1244 1239 {
1245 1240 hermon_qphdl_t qp;
1246 1241 uint_t qpnum;
1247 1242 ibc_async_event_t event;
1248 1243 ibt_async_code_t type;
1249 1244
1250 1245 /* Get the QP handle from QP number in event descriptor */
1251 1246 qpnum = HERMON_EQE_QPNUM_GET(eq, eqe);
1252 1247 qp = hermon_qphdl_from_qpnum(state, qpnum);
1253 1248
1254 1249 /*
1255 1250 * If the QP handle is NULL, this is probably an indication
1256 1251 * that the QP has been freed already. In which case, we
1257 1252 * should not deliver this event.
1258 1253 *
1259 1254 * We also check that the QP number in the handle is the
1260 1255 * same as the QP number in the event queue entry. This
1261 1256 * extra check allows us to handle the case where a QP was
1262 1257 * freed and then allocated again in the time it took to
1263 1258 * handle the event queue processing. By constantly incrementing
1264 1259 * the non-constrained portion of the QP number every time
1265 1260 * a new QP is allocated, we mitigate (somewhat) the chance
1266 1261 * that a stale event could be passed to the client's QP
1267 1262 * handler.
1268 1263 *
1269 1264 * Lastly, we check if "hs_ibtfpriv" is NULL. If it is then it
1270 1265 * means that we've have either received this event before we
1271 1266 * finished attaching to the IBTF or we've received it while we
1272 1267 * are in the process of detaching.
1273 1268 */
1274 1269 if ((qp != NULL) && (qp->qp_qpnum == qpnum) &&
1275 1270 (state->hs_ibtfpriv != NULL)) {
1276 1271 event.ev_qp_hdl = (ibtl_qp_hdl_t)qp->qp_hdlrarg;
1277 1272 type = IBT_ERROR_CATASTROPHIC_QP;
1278 1273
1279 1274 HERMON_DO_IBTF_ASYNC_CALLB(state, type, &event);
1280 1275 }
1281 1276
1282 1277 return (DDI_SUCCESS);
1283 1278 }
1284 1279
1285 1280
1286 1281 /*
1287 1282 * hermon_invreq_local_wq_err_handler()
1288 1283 * Context: Only called from interrupt context
1289 1284 */
1290 1285 /* ARGSUSED */
1291 1286 static int
1292 1287 hermon_invreq_local_wq_err_handler(hermon_state_t *state, hermon_eqhdl_t eq,
1293 1288 hermon_hw_eqe_t *eqe)
1294 1289 {
1295 1290 hermon_qphdl_t qp;
1296 1291 uint_t qpnum;
1297 1292 ibc_async_event_t event;
1298 1293 ibt_async_code_t type;
1299 1294
1300 1295 /* Get the QP handle from QP number in event descriptor */
1301 1296 qpnum = HERMON_EQE_QPNUM_GET(eq, eqe);
1302 1297 qp = hermon_qphdl_from_qpnum(state, qpnum);
1303 1298
1304 1299 /*
1305 1300 * If the QP handle is NULL, this is probably an indication
1306 1301 * that the QP has been freed already. In which case, we
1307 1302 * should not deliver this event.
1308 1303 *
1309 1304 * We also check that the QP number in the handle is the
1310 1305 * same as the QP number in the event queue entry. This
1311 1306 * extra check allows us to handle the case where a QP was
1312 1307 * freed and then allocated again in the time it took to
1313 1308 * handle the event queue processing. By constantly incrementing
1314 1309 * the non-constrained portion of the QP number every time
1315 1310 * a new QP is allocated, we mitigate (somewhat) the chance
1316 1311 * that a stale event could be passed to the client's QP
1317 1312 * handler.
1318 1313 *
1319 1314 * Lastly, we check if "hs_ibtfpriv" is NULL. If it is then it
1320 1315 * means that we've have either received this event before we
1321 1316 * finished attaching to the IBTF or we've received it while we
1322 1317 * are in the process of detaching.
1323 1318 */
1324 1319 if ((qp != NULL) && (qp->qp_qpnum == qpnum) &&
1325 1320 (state->hs_ibtfpriv != NULL)) {
1326 1321 event.ev_qp_hdl = (ibtl_qp_hdl_t)qp->qp_hdlrarg;
1327 1322 type = IBT_ERROR_INVALID_REQUEST_QP;
1328 1323
1329 1324 HERMON_DO_IBTF_ASYNC_CALLB(state, type, &event);
1330 1325 }
1331 1326
1332 1327 return (DDI_SUCCESS);
1333 1328 }
1334 1329
1335 1330
1336 1331 /*
1337 1332 * hermon_local_acc_vio_wq_err_handler()
1338 1333 * Context: Only called from interrupt context
1339 1334 */
1340 1335 /* ARGSUSED */
1341 1336 static int
1342 1337 hermon_local_acc_vio_wq_err_handler(hermon_state_t *state, hermon_eqhdl_t eq,
1343 1338 hermon_hw_eqe_t *eqe)
1344 1339 {
1345 1340 hermon_qphdl_t qp;
1346 1341 uint_t qpnum;
1347 1342 ibc_async_event_t event;
1348 1343 ibt_async_code_t type;
1349 1344
1350 1345 /* Get the QP handle from QP number in event descriptor */
1351 1346 qpnum = HERMON_EQE_QPNUM_GET(eq, eqe);
1352 1347 qp = hermon_qphdl_from_qpnum(state, qpnum);
1353 1348
1354 1349 /*
1355 1350 * If the QP handle is NULL, this is probably an indication
1356 1351 * that the QP has been freed already. In which case, we
1357 1352 * should not deliver this event.
1358 1353 *
1359 1354 * We also check that the QP number in the handle is the
1360 1355 * same as the QP number in the event queue entry. This
1361 1356 * extra check allows us to handle the case where a QP was
1362 1357 * freed and then allocated again in the time it took to
1363 1358 * handle the event queue processing. By constantly incrementing
1364 1359 * the non-constrained portion of the QP number every time
1365 1360 * a new QP is allocated, we mitigate (somewhat) the chance
1366 1361 * that a stale event could be passed to the client's QP
1367 1362 * handler.
1368 1363 *
1369 1364 * Lastly, we check if "hs_ibtfpriv" is NULL. If it is then it
1370 1365 * means that we've have either received this event before we
1371 1366 * finished attaching to the IBTF or we've received it while we
1372 1367 * are in the process of detaching.
1373 1368 */
1374 1369 if ((qp != NULL) && (qp->qp_qpnum == qpnum) &&
1375 1370 (state->hs_ibtfpriv != NULL)) {
1376 1371 event.ev_qp_hdl = (ibtl_qp_hdl_t)qp->qp_hdlrarg;
1377 1372 type = IBT_ERROR_ACCESS_VIOLATION_QP;
1378 1373
1379 1374 HERMON_DO_IBTF_ASYNC_CALLB(state, type, &event);
1380 1375 }
1381 1376
1382 1377 return (DDI_SUCCESS);
1383 1378 }
1384 1379
1385 1380
1386 1381 /*
1387 1382 * hermon_sendq_drained_handler()
1388 1383 * Context: Only called from interrupt context
1389 1384 */
1390 1385 /* ARGSUSED */
1391 1386 static int
1392 1387 hermon_sendq_drained_handler(hermon_state_t *state, hermon_eqhdl_t eq,
1393 1388 hermon_hw_eqe_t *eqe)
1394 1389 {
1395 1390 hermon_qphdl_t qp;
1396 1391 uint_t qpnum;
1397 1392 ibc_async_event_t event;
1398 1393 uint_t forward_sqd_event;
1399 1394 ibt_async_code_t type;
1400 1395
1401 1396 /* Get the QP handle from QP number in event descriptor */
1402 1397 qpnum = HERMON_EQE_QPNUM_GET(eq, eqe);
1403 1398 qp = hermon_qphdl_from_qpnum(state, qpnum);
1404 1399
1405 1400 /*
1406 1401 * If the QP handle is NULL, this is probably an indication
1407 1402 * that the QP has been freed already. In which case, we
1408 1403 * should not deliver this event.
1409 1404 *
1410 1405 * We also check that the QP number in the handle is the
1411 1406 * same as the QP number in the event queue entry. This
1412 1407 * extra check allows us to handle the case where a QP was
1413 1408 * freed and then allocated again in the time it took to
1414 1409 * handle the event queue processing. By constantly incrementing
1415 1410 * the non-constrained portion of the QP number every time
1416 1411 * a new QP is allocated, we mitigate (somewhat) the chance
1417 1412 * that a stale event could be passed to the client's QP
1418 1413 * handler.
1419 1414 *
1420 1415 * And then we check if "hs_ibtfpriv" is NULL. If it is then it
1421 1416 * means that we've have either received this event before we
1422 1417 * finished attaching to the IBTF or we've received it while we
1423 1418 * are in the process of detaching.
1424 1419 */
1425 1420 if ((qp != NULL) && (qp->qp_qpnum == qpnum) &&
1426 1421 (state->hs_ibtfpriv != NULL)) {
1427 1422 event.ev_qp_hdl = (ibtl_qp_hdl_t)qp->qp_hdlrarg;
1428 1423 type = IBT_EVENT_SQD;
1429 1424
1430 1425 /*
1431 1426 * Grab the QP lock and update the QP state to reflect that
1432 1427 * the Send Queue Drained event has arrived. Also determine
1433 1428 * whether the event is intended to be forwarded on to the
1434 1429 * consumer or not. This information is used below in
1435 1430 * determining whether or not to call the IBTF.
1436 1431 */
1437 1432 mutex_enter(&qp->qp_lock);
1438 1433 forward_sqd_event = qp->qp_forward_sqd_event;
1439 1434 qp->qp_forward_sqd_event = 0;
1440 1435 qp->qp_sqd_still_draining = 0;
1441 1436 mutex_exit(&qp->qp_lock);
1442 1437
1443 1438 if (forward_sqd_event != 0) {
1444 1439 HERMON_DO_IBTF_ASYNC_CALLB(state, type, &event);
1445 1440 }
1446 1441 }
1447 1442
1448 1443 return (DDI_SUCCESS);
1449 1444 }
1450 1445
1451 1446
1452 1447 /*
1453 1448 * hermon_path_mig_handler()
1454 1449 * Context: Only called from interrupt context
1455 1450 */
1456 1451 /* ARGSUSED */
1457 1452 static int
1458 1453 hermon_path_mig_handler(hermon_state_t *state, hermon_eqhdl_t eq,
1459 1454 hermon_hw_eqe_t *eqe)
1460 1455 {
1461 1456 hermon_qphdl_t qp;
1462 1457 uint_t qpnum;
1463 1458 ibc_async_event_t event;
1464 1459 ibt_async_code_t type;
1465 1460
1466 1461 /* Get the QP handle from QP number in event descriptor */
1467 1462 qpnum = HERMON_EQE_QPNUM_GET(eq, eqe);
1468 1463 qp = hermon_qphdl_from_qpnum(state, qpnum);
1469 1464
1470 1465 /*
1471 1466 * If the QP handle is NULL, this is probably an indication
1472 1467 * that the QP has been freed already. In which case, we
1473 1468 * should not deliver this event.
1474 1469 *
1475 1470 * We also check that the QP number in the handle is the
1476 1471 * same as the QP number in the event queue entry. This
1477 1472 * extra check allows us to handle the case where a QP was
1478 1473 * freed and then allocated again in the time it took to
1479 1474 * handle the event queue processing. By constantly incrementing
1480 1475 * the non-constrained portion of the QP number every time
1481 1476 * a new QP is allocated, we mitigate (somewhat) the chance
1482 1477 * that a stale event could be passed to the client's QP
1483 1478 * handler.
1484 1479 *
1485 1480 * Lastly, we check if "hs_ibtfpriv" is NULL. If it is then it
1486 1481 * means that we've have either received this event before we
1487 1482 * finished attaching to the IBTF or we've received it while we
1488 1483 * are in the process of detaching.
1489 1484 */
1490 1485 if ((qp != NULL) && (qp->qp_qpnum == qpnum) &&
1491 1486 (state->hs_ibtfpriv != NULL)) {
1492 1487 event.ev_qp_hdl = (ibtl_qp_hdl_t)qp->qp_hdlrarg;
1493 1488 type = IBT_EVENT_PATH_MIGRATED_QP;
1494 1489
1495 1490 HERMON_DO_IBTF_ASYNC_CALLB(state, type, &event);
1496 1491 }
1497 1492
1498 1493 return (DDI_SUCCESS);
1499 1494 }
1500 1495
1501 1496
1502 1497 /*
1503 1498 * hermon_path_mig_err_handler()
1504 1499 * Context: Only called from interrupt context
1505 1500 */
1506 1501 /* ARGSUSED */
1507 1502 static int
1508 1503 hermon_path_mig_err_handler(hermon_state_t *state, hermon_eqhdl_t eq,
1509 1504 hermon_hw_eqe_t *eqe)
1510 1505 {
1511 1506 hermon_qphdl_t qp;
1512 1507 uint_t qpnum;
1513 1508 ibc_async_event_t event;
1514 1509 ibt_async_code_t type;
1515 1510
1516 1511 /* Get the QP handle from QP number in event descriptor */
1517 1512 qpnum = HERMON_EQE_QPNUM_GET(eq, eqe);
1518 1513 qp = hermon_qphdl_from_qpnum(state, qpnum);
1519 1514
1520 1515 /*
1521 1516 * If the QP handle is NULL, this is probably an indication
1522 1517 * that the QP has been freed already. In which case, we
1523 1518 * should not deliver this event.
1524 1519 *
1525 1520 * We also check that the QP number in the handle is the
1526 1521 * same as the QP number in the event queue entry. This
1527 1522 * extra check allows us to handle the case where a QP was
1528 1523 * freed and then allocated again in the time it took to
1529 1524 * handle the event queue processing. By constantly incrementing
1530 1525 * the non-constrained portion of the QP number every time
1531 1526 * a new QP is allocated, we mitigate (somewhat) the chance
1532 1527 * that a stale event could be passed to the client's QP
1533 1528 * handler.
1534 1529 *
1535 1530 * Lastly, we check if "hs_ibtfpriv" is NULL. If it is then it
1536 1531 * means that we've have either received this event before we
1537 1532 * finished attaching to the IBTF or we've received it while we
1538 1533 * are in the process of detaching.
1539 1534 */
1540 1535 if ((qp != NULL) && (qp->qp_qpnum == qpnum) &&
1541 1536 (state->hs_ibtfpriv != NULL)) {
1542 1537 event.ev_qp_hdl = (ibtl_qp_hdl_t)qp->qp_hdlrarg;
1543 1538 type = IBT_ERROR_PATH_MIGRATE_REQ_QP;
1544 1539
1545 1540 HERMON_DO_IBTF_ASYNC_CALLB(state, type, &event);
1546 1541 }
1547 1542
1548 1543 return (DDI_SUCCESS);
1549 1544 }
1550 1545
1551 1546
1552 1547 /*
1553 1548 * hermon_catastrophic_handler()
1554 1549 * Context: Only called from interrupt context
1555 1550 */
1556 1551 /* ARGSUSED */
static int
hermon_catastrophic_handler(hermon_state_t *state, hermon_eqhdl_t eq,
    hermon_hw_eqe_t *eqe)
{
	hermon_qphdl_t		qp;
	uint_t			qpnum;
	ibc_async_event_t	event;
	ibt_async_code_t	type;

	/*
	 * If this EQ is the one masked for local catastrophic errors,
	 * the event is a device-level (not per-SRQ) catastrophe: post
	 * the FMA note and shut down further event processing.
	 */
	if (eq->eq_evttypemask == HERMON_EVT_MSK_LOCAL_CAT_ERROR) {
		HERMON_FMANOTE(state, HERMON_FMA_INTERNAL);
		hermon_eq_catastrophic(state);
		return (DDI_SUCCESS);
	}

	/* Get the QP handle from QP number in event descriptor */
	qpnum = HERMON_EQE_QPNUM_GET(eq, eqe);
	qp = hermon_qphdl_from_qpnum(state, qpnum);

	/*
	 * If the QP handle is NULL, this is probably an indication
	 * that the QP has been freed already.  In which case, we
	 * should not deliver this event.
	 *
	 * We also check that the QP number in the handle is the
	 * same as the QP number in the event queue entry.  This
	 * extra check allows us to handle the case where a QP was
	 * freed and then allocated again in the time it took to
	 * handle the event queue processing.  By constantly incrementing
	 * the non-constrained portion of the QP number every time
	 * a new QP is allocated, we mitigate (somewhat) the chance
	 * that a stale event could be passed to the client's QP
	 * handler.
	 *
	 * Lastly, we check if "hs_ibtfpriv" is NULL.  If it is then it
	 * means that we've have either received this event before we
	 * finished attaching to the IBTF or we've received it while we
	 * are in the process of detaching.
	 */
	if ((qp != NULL) && (qp->qp_qpnum == qpnum) &&
	    (state->hs_ibtfpriv != NULL)) {
		/*
		 * NOTE(review): qp->qp_srqhdl is dereferenced without a
		 * NULL check; presumably SRQ catastrophic events are only
		 * delivered for QPs attached to an SRQ -- confirm.
		 */
		event.ev_srq_hdl = (ibt_srq_hdl_t)qp->qp_srqhdl->srq_hdlrarg;
		type = IBT_ERROR_CATASTROPHIC_SRQ;

		/* Mark the SRQ as being in the error state, under its lock */
		mutex_enter(&qp->qp_srqhdl->srq_lock);
		qp->qp_srqhdl->srq_state = HERMON_SRQ_STATE_ERROR;
		mutex_exit(&qp->qp_srqhdl->srq_lock);

		HERMON_DO_IBTF_ASYNC_CALLB(state, type, &event);
	}

	return (DDI_SUCCESS);
}
1610 1605
1611 1606
1612 1607 /*
1613 1608 * hermon_srq_last_wqe_reached_handler()
1614 1609 * Context: Only called from interrupt context
1615 1610 */
1616 1611 /* ARGSUSED */
1617 1612 static int
1618 1613 hermon_srq_last_wqe_reached_handler(hermon_state_t *state, hermon_eqhdl_t eq,
1619 1614 hermon_hw_eqe_t *eqe)
1620 1615 {
1621 1616 hermon_qphdl_t qp;
1622 1617 uint_t qpnum;
1623 1618 ibc_async_event_t event;
1624 1619 ibt_async_code_t type;
1625 1620
1626 1621 /* Get the QP handle from QP number in event descriptor */
1627 1622 qpnum = HERMON_EQE_QPNUM_GET(eq, eqe);
1628 1623 qp = hermon_qphdl_from_qpnum(state, qpnum);
1629 1624
1630 1625 /*
1631 1626 * If the QP handle is NULL, this is probably an indication
1632 1627 * that the QP has been freed already. In which case, we
1633 1628 * should not deliver this event.
1634 1629 *
1635 1630 * We also check that the QP number in the handle is the
1636 1631 * same as the QP number in the event queue entry. This
1637 1632 * extra check allows us to handle the case where a QP was
1638 1633 * freed and then allocated again in the time it took to
1639 1634 * handle the event queue processing. By constantly incrementing
1640 1635 * the non-constrained portion of the QP number every time
1641 1636 * a new QP is allocated, we mitigate (somewhat) the chance
1642 1637 * that a stale event could be passed to the client's QP
1643 1638 * handler.
1644 1639 *
1645 1640 * Lastly, we check if "hs_ibtfpriv" is NULL. If it is then it
1646 1641 * means that we've have either received this event before we
1647 1642 * finished attaching to the IBTF or we've received it while we
1648 1643 * are in the process of detaching.
1649 1644 */
1650 1645 if ((qp != NULL) && (qp->qp_qpnum == qpnum) &&
1651 1646 (state->hs_ibtfpriv != NULL)) {
1652 1647 event.ev_qp_hdl = (ibtl_qp_hdl_t)qp->qp_hdlrarg;
1653 1648 type = IBT_EVENT_EMPTY_CHAN;
1654 1649
1655 1650 HERMON_DO_IBTF_ASYNC_CALLB(state, type, &event);
1656 1651 }
1657 1652
1658 1653 return (DDI_SUCCESS);
1659 1654 }
1660 1655
1661 1656
1662 1657 /* ARGSUSED */
1663 1658 static int hermon_fexch_error_handler(hermon_state_t *state,
1664 1659 hermon_eqhdl_t eq, hermon_hw_eqe_t *eqe)
1665 1660 {
1666 1661 hermon_qphdl_t qp;
1667 1662 uint_t qpnum;
1668 1663 ibc_async_event_t event;
1669 1664 ibt_async_code_t type;
1670 1665
1671 1666 /* Get the QP handle from QP number in event descriptor */
1672 1667 event.ev_port = HERMON_EQE_FEXCH_PORTNUM_GET(eq, eqe);
1673 1668 qpnum = hermon_fcoib_qpnum_from_fexch(state,
1674 1669 event.ev_port, HERMON_EQE_FEXCH_FEXCH_GET(eq, eqe));
1675 1670 qp = hermon_qphdl_from_qpnum(state, qpnum);
1676 1671
1677 1672 event.ev_fc = HERMON_EQE_FEXCH_SYNDROME_GET(eq, eqe);
1678 1673
1679 1674 /*
1680 1675 * If the QP handle is NULL, this is probably an indication
1681 1676 * that the QP has been freed already. In which case, we
1682 1677 * should not deliver this event.
1683 1678 *
1684 1679 * We also check that the QP number in the handle is the
1685 1680 * same as the QP number in the event queue entry. This
1686 1681 * extra check allows us to handle the case where a QP was
1687 1682 * freed and then allocated again in the time it took to
1688 1683 * handle the event queue processing. By constantly incrementing
1689 1684 * the non-constrained portion of the QP number every time
1690 1685 * a new QP is allocated, we mitigate (somewhat) the chance
1691 1686 * that a stale event could be passed to the client's QP
1692 1687 * handler.
1693 1688 *
1694 1689 * Lastly, we check if "hs_ibtfpriv" is NULL. If it is then it
1695 1690 * means that we've have either received this event before we
1696 1691 * finished attaching to the IBTF or we've received it while we
1697 1692 * are in the process of detaching.
1698 1693 */
1699 1694 if ((qp != NULL) && (qp->qp_qpnum == qpnum) &&
1700 1695 (state->hs_ibtfpriv != NULL)) {
1701 1696 event.ev_qp_hdl = (ibtl_qp_hdl_t)qp->qp_hdlrarg;
1702 1697 type = IBT_FEXCH_ERROR;
1703 1698
1704 1699 HERMON_DO_IBTF_ASYNC_CALLB(state, type, &event);
1705 1700 }
1706 1701
1707 1702 return (DDI_SUCCESS);
1708 1703 }
1709 1704
1710 1705
1711 1706 /*
1712 1707 * hermon_no_eqhandler
1713 1708 * Context: Only called from interrupt context
1714 1709 */
1715 1710 /* ARGSUSED */
1716 1711 static int
1717 1712 hermon_no_eqhandler(hermon_state_t *state, hermon_eqhdl_t eq,
1718 1713 hermon_hw_eqe_t *eqe)
1719 1714 {
1720 1715 uint_t data;
1721 1716 int i;
1722 1717
1723 1718 /*
1724 1719 * This "unexpected event" handler (or "catch-all" handler) will
1725 1720 * receive all events for which no other handler has been registered.
1726 1721 * If we end up here, then something has probably gone seriously wrong
1727 1722 * with the Hermon hardware (or, perhaps, with the software... though
1728 1723 * it's unlikely in this case). The EQE provides all the information
1729 1724 * about the event. So we print a warning message here along with
1730 1725 * the contents of the EQE.
1731 1726 */
1732 1727 HERMON_WARNING(state, "Unexpected Event handler");
1733 1728 cmn_err(CE_CONT, " Event type: %02x, subtype: %02x\n",
1734 1729 HERMON_EQE_EVTTYPE_GET(eq, eqe),
1735 1730 HERMON_EQE_EVTSUBTYPE_GET(eq, eqe));
1736 1731 for (i = 0; i < sizeof (hermon_hw_eqe_t) >> 2; i++) {
1737 1732 data = ((uint_t *)eqe)[i];
1738 1733 cmn_err(CE_CONT, " EQE[%02x]: %08x\n", i, data);
1739 1734 }
1740 1735
1741 1736 return (DDI_SUCCESS);
1742 1737 }
↓ open down ↓ |
919 lines elided |
↑ open up ↑ |
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX