Print this page
8368 remove warlock leftovers from usr/src/uts
Split |
Close |
Expand all |
Collapse all |
--- old/usr/src/uts/common/io/ib/adapters/tavor/tavor_event.c
+++ new/usr/src/uts/common/io/ib/adapters/tavor/tavor_event.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21
22 22 /*
23 23 * Copyright 2005 Sun Microsystems, Inc. All rights reserved.
24 24 * Use is subject to license terms.
25 25 */
26 26
27 27 /*
28 28 * tavor_event.c
29 29 * Tavor Interrupt and Event Processing Routines
30 30 *
31 31 * Implements all the routines necessary for allocating, freeing, and
32 32 * handling all of the various event types that the Tavor hardware can
33 33 * generate.
34 34 * These routines include the main Tavor interrupt service routine
35 35 * (tavor_isr()) as well as all the code necessary to setup and handle
36 36 * events from each of the many event queues used by the Tavor device.
37 37 */
38 38
39 39 #include <sys/types.h>
40 40 #include <sys/conf.h>
41 41 #include <sys/ddi.h>
42 42 #include <sys/sunddi.h>
43 43 #include <sys/modctl.h>
44 44
45 45 #include <sys/ib/adapters/tavor/tavor.h>
46 46
47 47 static void tavor_eq_poll(tavor_state_t *state, tavor_eqhdl_t eq);
48 48 static void tavor_eq_catastrophic(tavor_state_t *state);
49 49 static int tavor_eq_alloc(tavor_state_t *state, uint32_t log_eq_size,
50 50 uint_t intr, tavor_eqhdl_t *eqhdl);
51 51 static int tavor_eq_free(tavor_state_t *state, tavor_eqhdl_t *eqhdl);
52 52 static int tavor_eq_handler_init(tavor_state_t *state, tavor_eqhdl_t eq,
53 53 uint_t evt_type_mask, int (*eqfunc)(tavor_state_t *state,
54 54 tavor_eqhdl_t eq, tavor_hw_eqe_t *eqe));
55 55 static int tavor_eq_handler_fini(tavor_state_t *state, tavor_eqhdl_t eq);
56 56 static void tavor_eqe_sync(tavor_eqhdl_t eq, tavor_hw_eqe_t *eqe, uint_t flag,
57 57 uint_t force_sync);
58 58 static int tavor_port_state_change_handler(tavor_state_t *state,
59 59 tavor_eqhdl_t eq, tavor_hw_eqe_t *eqe);
60 60 static int tavor_comm_estbl_handler(tavor_state_t *state,
61 61 tavor_eqhdl_t eq, tavor_hw_eqe_t *eqe);
62 62 static int tavor_local_wq_cat_err_handler(tavor_state_t *state,
63 63 tavor_eqhdl_t eq, tavor_hw_eqe_t *eqe);
64 64 static int tavor_invreq_local_wq_err_handler(tavor_state_t *state,
65 65 tavor_eqhdl_t eq, tavor_hw_eqe_t *eqe);
66 66 static int tavor_local_acc_vio_wq_err_handler(tavor_state_t *state,
67 67 tavor_eqhdl_t eq, tavor_hw_eqe_t *eqe);
68 68 static int tavor_sendq_drained_handler(tavor_state_t *state,
69 69 tavor_eqhdl_t eq, tavor_hw_eqe_t *eqe);
70 70 static int tavor_path_mig_handler(tavor_state_t *state,
71 71 tavor_eqhdl_t eq, tavor_hw_eqe_t *eqe);
72 72 static int tavor_path_mig_err_handler(tavor_state_t *state,
73 73 tavor_eqhdl_t eq, tavor_hw_eqe_t *eqe);
74 74 static int tavor_srq_catastrophic_handler(tavor_state_t *state,
75 75 tavor_eqhdl_t eq, tavor_hw_eqe_t *eqe);
76 76 static int tavor_srq_last_wqe_reached_handler(tavor_state_t *state,
77 77 tavor_eqhdl_t eq, tavor_hw_eqe_t *eqe);
78 78 static int tavor_ecc_detection_handler(tavor_state_t *state,
79 79 tavor_eqhdl_t eq, tavor_hw_eqe_t *eqe);
80 80 static int tavor_no_eqhandler(tavor_state_t *state, tavor_eqhdl_t eq,
81 81 tavor_hw_eqe_t *eqe);
82 82
83 83
84 84 /*
85 85 * tavor_eq_init_all
86 86 * Context: Only called from attach() path context
 *
 * Allocates all TAVOR_NUM_EQ_USED event queues and then maps each
 * event class onto its dedicated EQ: EQ0-EQ31 for CQ completions,
 * EQ32-EQ45 for specific async/error event classes, and EQ46 as a
 * catch-all. On any failure, previously-completed mappings
 * (num_eq_unmap) and allocations (num_eq_init) are unwound before
 * returning the failing DDI status; returns DDI_SUCCESS otherwise.
87 87 */
88 88 int
89 89 tavor_eq_init_all(tavor_state_t *state)
90 90 {
91 91 uint_t log_eq_size, intr_num;
92 92 uint_t num_eq, num_eq_init, num_eq_unmap;
93 93 int status, i;
 /*
  * NOTE(review): errormsg is assigned by the TAVOR_TNF_FAIL() macro
  * on each failure path below and consumed by TNF_PROBE_1 at the
  * bottom — it only looks unused at first glance.
  */
94 94 char *errormsg;
95 95
96 96 TAVOR_TNF_ENTER(tavor_eq_init_all);
97 97
98 98 /*
99 99 * For now, all Event Queues default to the same size (pulled from
100 100 * the current configuration profile) and are all assigned to the
101 101 * same interrupt or MSI. In the future we may support assigning
102 102 * EQs to specific interrupts or MSIs XXX
103 103 */
104 104 log_eq_size = state->ts_cfg_profile->cp_log_default_eq_sz;
105 105
106 106 /*
107 107 * If MSI is to be used, then set intr_num to the MSI number
108 108 * (currently zero because we're using only one) or'd with the
109 109 * MSI enable flag. Otherwise, for regular (i.e. 'legacy') interrupt,
110 110 * use the 'inta_pin' value returned by QUERY_ADAPTER.
111 111 */
112 112 if (state->ts_intr_type_chosen == DDI_INTR_TYPE_MSI) {
113 113 intr_num = TAVOR_EQ_MSI_ENABLE_FLAG | 0;
114 114 } else {
115 115 intr_num = state->ts_adapter.inta_pin;
116 116 }
117 117
118 118 /*
119 119 * Total number of supported EQs is hardcoded. Tavor hardware
120 120 * supports up to 64 EQs. We are currently using only 45 of them
121 121 * We will set aside the first 32 for use with Completion Queues (CQ)
122 122 * and reserve a few of the other 32 for each specific class of event
123 123 * (see below for more details).
124 124 */
125 125 num_eq = TAVOR_NUM_EQ_USED;
126 126
127 127 /*
128 128 * The "num_eq_unmap" variable is used in any possible failure
129 129 * cleanup (below) to indicate which events queues might require
130 130 * possible event class unmapping.
131 131 */
132 132 num_eq_unmap = 0;
133 133
134 134 /*
135 135 * Allocate and initialize all the Event Queues. If any of these
136 136 * EQ allocations fail then jump to the end, cleanup what had been
137 137 * successfully initialized, and return an error.
138 138 */
139 139 for (i = 0; i < num_eq; i++) {
140 140 status = tavor_eq_alloc(state, log_eq_size, intr_num,
141 141 &state->ts_eqhdl[i]);
142 142 if (status != DDI_SUCCESS) {
143 143 /* Set "status" and "errormsg" and goto failure */
144 144 TAVOR_TNF_FAIL(status, "failed EQ alloc");
145 145 num_eq_init = i;
146 146 goto all_eq_init_fail;
147 147 }
148 148 }
149 149 num_eq_init = num_eq;
150 150
151 151 /*
152 152 * Setup EQ0-EQ31 for use with Completion Queues. Note: We can
153 153 * cast the return value to void here because, when we use the
154 154 * TAVOR_EVT_NO_MASK flag, it is not possible for
155 155 * tavor_eq_handler_init() to return an error.
156 156 */
157 157 for (i = 0; i < 32; i++) {
158 158 (void) tavor_eq_handler_init(state, state->ts_eqhdl[i],
159 159 TAVOR_EVT_NO_MASK, tavor_cq_handler);
160 160 }
161 161 num_eq_unmap = 32;
162 162
163 163 /*
164 164 * Setup EQ32 for handling Completion Queue Error Events.
165 165 *
166 166 * These events include things like CQ overflow or CQ access
167 167 * violation errors. If this setup fails for any reason (which, in
168 168 * general, it really never should), then jump to the end, cleanup
169 169 * everything that has been successfully initialized, and return an
170 170 * error.
171 171 */
172 172 status = tavor_eq_handler_init(state, state->ts_eqhdl[32],
173 173 TAVOR_EVT_MSK_CQ_ERRORS, tavor_cq_err_handler);
174 174 if (status != DDI_SUCCESS) {
175 175 /* Set "status" and "errormsg" and goto failure */
176 176 TAVOR_TNF_FAIL(status, "completion queue error event");
177 177 goto all_eq_init_fail;
178 178 }
179 179 num_eq_unmap = 33;
180 180
181 181 /*
182 182 * Setup EQ33 for handling Port State Change Events
183 183 *
184 184 * These events include things like Port Up and Port Down events.
185 185 * If this setup fails for any reason (which, in general, it really
186 186 * never should), then undo all previous EQ mapping, jump to the end,
187 187 * cleanup everything that has been successfully initialized, and
188 188 * return an error.
189 189 */
190 190 status = tavor_eq_handler_init(state, state->ts_eqhdl[33],
191 191 TAVOR_EVT_MSK_PORT_STATE_CHANGE, tavor_port_state_change_handler);
192 192 if (status != DDI_SUCCESS) {
193 193 /* Set "status" and "errormsg" and goto failure */
194 194 TAVOR_TNF_FAIL(status, "port state change event");
195 195 goto all_eq_init_fail;
196 196 }
197 197 num_eq_unmap = 34;
198 198
199 199 /*
200 200 * Setup EQ34 for handling Communication Established Events
201 201 *
202 202 * These events correspond to the IB affiliated asynchronous events
203 203 * that are used for connection management. If this setup fails for
204 204 * any reason (which, in general, it really never should), then undo
205 205 * all previous EQ mapping, jump to the end, cleanup everything that
206 206 * has been successfully initialized, and return an error.
207 207 */
208 208 status = tavor_eq_handler_init(state, state->ts_eqhdl[34],
209 209 TAVOR_EVT_MSK_COMM_ESTABLISHED, tavor_comm_estbl_handler);
210 210 if (status != DDI_SUCCESS) {
211 211 /* Set "status" and "errormsg" and goto failure */
212 212 TAVOR_TNF_FAIL(status, "communication established event");
213 213 goto all_eq_init_fail;
214 214 }
215 215 num_eq_unmap = 35;
216 216
217 217 /*
218 218 * Setup EQ35 for handling Command Completion Events
219 219 *
220 220 * These events correspond to the Tavor generated events that are used
221 221 * to indicate Tavor firmware command completion. These events are
222 222 * only generated when Tavor firmware commands are posted using the
223 223 * asynchronous completion mechanism. If this setup fails for any
224 224 * reason (which, in general, it really never should), then undo all
225 225 * previous EQ mapping, jump to the end, cleanup everything that has
226 226 * been successfully initialized, and return an error.
227 227 */
228 228 status = tavor_eq_handler_init(state, state->ts_eqhdl[35],
229 229 TAVOR_EVT_MSK_COMMAND_INTF_COMP, tavor_cmd_complete_handler);
230 230 if (status != DDI_SUCCESS) {
231 231 /* Set "status" and "errormsg" and goto failure */
232 232 TAVOR_TNF_FAIL(status, "command completion event");
233 233 goto all_eq_init_fail;
234 234 }
235 235 num_eq_unmap = 36;
236 236
237 237 /*
238 238 * Setup EQ36 for handling Local WQ Catastrophic Error Events
239 239 *
240 240 * These events correspond to the similarly-named IB affiliated
241 241 * asynchronous error type. If this setup fails for any reason
242 242 * (which, in general, it really never should), then undo all previous
243 243 * EQ mapping, jump to the end, cleanup everything that has been
244 244 * successfully initialized, and return an error.
245 245 */
246 246 status = tavor_eq_handler_init(state, state->ts_eqhdl[36],
247 247 TAVOR_EVT_MSK_LOCAL_WQ_CAT_ERROR, tavor_local_wq_cat_err_handler);
248 248 if (status != DDI_SUCCESS) {
249 249 /* Set "status" and "errormsg" and goto failure */
250 250 TAVOR_TNF_FAIL(status, "local WQ catastrophic error event");
251 251 goto all_eq_init_fail;
252 252 }
253 253 num_eq_unmap = 37;
254 254
255 255 /*
256 256 * Setup EQ37 for handling Invalid Req Local WQ Error Events
257 257 *
258 258 * These events also correspond to the similarly-named IB affiliated
259 259 * asynchronous error type. If this setup fails for any reason
260 260 * (which, in general, it really never should), then undo all previous
261 261 * EQ mapping, jump to the end, cleanup everything that has been
262 262 * successfully initialized, and return an error.
263 263 */
264 264 status = tavor_eq_handler_init(state, state->ts_eqhdl[37],
265 265 TAVOR_EVT_MSK_INV_REQ_LOCAL_WQ_ERROR,
266 266 tavor_invreq_local_wq_err_handler);
267 267 if (status != DDI_SUCCESS) {
268 268 /* Set "status" and "errormsg" and goto failure */
269 269 TAVOR_TNF_FAIL(status, "invalid req local WQ error event");
270 270 goto all_eq_init_fail;
271 271 }
272 272 num_eq_unmap = 38;
273 273
274 274 /*
275 275 * Setup EQ38 for handling Local Access Violation WQ Error Events
276 276 *
277 277 * These events also correspond to the similarly-named IB affiliated
278 278 * asynchronous error type. If this setup fails for any reason
279 279 * (which, in general, it really never should), then undo all previous
280 280 * EQ mapping, jump to the end, cleanup everything that has been
281 281 * successfully initialized, and return an error.
282 282 */
283 283 status = tavor_eq_handler_init(state, state->ts_eqhdl[38],
284 284 TAVOR_EVT_MSK_LOCAL_ACC_VIO_WQ_ERROR,
285 285 tavor_local_acc_vio_wq_err_handler);
286 286 if (status != DDI_SUCCESS) {
287 287 /* Set "status" and "errormsg" and goto failure */
288 288 TAVOR_TNF_FAIL(status, "local access violation WQ error event");
289 289 goto all_eq_init_fail;
290 290 }
291 291 num_eq_unmap = 39;
292 292
293 293 /*
294 294 * Setup EQ39 for handling Send Queue Drained Events
295 295 *
296 296 * These events correspond to the IB affiliated asynchronous events
297 297 * that are used to indicate completion of a Send Queue Drained QP
298 298 * state transition. If this setup fails for any reason (which, in
299 299 * general, it really never should), then undo all previous EQ
300 300 * mapping, jump to the end, cleanup everything that has been
301 301 * successfully initialized, and return an error.
302 302 */
303 303 status = tavor_eq_handler_init(state, state->ts_eqhdl[39],
304 304 TAVOR_EVT_MSK_SEND_QUEUE_DRAINED, tavor_sendq_drained_handler);
305 305 if (status != DDI_SUCCESS) {
306 306 /* Set "status" and "errormsg" and goto failure */
307 307 TAVOR_TNF_FAIL(status, "send queue drained event");
308 308 goto all_eq_init_fail;
309 309 }
310 310 num_eq_unmap = 40;
311 311
312 312 /*
313 313 * Setup EQ40 for handling Path Migration Succeeded Events
314 314 *
315 315 * These events correspond to the IB affiliated asynchronous events
316 316 * that are used to indicate successful completion of a path
317 317 * migration. If this setup fails for any reason (which, in general,
318 318 * it really never should), then undo all previous EQ mapping, jump
319 319 * to the end, cleanup everything that has been successfully
320 320 * initialized, and return an error.
321 321 */
322 322 status = tavor_eq_handler_init(state, state->ts_eqhdl[40],
323 323 TAVOR_EVT_MSK_PATH_MIGRATED, tavor_path_mig_handler);
324 324 if (status != DDI_SUCCESS) {
325 325 /* Set "status" and "errormsg" and goto failure */
326 326 TAVOR_TNF_FAIL(status, "path migration succeeded event");
327 327 goto all_eq_init_fail;
328 328 }
329 329 num_eq_unmap = 41;
330 330
331 331 /*
332 332 * Setup EQ41 for handling Path Migration Failed Events
333 333 *
334 334 * These events correspond to the IB affiliated asynchronous events
335 335 * that are used to indicate that path migration was not successful.
336 336 * If this setup fails for any reason (which, in general, it really
337 337 * never should), then undo all previous EQ mapping, jump to the end,
338 338 * cleanup everything that has been successfully initialized, and
339 339 * return an error.
340 340 */
341 341 status = tavor_eq_handler_init(state, state->ts_eqhdl[41],
342 342 TAVOR_EVT_MSK_PATH_MIGRATE_FAILED, tavor_path_mig_err_handler);
343 343 if (status != DDI_SUCCESS) {
344 344 /* Set "status" and "errormsg" and goto failure */
345 345 TAVOR_TNF_FAIL(status, "path migration failed event");
346 346 goto all_eq_init_fail;
347 347 }
348 348 num_eq_unmap = 42;
349 349
350 350 /*
351 351 * Setup EQ42 for handling Local Catastrophic Error Events
352 352 *
353 353 * These events correspond to the similarly-named IB unaffiliated
354 354 * asynchronous error type. If this setup fails for any reason
355 355 * (which, in general, it really never should), then undo all previous
356 356 * EQ mapping, jump to the end, cleanup everything that has been
357 357 * successfully initialized, and return an error.
358 358 *
359 359 * This error is unique, in that an EQE is not generated if this event
360 360 * occurs. Instead, an interrupt is called and we must poll the
361 361 * Catastrophic Error buffer in CR-Space. This mapping is setup simply
362 362 * to enable this error reporting. We pass in a NULL handler since it
363 363 * will never be called.
364 364 */
365 365 status = tavor_eq_handler_init(state, state->ts_eqhdl[42],
366 366 TAVOR_EVT_MSK_LOCAL_CAT_ERROR, NULL);
367 367 if (status != DDI_SUCCESS) {
368 368 /* Set "status" and "errormsg" and goto failure */
369 369 TAVOR_TNF_FAIL(status, "local catastrophic error event");
370 370 goto all_eq_init_fail;
371 371 }
372 372 num_eq_unmap = 43;
373 373
374 374 /*
375 375 * Setup EQ43 for handling SRQ Catastrophic Error Events
376 376 *
377 377 * These events correspond to the similarly-named IB affiliated
378 378 * asynchronous error type. If this setup fails for any reason
379 379 * (which, in general, it really never should), then undo all previous
380 380 * EQ mapping, jump to the end, cleanup everything that has been
381 381 * successfully initialized, and return an error.
382 382 */
383 383 status = tavor_eq_handler_init(state, state->ts_eqhdl[43],
384 384 TAVOR_EVT_MSK_SRQ_CATASTROPHIC_ERROR,
385 385 tavor_srq_catastrophic_handler);
386 386 if (status != DDI_SUCCESS) {
387 387 /* Set "status" and "errormsg" and goto failure */
388 388 TAVOR_TNF_FAIL(status, "srq catastrophic error event");
389 389 goto all_eq_init_fail;
390 390 }
391 391 num_eq_unmap = 44;
392 392
393 393 /*
394 394 * Setup EQ44 for handling SRQ Last WQE Reached Events
395 395 *
396 396 * These events correspond to the similarly-named IB affiliated
397 397 * asynchronous event type. If this setup fails for any reason
398 398 * (which, in general, it really never should), then undo all previous
399 399 * EQ mapping, jump to the end, cleanup everything that has been
400 400 * successfully initialized, and return an error.
401 401 */
402 402 status = tavor_eq_handler_init(state, state->ts_eqhdl[44],
403 403 TAVOR_EVT_MSK_SRQ_LAST_WQE_REACHED,
404 404 tavor_srq_last_wqe_reached_handler);
405 405 if (status != DDI_SUCCESS) {
406 406 /* Set "status" and "errormsg" and goto failure */
407 407 TAVOR_TNF_FAIL(status, "srq last wqe reached event");
408 408 goto all_eq_init_fail;
409 409 }
410 410 num_eq_unmap = 45;
411 411
412 412 /*
413 413 * Setup EQ45 for handling ECC error detection events
414 414 *
415 415 * These events correspond to the similarly-named IB affiliated
416 416 * asynchronous event type. If this setup fails for any reason
417 417 * (which, in general, it really never should), then undo all previous
418 418 * EQ mapping, jump to the end, cleanup everything that has been
419 419 * successfully initialized, and return an error.
420 420 */
421 421 status = tavor_eq_handler_init(state, state->ts_eqhdl[45],
422 422 TAVOR_EVT_MSK_ECC_DETECTION,
423 423 tavor_ecc_detection_handler);
424 424 if (status != DDI_SUCCESS) {
425 425 /* Set "status" and "errormsg" and goto failure */
426 426 TAVOR_TNF_FAIL(status, "ecc detection event");
427 427 goto all_eq_init_fail;
428 428 }
429 429 num_eq_unmap = 46;
430 430
431 431 /*
432 432 * Setup EQ46 to catch all other types of events. Specifically, we
433 433 * do not catch the "Local EEC Catastrophic Error Event" because we
434 434 * should have no EEC (the Tavor driver does not support RD). We also
435 435 * choose not to handle any of the address translation page fault
436 436 * event types. Since we are not doing any page fault handling (and
437 437 * since the Tavor firmware does not currently support any such
438 438 * handling), we allow these events to go to the catch-all handler.
439 439 */
440 440 status = tavor_eq_handler_init(state, state->ts_eqhdl[46],
441 441 TAVOR_EVT_CATCHALL_MASK, tavor_no_eqhandler);
442 442 if (status != DDI_SUCCESS) {
443 443 /* Set "status" and "errormsg" and goto failure */
444 444 TAVOR_TNF_FAIL(status, "all other events");
445 445 TNF_PROBE_0(tavor_eq_init_all_allothershdlr_fail,
446 446 TAVOR_TNF_ERROR, "");
447 447 goto all_eq_init_fail;
448 448 }
449 449
450 450 TAVOR_TNF_EXIT(tavor_eq_init_all);
451 451 return (DDI_SUCCESS);
452 452
 /*
  * Common unwind path: num_eq_unmap and num_eq_init record exactly
  * how far setup progressed, so only completed work is undone.
  */
453 453 all_eq_init_fail:
454 454 /* Unmap any of the partially mapped EQs from above */
455 455 for (i = 0; i < num_eq_unmap; i++) {
456 456 (void) tavor_eq_handler_fini(state, state->ts_eqhdl[i]);
457 457 }
458 458
459 459 /* Free up any of the partially allocated EQs from above */
460 460 for (i = 0; i < num_eq_init; i++) {
461 461 (void) tavor_eq_free(state, &state->ts_eqhdl[i]);
462 462 }
463 463 TNF_PROBE_1(tavor_eq_init_all_fail, TAVOR_TNF_ERROR, "",
464 464 tnf_string, msg, errormsg);
465 465 TAVOR_TNF_EXIT(tavor_eq_init_all);
466 466 return (status);
467 467 }
468 468
469 469
470 470 /*
471 471 * tavor_eq_fini_all
472 472 * Context: Only called from attach() and/or detach() path contexts
 *
 * Unmaps the event classes from all TAVOR_NUM_EQ_USED EQs and then
 * frees each EQ. Returns DDI_FAILURE on the first handler-fini or
 * free error (leaving the remainder untouched), DDI_SUCCESS otherwise.
473 473 */
474 474 int
475 475 tavor_eq_fini_all(tavor_state_t *state)
476 476 {
477 477 uint_t num_eq;
478 478 int status, i;
479 479
480 480 TAVOR_TNF_ENTER(tavor_eq_fini_all);
481 481
482 482 /*
483 483 * Grab the total number of supported EQs again. This is the same
484 484 * hardcoded value that was used above (during the event queue
485 485 * initialization.)
486 486 */
487 487 num_eq = TAVOR_NUM_EQ_USED;
488 488
489 489 /*
490 490 * For each of the event queues that we initialized and mapped
491 491 * earlier, attempt to unmap the events from the EQ.
492 492 */
493 493 for (i = 0; i < num_eq; i++) {
494 494 status = tavor_eq_handler_fini(state, state->ts_eqhdl[i]);
495 495 if (status != DDI_SUCCESS) {
496 496 TNF_PROBE_0(tavor_eq_fini_all_eqhdlfini_fail,
497 497 TAVOR_TNF_ERROR, "");
498 498 TAVOR_TNF_EXIT(tavor_eq_fini_all);
499 499 return (DDI_FAILURE);
500 500 }
501 501 }
502 502
503 503 /*
504 504 * Teardown and free up all the Event Queues that were allocated
505 505 * earlier.
506 506 */
507 507 for (i = 0; i < num_eq; i++) {
508 508 status = tavor_eq_free(state, &state->ts_eqhdl[i]);
509 509 if (status != DDI_SUCCESS) {
510 510 TNF_PROBE_0(tavor_eq_fini_all_eqfree_fail,
511 511 TAVOR_TNF_ERROR, "");
512 512 TAVOR_TNF_EXIT(tavor_eq_fini_all);
513 513 return (DDI_FAILURE);
514 514 }
515 515 }
516 516
517 517 TAVOR_TNF_EXIT(tavor_eq_fini_all);
518 518 return (DDI_SUCCESS);
519 519 }
520 520
521 521
522 522 /*
523 523 * tavor_eq_arm_all
524 524 * Context: Only called from attach() and/or detach() path contexts
 *
 * Rings the "rearm" doorbell for every EQ so the hardware will
 * generate interrupts/events for them. No return value; the doorbell
 * write itself has no failure path visible here.
525 525 */
526 526 void
527 527 tavor_eq_arm_all(tavor_state_t *state)
528 528 {
529 529 uint_t num_eq;
530 530 int i;
531 531
532 532 TAVOR_TNF_ENTER(tavor_eq_arm_all);
533 533
534 534 /*
535 535 * Grab the total number of supported EQs again. This is the same
536 536 * hardcoded value that was used above (during the event queue
537 537 * initialization.)
538 538 */
539 539 num_eq = TAVOR_NUM_EQ_USED;
540 540
541 541 /*
542 542 * For each of the event queues that we initialized and mapped
543 543 * earlier, attempt to arm it for event generation.
544 544 */
545 545 for (i = 0; i < num_eq; i++) {
546 546 tavor_eq_doorbell(state, TAVOR_EQDB_REARM_EQ, i, 0);
547 547 }
548 548
549 549 TAVOR_TNF_EXIT(tavor_eq_arm_all);
550 550 }
551 551
552 552
553 553 /*
554 554 * tavor_isr()
555 555 * Context: Only called from interrupt context (and during panic)
 *
 * Main interrupt service routine. arg1 is the tavor_state_t pointer
 * registered with the DDI interrupt framework; arg2 is unused (hence
 * ARGSUSED below). Returns DDI_INTR_CLAIMED if at least one ECR bit
 * was set (i.e. at least one EQ was polled), DDI_INTR_UNCLAIMED
 * otherwise.
556 556 */
557 557 /* ARGSUSED */
558 558 uint_t
559 559 tavor_isr(caddr_t arg1, caddr_t arg2)
560 560 {
561 561 tavor_state_t *state;
562 562 uint64_t *ecr, *clr_int;
563 563 uint64_t ecrreg, int_mask;
564 564 uint_t status;
565 565 int i;
566 566
567 567 TAVOR_TNF_ENTER(tavor_isr);
568 568
569 569 /*
570 570 * Grab the Tavor softstate pointer from the input parameter
571 571 */
572 572 state = (tavor_state_t *)arg1;
573 573
574 574 /*
575 575 * Find the pointers to the ECR and clr_INT registers
576 576 */
577 577 ecr = state->ts_cmd_regs.ecr;
578 578 clr_int = state->ts_cmd_regs.clr_int;
579 579
580 580 /*
581 581 * Read the ECR register. Each of the 64 bits in the ECR register
582 582 * corresponds to an event queue. If a bit is set, then the
583 583 * corresponding event queue has fired.
584 584 */
585 585 ecrreg = ddi_get64(state->ts_reg_cmdhdl, ecr);
586 586
587 587 /*
588 588 * As long as there are bits set (i.e. as long as there are still
589 589 * EQs in the "fired" state), call tavor_eq_poll() to process each
590 590 * fired EQ. If no ECR bits are set, do not claim the interrupt.
591 591 */
592 592 status = DDI_INTR_UNCLAIMED;
593 593 do {
 /* Walk ECR bit 0 upward; bit index i == EQ number */
594 594 i = 0;
595 595 while (ecrreg != 0x0) {
596 596 if (ecrreg & 0x1) {
597 597 tavor_eq_poll(state, state->ts_eqhdl[i]);
598 598 status = DDI_INTR_CLAIMED;
599 599 }
600 600 ecrreg = ecrreg >> 1;
601 601 i++;
602 602 }
603 603
604 604 /*
605 605 * Clear the interrupt. Note: Depending on the type of
606 606 * event (interrupt or MSI), we need to use a different
607 607 * mask to clear the event. In the case of MSI, the bit
608 608 * to clear corresponds to the MSI number, and for legacy
609 609 * interrupts the bit corresponds to the value in 'inta_pin'.
610 610 */
611 611 if (state->ts_intr_type_chosen == DDI_INTR_TYPE_MSI) {
612 612 int_mask = ((uint64_t)1 << 0);
613 613 } else {
614 614 int_mask = ((uint64_t)1 << state->ts_adapter.inta_pin);
615 615 }
616 616 ddi_put64(state->ts_reg_cmdhdl, clr_int, int_mask);
617 617
618 618 /* Reread the ECR register */
619 619 ecrreg = ddi_get64(state->ts_reg_cmdhdl, ecr);
620 620
 /* Loop until no further EQs fired between clears */
621 621 } while (ecrreg != 0x0);
622 622
623 623 TAVOR_TNF_EXIT(tavor_isr);
624 624 return (status);
625 625 }
626 626
627 627
628 628 /*
629 629 * tavor_eq_doorbell
630 630 * Context: Only called from interrupt context
 *
 * Composes a 64-bit EQ doorbell from the command (eq_cmd, e.g.
 * TAVOR_EQDB_REARM_EQ or TAVOR_EQDB_SET_CONSINDX), the EQ number
 * (eqn), and a command-specific parameter (eq_param), then writes it
 * to the device's UAR EQ doorbell register.
631 631 */
632 632 void
633 633 tavor_eq_doorbell(tavor_state_t *state, uint32_t eq_cmd, uint32_t eqn,
634 634 uint32_t eq_param)
635 635 {
636 636 uint64_t doorbell = 0;
637 637
638 638 /* Build the doorbell from the parameters */
639 639 doorbell = ((uint64_t)eq_cmd << TAVOR_EQDB_CMD_SHIFT) |
640 640 ((uint64_t)eqn << TAVOR_EQDB_EQN_SHIFT) | eq_param;
641 641
642 642 TNF_PROBE_1_DEBUG(tavor_eq_doorbell, TAVOR_TNF_TRACE, "",
643 643 tnf_ulong, doorbell, doorbell);
644 644
645 645 /* Write the doorbell to UAR */
646 646 TAVOR_UAR_DOORBELL(state, (uint64_t *)&state->ts_uar->eq,
647 647 doorbell);
648 648 }
649 649
650 650 /*
651 651 * tavor_eq_poll
652 652 * Context: Only called from interrupt context (and during panic)
 *
 * Drains all software-owned EQEs from the given EQ: each entry is
 * dispatched to the EQ's handler (skipped during panic), returned to
 * hardware ownership, and the consumer index is advanced. The ECR bit
 * is then cleared and doorbells update the consumer index and rearm
 * the EQ. The Local Catastrophic Error EQ is special-cased up front
 * since it never produces EQEs.
653 653 */
654 654 static void
655 655 tavor_eq_poll(tavor_state_t *state, tavor_eqhdl_t eq)
↓ open down ↓ |
655 lines elided |
↑ open up ↑ |
656 656 {
657 657 uint64_t *clr_ecr;
658 658 tavor_hw_eqe_t *eqe;
659 659 uint64_t ecr_mask;
660 660 uint32_t cons_indx, wrap_around_mask;
661 661 int (*eqfunction)(tavor_state_t *state, tavor_eqhdl_t eq,
662 662 tavor_hw_eqe_t *eqe);
663 663
664 664 TAVOR_TNF_ENTER(tavor_eq_poll);
665 665
666 - _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*eq))
667 -
668 666 /* Find the pointer to the clr_ECR register */
669 667 clr_ecr = state->ts_cmd_regs.clr_ecr;
670 668
671 669 /*
672 670 * Check for Local Catastrophic Error If we have this kind of error,
673 671 * then we don't need to do anything else here, as this kind of
674 672 * catastrophic error is handled separately. So we call the
675 673 * catastrophic handler, clear the ECR and then return.
676 674 */
677 675 if (eq->eq_evttypemask == TAVOR_EVT_MSK_LOCAL_CAT_ERROR) {
678 676 /*
679 677 * Call Catastrophic Error handler
680 678 */
681 679 tavor_eq_catastrophic(state);
682 680
683 681 /*
684 682 * Clear the ECR. Specifically, clear the bit corresponding
685 683 * to the event queue just processed.
686 684 */
687 685 ecr_mask = ((uint64_t)1 << eq->eq_eqnum);
688 686 ddi_put64(state->ts_reg_cmdhdl, clr_ecr, ecr_mask);
689 687
690 688 TAVOR_TNF_EXIT(tavor_eq_poll);
691 689 return;
692 690 }
693 691
694 692 /* Get the consumer pointer index */
↓ open down ↓ |
17 lines elided |
↑ open up ↑ |
695 693 cons_indx = eq->eq_consindx;
696 694
697 695 /*
698 696 * Calculate the wrap around mask. Note: This operation only works
699 697 * because all Tavor event queues have power-of-2 sizes
700 698 */
701 699 wrap_around_mask = (eq->eq_bufsz - 1);
702 700
703 701 /* Calculate the pointer to the first EQ entry */
704 702 eqe = &eq->eq_buf[cons_indx];
705 - _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*eqe))
706 703
707 704 /*
708 705 * Sync the current EQE to read
709 706 * We need to force a ddi_dma_sync() here (independent of how the
710 707 * EQ was mapped) because it is possible for us to receive the
711 708 * interrupt, do a read of the ECR, and have each of these
712 709 * operations complete successfully even though the hardware's DMA
713 710 * to the EQ has not yet completed.
714 711 */
715 712 tavor_eqe_sync(eq, eqe, DDI_DMA_SYNC_FORCPU, TAVOR_EQ_SYNC_FORCE);
716 713
717 714 /*
718 715 * Pull the handler function for this EQ from the Tavor Event Queue
719 716 * handle
720 717 */
721 718 eqfunction = eq->eq_func;
722 719
723 720 /*
724 721 * Keep pulling entries from the EQ until we find an entry owned by
725 722 * the hardware. As long as the EQEs are owned by SW, process
726 723 * each entry by calling its handler function and updating the EQ
727 724 * consumer index.
728 725 */
729 726 do {
730 727 while (TAVOR_EQE_OWNER_IS_SW(eq, eqe)) {
731 728 /*
732 729 * Call the EQ handler function. But only call if we
733 730 * are not in polled I/O mode (i.e. not processing
734 731 * because of a system panic). Note: We don't call
735 732 * the EQ handling functions from a system panic
736 733 * because we are primarily concerned only with
737 734 * ensuring that the event queues do not overflow (or,
738 735 * more specifically, the event queue associated with
739 736 * the CQ that is being used in the sync/dump process).
740 737 * Also, we don't want to make any upcalls (to the
741 738 * IBTF) because we can't guarantee when/if those
742 739 * calls would ever return. And, if we're in panic,
743 740 * then we reached here through a PollCQ() call (from
744 741 * tavor_cq_poll()), and we need to ensure that we
745 742 * successfully return any work completions to the
746 743 * caller.
747 744 */
748 745 if (ddi_in_panic() == 0) {
749 746 eqfunction(state, eq, eqe);
750 747 }
751 748
752 749 /* Reset entry to hardware ownership */
753 750 TAVOR_EQE_OWNER_SET_HW(eq, eqe);
754 751
755 752 /* Sync the current EQE for device */
756 753 tavor_eqe_sync(eq, eqe, DDI_DMA_SYNC_FORDEV,
757 754 TAVOR_EQ_SYNC_NORMAL);
758 755
759 756 /* Increment the consumer index */
760 757 cons_indx = (cons_indx + 1) & wrap_around_mask;
761 758
762 759 /* Update the pointer to the next EQ entry */
763 760 eqe = &eq->eq_buf[cons_indx];
764 761
765 762 /* Sync the next EQE to read */
766 763 tavor_eqe_sync(eq, eqe, DDI_DMA_SYNC_FORCPU,
767 764 TAVOR_EQ_SYNC_NORMAL);
768 765 }
769 766
770 767 /*
771 768 * Clear the ECR. Specifically, clear the bit corresponding
772 769 * to the event queue just processed.
773 770 */
774 771 ecr_mask = ((uint64_t)1 << eq->eq_eqnum);
775 772 ddi_put64(state->ts_reg_cmdhdl, clr_ecr, ecr_mask);
776 773
777 774 /* Write an EQ doorbell to update the consumer index */
778 775 eq->eq_consindx = cons_indx;
779 776 tavor_eq_doorbell(state, TAVOR_EQDB_SET_CONSINDX, eq->eq_eqnum,
780 777 cons_indx);
781 778
782 779 /* Write another EQ doorbell to rearm */
783 780 tavor_eq_doorbell(state, TAVOR_EQDB_REARM_EQ, eq->eq_eqnum, 0);
784 781
785 782 /*
786 783 * NOTE: Due to the nature of Mellanox hardware, we do not have
787 784 * to do an explicit PIO read to ensure that the doorbell write
788 785 * has been flushed to the hardware. There is state encoded in
789 786 * the doorbell information we write which makes this
790 787 * unnecessary. We can be assured that if an event needs to be
791 788 * generated, the hardware will make sure that it is, solving
792 789 * the possible race condition.
793 790 */
794 791
795 792 /* Sync the next EQE to read */
796 793 tavor_eqe_sync(eq, eqe, DDI_DMA_SYNC_FORCPU,
797 794 TAVOR_EQ_SYNC_NORMAL);
798 795
 /* Catch EQEs that arrived while we were ringing doorbells */
799 796 } while (TAVOR_EQE_OWNER_IS_SW(eq, eqe));
800 797
801 798 TAVOR_TNF_EXIT(tavor_eq_poll);
802 799 }
803 800
804 801
805 802 /*
806 803 * tavor_eq_catastrophic
807 804 * Context: Only called from interrupt context (and during panic)
808 805 */
809 806 static void
810 807 tavor_eq_catastrophic(tavor_state_t *state)
811 808 {
812 809 ibt_async_code_t type;
813 810 ibc_async_event_t event;
814 811 uint32_t *base_addr;
815 812 uint32_t buf_size;
816 813 uint32_t word;
817 814 uint8_t err_type;
818 815 uint32_t err_buf;
819 816 int i;
820 817
821 818 TAVOR_TNF_ENTER(tavor_eq_catastrophic);
822 819
823 820 bzero(&event, sizeof (ibc_async_event_t));
824 821
825 822 base_addr = (uint32_t *)(uintptr_t)(
826 823 (uintptr_t)state->ts_reg_cmd_baseaddr +
827 824 state->ts_fw.error_buf_addr);
828 825 buf_size = state->ts_fw.error_buf_sz;
829 826
830 827 word = ddi_get32(state->ts_reg_cmdhdl, base_addr);
831 828
832 829 err_type = (word & 0xFF000000) >> 24;
833 830 type = IBT_ERROR_LOCAL_CATASTROPHIC;
834 831
835 832 switch (err_type) {
836 833 case TAVOR_CATASTROPHIC_INTERNAL_ERROR:
837 834 cmn_err(CE_WARN, "Catastrophic Internal Error: 0x%02x",
838 835 err_type);
839 836
840 837 break;
841 838
842 839 case TAVOR_CATASTROPHIC_UPLINK_BUS_ERROR:
843 840 cmn_err(CE_WARN, "Catastrophic Uplink Bus Error: 0x%02x",
844 841 err_type);
845 842
846 843 break;
847 844
848 845 case TAVOR_CATASTROPHIC_DDR_DATA_ERROR:
849 846 cmn_err(CE_WARN, "Catastrophic DDR Data Error: 0x%02x",
850 847 err_type);
851 848
852 849 break;
853 850
854 851 case TAVOR_CATASTROPHIC_INTERNAL_PARITY_ERROR:
855 852 cmn_err(CE_WARN, "Catastrophic Internal Parity Error: 0x%02x",
856 853 err_type);
857 854
858 855 break;
859 856
860 857 default:
861 858 /* Unknown type of Catastrophic error */
862 859 cmn_err(CE_WARN, "Catastrophic Unknown Error: 0x%02x",
863 860 err_type);
864 861
865 862 break;
866 863 }
867 864
868 865 /*
869 866 * Read in the catastrophic error buffer from the hardware, printing
870 867 * only to the log file only
871 868 */
872 869 for (i = 0; i < buf_size; i += 4) {
873 870 base_addr = (uint32_t *)((uintptr_t)(state->ts_reg_cmd_baseaddr
874 871 + state->ts_fw.error_buf_addr + (i * 4)));
875 872 err_buf = ddi_get32(state->ts_reg_cmdhdl, base_addr);
876 873 cmn_err(CE_WARN, "catastrophic_error[%02x]: %08X", i, err_buf);
877 874 }
878 875
879 876 /*
880 877 * We also call the IBTF here to inform it of the catastrophic error.
881 878 * Note: Since no event information (i.e. QP handles, CQ handles,
882 879 * etc.) is necessary, we pass a NULL pointer instead of a pointer to
883 880 * an empty ibc_async_event_t struct.
884 881 *
885 882 * But we also check if "ts_ibtfpriv" is NULL. If it is then it
886 883 * means that we've have either received this event before we
887 884 * finished attaching to the IBTF or we've received it while we
888 885 * are in the process of detaching.
889 886 */
890 887 if (state->ts_ibtfpriv != NULL) {
891 888 TAVOR_DO_IBTF_ASYNC_CALLB(state, type, &event);
892 889 }
893 890
894 891 TAVOR_TNF_EXIT(tavor_eq_catastrophic);
895 892 }
896 893
897 894
/*
 * tavor_eq_alloc()
 *    Context: Only called from attach() path context
 *
 * Allocates and initializes one Event Queue: reserves an EQ context
 * entry and a software EQ handle, allocates and registers the EQ buffer
 * memory, fills in the EQ context, and finally passes ownership of the
 * context to the hardware with the SW2HW_EQ firmware command.  On
 * success the new handle is returned through "eqhdl".  On any failure,
 * everything acquired so far is unwound through the goto ladder below
 * (each label undoes one earlier step, falling through to the ones
 * before it) and "status" is returned.
 */
static int
tavor_eq_alloc(tavor_state_t *state, uint32_t log_eq_size, uint_t intr,
    tavor_eqhdl_t *eqhdl)
{
	tavor_rsrc_t		*eqc, *rsrc;
	tavor_hw_eqc_t		eqc_entry;
	tavor_eqhdl_t		eq;
	ibt_mr_attr_t		mr_attr;
	tavor_mr_options_t	op;
	tavor_pdhdl_t		pd;
	tavor_mrhdl_t		mr;
	tavor_hw_eqe_t		*buf;
	uint64_t		addr;
	uint32_t		lkey;
	uint_t			dma_xfer_mode;
	int			status, i;
	char			*errormsg;	/* set by TAVOR_TNF_FAIL() */

	TAVOR_TNF_ENTER(tavor_eq_alloc);

	/* Use the internal protection domain (PD) for setting up EQs */
	pd = state->ts_pdhdl_internal;

	/* Increment the reference count on the protection domain (PD) */
	tavor_pd_refcnt_inc(pd);

	/*
	 * Allocate an EQ context entry.  This will be filled in with all
	 * the necessary parameters to define the Event Queue.  And then
	 * ownership will be passed to the hardware in the final step
	 * below.  If we fail here, we must undo the protection domain
	 * reference count.
	 */
	status = tavor_rsrc_alloc(state, TAVOR_EQC, 1, TAVOR_SLEEP, &eqc);
	if (status != DDI_SUCCESS) {
		/* Set "status" and "errormsg" and goto failure */
		TAVOR_TNF_FAIL(DDI_FAILURE, "failed EQ context");
		goto eqalloc_fail1;
	}

	/*
	 * Allocate the software structure for tracking the event queue (i.e.
	 * the Tavor Event Queue handle).  If we fail here, we must undo the
	 * protection domain reference count and the previous resource
	 * allocation.
	 */
	status = tavor_rsrc_alloc(state, TAVOR_EQHDL, 1, TAVOR_SLEEP, &rsrc);
	if (status != DDI_SUCCESS) {
		/* Set "status" and "errormsg" and goto failure */
		TAVOR_TNF_FAIL(DDI_FAILURE, "failed EQ handler");
		goto eqalloc_fail2;
	}
	eq = (tavor_eqhdl_t)rsrc->tr_addr;

	/*
	 * Allocate the memory for Event Queue.  Note: Although we use the
	 * common queue allocation routine, we always specify
	 * TAVOR_QUEUE_LOCATION_NORMAL (i.e. EQ located in system memory)
	 * because it would be inefficient to have EQs located in DDR memory.
	 * This is primarily because EQs are read from (by software) more
	 * than they are written to.  Also note that, unlike Tavor QP work
	 * queues, event queues do not have the same strict alignment
	 * requirements.  It is sufficient for the EQ memory to be both
	 * aligned to and bound to addresses which are a multiple of EQE size.
	 */
	eq->eq_eqinfo.qa_size = (1 << log_eq_size) * sizeof (tavor_hw_eqe_t);
	eq->eq_eqinfo.qa_alloc_align = sizeof (tavor_hw_eqe_t);
	eq->eq_eqinfo.qa_bind_align = sizeof (tavor_hw_eqe_t);
	eq->eq_eqinfo.qa_location = TAVOR_QUEUE_LOCATION_NORMAL;
	status = tavor_queue_alloc(state, &eq->eq_eqinfo, TAVOR_SLEEP);
	if (status != DDI_SUCCESS) {
		/* Set "status" and "errormsg" and goto failure */
		TAVOR_TNF_FAIL(DDI_FAILURE, "failed event queue");
		goto eqalloc_fail3;
	}
	buf = (tavor_hw_eqe_t *)eq->eq_eqinfo.qa_buf_aligned;

	/*
	 * Initialize each of the Event Queue Entries (EQE) by setting their
	 * ownership to hardware ("owner" bit set to HW).  This is in
	 * preparation for the final transfer of ownership (below) of the
	 * EQ context itself.
	 */
	for (i = 0; i < (1 << log_eq_size); i++) {
		TAVOR_EQE_OWNER_SET_HW(eq, &buf[i]);
	}

	/*
	 * Register the memory for the EQ.  The memory for the EQ must
	 * be registered in the Tavor TPT tables.  This gives us the LKey
	 * to specify in the EQ context below.
	 *
	 * Because we are in the attach path we use NOSLEEP here so that we
	 * SPIN in the HCR since the event queues are not setup yet, and we
	 * cannot NOSPIN at this point in time.
	 */
	mr_attr.mr_vaddr = (uint64_t)(uintptr_t)buf;
	mr_attr.mr_len = eq->eq_eqinfo.qa_size;
	mr_attr.mr_as = NULL;
	mr_attr.mr_flags = IBT_MR_NOSLEEP | IBT_MR_ENABLE_LOCAL_WRITE;
	dma_xfer_mode = state->ts_cfg_profile->cp_streaming_consistent;
	if (dma_xfer_mode == DDI_DMA_STREAMING) {
		mr_attr.mr_flags |= IBT_MR_NONCOHERENT;
	}
	op.mro_bind_type = state->ts_cfg_profile->cp_iommu_bypass;
	op.mro_bind_dmahdl = eq->eq_eqinfo.qa_dmahdl;
	op.mro_bind_override_addr = 0;
	status = tavor_mr_register(state, pd, &mr_attr, &mr, &op);
	if (status != DDI_SUCCESS) {
		/* Set "status" and "errormsg" and goto failure */
		TAVOR_TNF_FAIL(DDI_FAILURE, "failed register mr");
		goto eqalloc_fail4;
	}
	addr = mr->mr_bindinfo.bi_addr;
	lkey = mr->mr_lkey;

	/* Determine if later ddi_dma_sync will be necessary */
	eq->eq_sync = TAVOR_EQ_IS_SYNC_REQ(state, eq->eq_eqinfo);

	/* Sync entire EQ for use by the hardware (if necessary) */
	if (eq->eq_sync) {
		(void) ddi_dma_sync(mr->mr_bindinfo.bi_dmahdl, 0,
		    eq->eq_eqinfo.qa_size, DDI_DMA_SYNC_FORDEV);
	}

	/*
	 * Fill in the EQC entry.  This is the final step before passing
	 * ownership of the EQC entry to the Tavor hardware.  We use all of
	 * the information collected/calculated above to fill in the
	 * requisite portions of the EQC.  Note: We create all EQs in the
	 * "fired" state.  We will arm them later (after our interrupt
	 * routine had been registered.)
	 */
	bzero(&eqc_entry, sizeof (tavor_hw_eqc_t));
	eqc_entry.owner		= TAVOR_HW_OWNER;
	eqc_entry.xlat		= TAVOR_VA2PA_XLAT_ENABLED;
	eqc_entry.state		= TAVOR_EQ_FIRED;
	eqc_entry.start_addr_h	= (addr >> 32);
	eqc_entry.start_addr_l	= (addr & 0xFFFFFFFF);
	eqc_entry.log_eq_sz	= log_eq_size;
	eqc_entry.usr_page	= 0;
	eqc_entry.pd		= pd->pd_pdnum;
	eqc_entry.intr		= intr;
	eqc_entry.lkey		= lkey;

	/*
	 * Write the EQC entry to hardware.  Lastly, we pass ownership of
	 * the entry to the hardware (using the Tavor SW2HW_EQ firmware
	 * command).  Note: in general, this operation shouldn't fail.  But
	 * if it does, we have to undo everything we've done above before
	 * returning error.
	 */
	status = tavor_cmn_ownership_cmd_post(state, SW2HW_EQ, &eqc_entry,
	    sizeof (tavor_hw_eqc_t), eqc->tr_indx, TAVOR_CMD_NOSLEEP_SPIN);
	if (status != TAVOR_CMD_SUCCESS) {
		cmn_err(CE_CONT, "Tavor: SW2HW_EQ command failed: %08x\n",
		    status);
		TNF_PROBE_1(tavor_eq_alloc_sw2hw_eq_cmd_fail,
		    TAVOR_TNF_ERROR, "", tnf_uint, status, status);
		/* Set "status" and "errormsg" and goto failure */
		TAVOR_TNF_FAIL(ibc_get_ci_failure(0), "tavor SW2HW_EQ command");
		goto eqalloc_fail5;
	}

	/*
	 * Fill in the rest of the Tavor Event Queue handle.  Having
	 * successfully transferred ownership of the EQC, we can update the
	 * following fields for use in further operations on the EQ.
	 */
	eq->eq_eqcrsrcp	 = eqc;
	eq->eq_rsrcp	 = rsrc;
	eq->eq_consindx	 = 0;
	eq->eq_eqnum	 = eqc->tr_indx;
	eq->eq_buf	 = buf;
	eq->eq_bufsz	 = (1 << log_eq_size);
	eq->eq_mrhdl	 = mr;
	*eqhdl		 = eq;

	TAVOR_TNF_EXIT(tavor_eq_alloc);
	return (DDI_SUCCESS);

/*
 * The following is cleanup for all possible failure cases in this routine
 */
eqalloc_fail5:
	if (tavor_mr_deregister(state, &mr, TAVOR_MR_DEREG_ALL,
	    TAVOR_NOSLEEP) != DDI_SUCCESS) {
		TAVOR_WARNING(state, "failed to deregister EQ memory");
	}
eqalloc_fail4:
	tavor_queue_free(state, &eq->eq_eqinfo);
eqalloc_fail3:
	tavor_rsrc_free(state, &rsrc);
eqalloc_fail2:
	tavor_rsrc_free(state, &eqc);
eqalloc_fail1:
	tavor_pd_refcnt_dec(pd);
eqalloc_fail:
	TNF_PROBE_1(tavor_eq_alloc_fail, TAVOR_TNF_ERROR, "",
	    tnf_string, msg, errormsg);
	TAVOR_TNF_EXIT(tavor_eq_alloc);
	return (status);
}
1106 1102
1107 1103
/*
 * tavor_eq_free()
 *    Context: Only called from attach() and/or detach() path contexts
 *
 * Tears down one Event Queue in the reverse order of tavor_eq_alloc():
 * reclaims the EQ context from hardware (HW2SW_EQ), deregisters and
 * frees the EQ buffer memory, releases the software handle and EQC
 * resources, and drops the internal PD reference.  On success the
 * caller's handle pointer is cleared.
 */
static int
tavor_eq_free(tavor_state_t *state, tavor_eqhdl_t *eqhdl)
{
	tavor_rsrc_t		*eqc, *rsrc;
	tavor_hw_eqc_t		eqc_entry;	/* output buffer for HW2SW_EQ */
	tavor_pdhdl_t		pd;
	tavor_mrhdl_t		mr;
	tavor_eqhdl_t		eq;
	uint32_t		eqnum;
	int			status;

	TAVOR_TNF_ENTER(tavor_eq_free);

	/*
	 * Pull all the necessary information from the Tavor Event Queue
	 * handle.  This is necessary here because the resource for the
	 * EQ handle is going to be freed up as part of this operation.
	 */
	eq	= *eqhdl;
	eqc	= eq->eq_eqcrsrcp;
	rsrc	= eq->eq_rsrcp;
	pd	= state->ts_pdhdl_internal;
	mr	= eq->eq_mrhdl;
	eqnum	= eq->eq_eqnum;

	/*
	 * Reclaim EQC entry from hardware (using the Tavor HW2SW_EQ
	 * firmware command).  If the ownership transfer fails for any reason,
	 * then it is an indication that something (either in HW or SW) has
	 * gone seriously wrong.
	 */
	status = tavor_cmn_ownership_cmd_post(state, HW2SW_EQ, &eqc_entry,
	    sizeof (tavor_hw_eqc_t), eqnum, TAVOR_CMD_NOSLEEP_SPIN);
	if (status != TAVOR_CMD_SUCCESS) {
		TAVOR_WARNING(state, "failed to reclaim EQC ownership");
		cmn_err(CE_CONT, "Tavor: HW2SW_EQ command failed: %08x\n",
		    status);
		TNF_PROBE_1(tavor_eq_free_hw2sw_eq_cmd_fail,
		    TAVOR_TNF_ERROR, "", tnf_uint, status, status);
		TAVOR_TNF_EXIT(tavor_eq_free);
		return (DDI_FAILURE);
	}

	/*
	 * Deregister the memory for the Event Queue.  If this fails
	 * for any reason, then it is an indication that something (either
	 * in HW or SW) has gone seriously wrong.  So we print a warning
	 * message and continue.
	 *
	 * NOTE(review): this branch deliberately falls through (no return),
	 * so the TNF exit probe fires here and again at the bottom of the
	 * function -- presumably harmless, but worth confirming.
	 */
	status = tavor_mr_deregister(state, &mr, TAVOR_MR_DEREG_ALL,
	    TAVOR_NOSLEEP);
	if (status != DDI_SUCCESS) {
		TAVOR_WARNING(state, "failed to deregister EQ memory");
		TNF_PROBE_0(tavor_eq_free_dereg_mr_fail, TAVOR_TNF_ERROR, "");
		TAVOR_TNF_EXIT(tavor_eq_free);
	}

	/* Free the memory for the EQ */
	tavor_queue_free(state, &eq->eq_eqinfo);

	/* Free the Tavor Event Queue handle */
	tavor_rsrc_free(state, &rsrc);

	/* Free up the EQC entry resource */
	tavor_rsrc_free(state, &eqc);

	/* Decrement the reference count on the protection domain (PD) */
	tavor_pd_refcnt_dec(pd);

	/* Set the eqhdl pointer to NULL and return success */
	*eqhdl = NULL;

	TAVOR_TNF_EXIT(tavor_eq_free);
	return (DDI_SUCCESS);
}
1187 1183
1188 1184
1189 1185 /*
1190 1186 * tavor_eq_handler_init
1191 1187 * Context: Only called from attach() path context
1192 1188 */
1193 1189 static int
1194 1190 tavor_eq_handler_init(tavor_state_t *state, tavor_eqhdl_t eq,
1195 1191 uint_t evt_type_mask, int (*eq_func)(tavor_state_t *state,
1196 1192 tavor_eqhdl_t eq, tavor_hw_eqe_t *eqe))
1197 1193 {
1198 1194 int status;
1199 1195
1200 1196 TAVOR_TNF_ENTER(tavor_eq_handler_init);
1201 1197
1202 1198 /*
1203 1199 * Save away the EQ handler function and the event type mask. These
1204 1200 * will be used later during interrupt and event queue processing.
1205 1201 */
1206 1202 eq->eq_func = eq_func;
1207 1203 eq->eq_evttypemask = evt_type_mask;
1208 1204
1209 1205 /*
1210 1206 * Map the EQ to a specific class of event (or events) depending
1211 1207 * on the mask value passed in. The TAVOR_EVT_NO_MASK means not
1212 1208 * to attempt associating the EQ with any specific class of event.
1213 1209 * This is particularly useful when initializing the events queues
1214 1210 * used for CQ events. The mapping is done using the Tavor MAP_EQ
1215 1211 * firmware command. Note: This command should not, in general, fail.
1216 1212 * If it does, then something (probably HW related) has gone seriously
1217 1213 * wrong.
1218 1214 */
1219 1215 if (evt_type_mask != TAVOR_EVT_NO_MASK) {
1220 1216 status = tavor_map_eq_cmd_post(state,
1221 1217 TAVOR_CMD_MAP_EQ_EVT_MAP, eq->eq_eqnum, evt_type_mask,
1222 1218 TAVOR_CMD_NOSLEEP_SPIN);
1223 1219 if (status != TAVOR_CMD_SUCCESS) {
1224 1220 cmn_err(CE_CONT, "Tavor: MAP_EQ command failed: "
1225 1221 "%08x\n", status);
1226 1222 TNF_PROBE_1(tavor_eq_handler_init_map_eq_cmd_fail,
1227 1223 TAVOR_TNF_ERROR, "", tnf_uint, status, status);
1228 1224 TAVOR_TNF_EXIT(tavor_eq_handler_init);
1229 1225 return (DDI_FAILURE);
1230 1226 }
1231 1227 }
1232 1228
1233 1229 TAVOR_TNF_EXIT(tavor_eq_handler_init);
1234 1230 return (DDI_SUCCESS);
1235 1231 }
1236 1232
1237 1233
1238 1234 /*
1239 1235 * tavor_eq_handler_fini
1240 1236 * Context: Only called from attach() and/or detach() path contexts
1241 1237 */
1242 1238 static int
1243 1239 tavor_eq_handler_fini(tavor_state_t *state, tavor_eqhdl_t eq)
1244 1240 {
1245 1241 int status;
1246 1242
1247 1243 TAVOR_TNF_ENTER(tavor_eq_handler_fini);
1248 1244
1249 1245 /*
1250 1246 * Unmap the EQ from the event class to which it had been previously
1251 1247 * mapped. The unmapping is done using the Tavor MAP_EQ (in much
1252 1248 * the same way that the initial mapping was done). The difference,
1253 1249 * however, is in the TAVOR_EQ_EVT_UNMAP flag that is passed to the
1254 1250 * MAP_EQ firmware command. The TAVOR_EVT_NO_MASK (which may have
1255 1251 * been passed in at init time) still means that no association has
1256 1252 * been made between the EQ and any specific class of event (and,
1257 1253 * hence, no unmapping is necessary). Note: This command should not,
1258 1254 * in general, fail. If it does, then something (probably HW related)
1259 1255 * has gone seriously wrong.
1260 1256 */
1261 1257 if (eq->eq_evttypemask != TAVOR_EVT_NO_MASK) {
1262 1258 status = tavor_map_eq_cmd_post(state,
1263 1259 TAVOR_CMD_MAP_EQ_EVT_UNMAP, eq->eq_eqnum,
1264 1260 eq->eq_evttypemask, TAVOR_CMD_NOSLEEP_SPIN);
1265 1261 if (status != TAVOR_CMD_SUCCESS) {
1266 1262 cmn_err(CE_CONT, "Tavor: MAP_EQ command failed: "
1267 1263 "%08x\n", status);
1268 1264 TNF_PROBE_1(tavor_eq_handler_fini_map_eq_cmd_fail,
1269 1265 TAVOR_TNF_ERROR, "", tnf_uint, status, status);
1270 1266 TAVOR_TNF_EXIT(tavor_eq_handler_fini);
1271 1267 return (DDI_FAILURE);
1272 1268 }
1273 1269 }
1274 1270
1275 1271 TAVOR_TNF_EXIT(tavor_eq_handler_fini);
1276 1272 return (DDI_SUCCESS);
1277 1273 }
1278 1274
1279 1275
1280 1276 /*
1281 1277 * tavor_eqe_sync()
1282 1278 * Context: Can be called from interrupt or base context.
1283 1279 *
1284 1280 * Typically, this routine does nothing unless the EQ memory is
1285 1281 * mapped as DDI_DMA_STREAMING. However, there is a condition where
1286 1282 * ddi_dma_sync() is necessary even if the memory was mapped in
1287 1283 * consistent mode. The "force_sync" parameter is used here to force
1288 1284 * the call to ddi_dma_sync() independent of how the EQ memory was
1289 1285 * mapped.
1290 1286 */
1291 1287 static void
1292 1288 tavor_eqe_sync(tavor_eqhdl_t eq, tavor_hw_eqe_t *eqe, uint_t flag,
1293 1289 uint_t force_sync)
1294 1290 {
1295 1291 ddi_dma_handle_t dmahdl;
1296 1292 off_t offset;
1297 1293 int status;
1298 1294
1299 1295 TAVOR_TNF_ENTER(tavor_eqe_sync);
1300 1296
1301 1297 /* Determine if EQ needs to be synced or not */
1302 1298 if ((eq->eq_sync == 0) && (force_sync == TAVOR_EQ_SYNC_NORMAL)) {
1303 1299 TAVOR_TNF_EXIT(tavor_eqe_sync);
1304 1300 return;
1305 1301 }
1306 1302
1307 1303 /* Get the DMA handle from EQ context */
1308 1304 dmahdl = eq->eq_mrhdl->mr_bindinfo.bi_dmahdl;
1309 1305
1310 1306 /* Calculate offset of next EQE */
1311 1307 offset = (off_t)((uintptr_t)eqe - (uintptr_t)&eq->eq_buf[0]);
1312 1308 status = ddi_dma_sync(dmahdl, offset, sizeof (tavor_hw_eqe_t), flag);
1313 1309 if (status != DDI_SUCCESS) {
1314 1310 TNF_PROBE_0(tavor_eqe_sync_getnextentry_fail,
1315 1311 TAVOR_TNF_ERROR, "");
1316 1312 TAVOR_TNF_EXIT(tavor_eqe_sync);
1317 1313 return;
1318 1314 }
1319 1315
1320 1316 TAVOR_TNF_EXIT(tavor_eqe_sync);
1321 1317 }
1322 1318
1323 1319
1324 1320 /*
1325 1321 * tavor_port_state_change_handler()
1326 1322 * Context: Only called from interrupt context
1327 1323 */
1328 1324 static int
1329 1325 tavor_port_state_change_handler(tavor_state_t *state, tavor_eqhdl_t eq,
1330 1326 tavor_hw_eqe_t *eqe)
1331 1327 {
1332 1328 ibc_async_event_t event;
1333 1329 ibt_async_code_t type;
1334 1330 uint_t port, subtype;
1335 1331 uint_t eqe_evttype;
1336 1332 char link_msg[24];
1337 1333
1338 1334 TAVOR_TNF_ENTER(tavor_port_state_change_handler);
1339 1335
1340 1336 eqe_evttype = TAVOR_EQE_EVTTYPE_GET(eq, eqe);
1341 1337
1342 1338 ASSERT(eqe_evttype == TAVOR_EVT_PORT_STATE_CHANGE ||
1343 1339 eqe_evttype == TAVOR_EVT_EQ_OVERFLOW);
1344 1340
1345 1341 if (eqe_evttype == TAVOR_EVT_EQ_OVERFLOW) {
1346 1342 TNF_PROBE_0(tavor_port_state_change_eq_overflow_condition,
1347 1343 TAVOR_TNF_ERROR, "");
1348 1344 tavor_eq_overflow_handler(state, eq, eqe);
1349 1345
1350 1346 TAVOR_TNF_EXIT(tavor_port_state_change_handler);
1351 1347 return (DDI_FAILURE);
1352 1348 }
1353 1349
1354 1350 /*
1355 1351 * Depending on the type of Port State Change event, pass the
1356 1352 * appropriate asynch event to the IBTF.
1357 1353 */
1358 1354 port = TAVOR_EQE_PORTNUM_GET(eq, eqe);
1359 1355
1360 1356 /* Check for valid port number in event */
1361 1357 if ((port == 0) || (port > state->ts_cfg_profile->cp_num_ports)) {
1362 1358 TAVOR_WARNING(state, "Unexpected port number in port state "
1363 1359 "change event");
1364 1360 cmn_err(CE_CONT, " Port number: %02x\n", port);
1365 1361 TAVOR_TNF_EXIT(tavor_port_state_change_handler);
1366 1362 return (DDI_FAILURE);
1367 1363 }
1368 1364
1369 1365 subtype = TAVOR_EQE_EVTSUBTYPE_GET(eq, eqe);
1370 1366 if (subtype == TAVOR_PORT_LINK_ACTIVE) {
1371 1367 event.ev_port = port;
1372 1368 type = IBT_EVENT_PORT_UP;
1373 1369
1374 1370 (void) snprintf(link_msg, 23, "port %d up", port);
1375 1371 ddi_dev_report_fault(state->ts_dip, DDI_SERVICE_RESTORED,
1376 1372 DDI_EXTERNAL_FAULT, link_msg);
1377 1373 } else if (subtype == TAVOR_PORT_LINK_DOWN) {
1378 1374 event.ev_port = port;
1379 1375 type = IBT_ERROR_PORT_DOWN;
1380 1376
1381 1377 (void) snprintf(link_msg, 23, "port %d down", port);
1382 1378 ddi_dev_report_fault(state->ts_dip, DDI_SERVICE_LOST,
1383 1379 DDI_EXTERNAL_FAULT, link_msg);
1384 1380 } else {
1385 1381 TAVOR_WARNING(state, "Unexpected subtype in port state change "
1386 1382 "event");
1387 1383 cmn_err(CE_CONT, " Event type: %02x, subtype: %02x\n",
1388 1384 TAVOR_EQE_EVTTYPE_GET(eq, eqe), subtype);
1389 1385 TAVOR_TNF_EXIT(tavor_port_state_change_handler);
1390 1386 return (DDI_FAILURE);
1391 1387 }
1392 1388
1393 1389 /*
1394 1390 * Deliver the event to the IBTF. Note: If "ts_ibtfpriv" is NULL,
1395 1391 * then we have either received this event before we finished
1396 1392 * attaching to the IBTF or we've received it while we are in the
1397 1393 * process of detaching.
1398 1394 */
1399 1395 if (state->ts_ibtfpriv != NULL) {
1400 1396 TAVOR_DO_IBTF_ASYNC_CALLB(state, type, &event);
1401 1397 }
1402 1398
1403 1399 TAVOR_TNF_EXIT(tavor_port_state_change_handler);
1404 1400 return (DDI_SUCCESS);
1405 1401 }
1406 1402
1407 1403
1408 1404 /*
1409 1405 * tavor_comm_estbl_handler()
1410 1406 * Context: Only called from interrupt context
1411 1407 */
1412 1408 static int
1413 1409 tavor_comm_estbl_handler(tavor_state_t *state, tavor_eqhdl_t eq,
1414 1410 tavor_hw_eqe_t *eqe)
1415 1411 {
1416 1412 tavor_qphdl_t qp;
1417 1413 uint_t qpnum;
1418 1414 ibc_async_event_t event;
1419 1415 ibt_async_code_t type;
1420 1416 uint_t eqe_evttype;
1421 1417
1422 1418 TAVOR_TNF_ENTER(tavor_comm_estbl_handler);
1423 1419
1424 1420 eqe_evttype = TAVOR_EQE_EVTTYPE_GET(eq, eqe);
1425 1421
1426 1422 ASSERT(eqe_evttype == TAVOR_EVT_COMM_ESTABLISHED ||
1427 1423 eqe_evttype == TAVOR_EVT_EQ_OVERFLOW);
1428 1424
1429 1425 if (eqe_evttype == TAVOR_EVT_EQ_OVERFLOW) {
1430 1426 TNF_PROBE_0(tavor_comm_estbl_eq_overflow_condition,
1431 1427 TAVOR_TNF_ERROR, "");
1432 1428 tavor_eq_overflow_handler(state, eq, eqe);
1433 1429
1434 1430 TAVOR_TNF_EXIT(tavor_comm_estbl_handler);
1435 1431 return (DDI_FAILURE);
1436 1432 }
1437 1433
1438 1434 /* Get the QP handle from QP number in event descriptor */
1439 1435 qpnum = TAVOR_EQE_QPNUM_GET(eq, eqe);
1440 1436 qp = tavor_qphdl_from_qpnum(state, qpnum);
1441 1437
1442 1438 /*
1443 1439 * If the QP handle is NULL, this is probably an indication
1444 1440 * that the QP has been freed already. In which case, we
1445 1441 * should not deliver this event.
1446 1442 *
1447 1443 * We also check that the QP number in the handle is the
1448 1444 * same as the QP number in the event queue entry. This
1449 1445 * extra check allows us to handle the case where a QP was
1450 1446 * freed and then allocated again in the time it took to
1451 1447 * handle the event queue processing. By constantly incrementing
1452 1448 * the non-constrained portion of the QP number every time
1453 1449 * a new QP is allocated, we mitigate (somewhat) the chance
1454 1450 * that a stale event could be passed to the client's QP
1455 1451 * handler.
1456 1452 *
1457 1453 * Lastly, we check if "ts_ibtfpriv" is NULL. If it is then it
1458 1454 * means that we've have either received this event before we
1459 1455 * finished attaching to the IBTF or we've received it while we
1460 1456 * are in the process of detaching.
1461 1457 */
1462 1458 if ((qp != NULL) && (qp->qp_qpnum == qpnum) &&
1463 1459 (state->ts_ibtfpriv != NULL)) {
1464 1460 event.ev_qp_hdl = (ibtl_qp_hdl_t)qp->qp_hdlrarg;
1465 1461 type = IBT_EVENT_COM_EST_QP;
1466 1462
1467 1463 TAVOR_DO_IBTF_ASYNC_CALLB(state, type, &event);
1468 1464 } else {
1469 1465 TNF_PROBE_2(tavor_comm_estbl_handler_dropped_event,
1470 1466 TAVOR_TNF_ERROR, "", tnf_uint, ev_qpnum, qpnum,
1471 1467 tnf_uint, hdl_qpnum, qpnum);
1472 1468 }
1473 1469
1474 1470 TAVOR_TNF_EXIT(tavor_comm_estbl_handler);
1475 1471 return (DDI_SUCCESS);
1476 1472 }
1477 1473
1478 1474
/*
 * tavor_local_wq_cat_err_handler()
 *    Context: Only called from interrupt context
 *
 * Handles a "local work queue catastrophic error" event on a QP by
 * delivering an IBT_ERROR_CATASTROPHIC_QP asynch event to the IBTF for
 * the affected QP (or dropping the event if the QP is gone).
 */
static int
tavor_local_wq_cat_err_handler(tavor_state_t *state, tavor_eqhdl_t eq,
    tavor_hw_eqe_t *eqe)
{
	tavor_qphdl_t		qp;
	uint_t			qpnum;
	ibc_async_event_t	event;
	ibt_async_code_t	type;
	uint_t			eqe_evttype;

	TAVOR_TNF_ENTER(tavor_local_wq_cat_err_handler);

	eqe_evttype = TAVOR_EQE_EVTTYPE_GET(eq, eqe);

	ASSERT(eqe_evttype == TAVOR_EVT_LOCAL_WQ_CAT_ERROR ||
	    eqe_evttype == TAVOR_EVT_EQ_OVERFLOW);

	/* An EQ overflow pseudo-event means real events were lost */
	if (eqe_evttype == TAVOR_EVT_EQ_OVERFLOW) {
		TNF_PROBE_0(tavor_local_wq_cat_err_eq_overflow_condition,
		    TAVOR_TNF_ERROR, "");
		tavor_eq_overflow_handler(state, eq, eqe);

		TAVOR_TNF_EXIT(tavor_local_wq_cat_err_handler);
		return (DDI_FAILURE);
	}

	/* Get the QP handle from QP number in event descriptor */
	qpnum = TAVOR_EQE_QPNUM_GET(eq, eqe);
	qp = tavor_qphdl_from_qpnum(state, qpnum);

	/*
	 * If the QP handle is NULL, this is probably an indication
	 * that the QP has been freed already.  In which case, we
	 * should not deliver this event.
	 *
	 * We also check that the QP number in the handle is the
	 * same as the QP number in the event queue entry.  This
	 * extra check allows us to handle the case where a QP was
	 * freed and then allocated again in the time it took to
	 * handle the event queue processing.  By constantly incrementing
	 * the non-constrained portion of the QP number every time
	 * a new QP is allocated, we mitigate (somewhat) the chance
	 * that a stale event could be passed to the client's QP
	 * handler.
	 *
	 * Lastly, we check if "ts_ibtfpriv" is NULL.  If it is then it
	 * means that we've have either received this event before we
	 * finished attaching to the IBTF or we've received it while we
	 * are in the process of detaching.
	 */
	if ((qp != NULL) && (qp->qp_qpnum == qpnum) &&
	    (state->ts_ibtfpriv != NULL)) {
		event.ev_qp_hdl = (ibtl_qp_hdl_t)qp->qp_hdlrarg;
		type = IBT_ERROR_CATASTROPHIC_QP;

		TAVOR_DO_IBTF_ASYNC_CALLB(state, type, &event);
	} else {
		/*
		 * NOTE(review): both probe arguments record "qpnum"; the
		 * handle's qp_qpnum is not logged here (qp may be NULL).
		 */
		TNF_PROBE_2(tavor_local_wq_cat_err_handler_dropped_event,
		    TAVOR_TNF_ERROR, "", tnf_uint, ev_qpnum, qpnum,
		    tnf_uint, hdl_qpnum, qpnum);
	}

	TAVOR_TNF_EXIT(tavor_local_wq_cat_err_handler);
	return (DDI_SUCCESS);
}
1548 1544
1549 1545
1550 1546 /*
1551 1547 * tavor_invreq_local_wq_err_handler()
1552 1548 * Context: Only called from interrupt context
1553 1549 */
1554 1550 static int
1555 1551 tavor_invreq_local_wq_err_handler(tavor_state_t *state, tavor_eqhdl_t eq,
1556 1552 tavor_hw_eqe_t *eqe)
1557 1553 {
1558 1554 tavor_qphdl_t qp;
1559 1555 uint_t qpnum;
1560 1556 ibc_async_event_t event;
1561 1557 ibt_async_code_t type;
1562 1558 uint_t eqe_evttype;
1563 1559
1564 1560 TAVOR_TNF_ENTER(tavor_invreq_local_wq_err_handler);
1565 1561
1566 1562 eqe_evttype = TAVOR_EQE_EVTTYPE_GET(eq, eqe);
1567 1563
1568 1564 ASSERT(eqe_evttype == TAVOR_EVT_INV_REQ_LOCAL_WQ_ERROR ||
1569 1565 eqe_evttype == TAVOR_EVT_EQ_OVERFLOW);
1570 1566
1571 1567 if (eqe_evttype == TAVOR_EVT_EQ_OVERFLOW) {
1572 1568 TNF_PROBE_0(tavor_invreq_local_wq_err_eq_overflow_condition,
1573 1569 TAVOR_TNF_ERROR, "");
1574 1570 tavor_eq_overflow_handler(state, eq, eqe);
1575 1571
1576 1572 TAVOR_TNF_EXIT(tavor_port_state_change_handler);
1577 1573 return (DDI_FAILURE);
1578 1574 }
1579 1575
1580 1576 /* Get the QP handle from QP number in event descriptor */
1581 1577 qpnum = TAVOR_EQE_QPNUM_GET(eq, eqe);
1582 1578 qp = tavor_qphdl_from_qpnum(state, qpnum);
1583 1579
1584 1580 /*
1585 1581 * If the QP handle is NULL, this is probably an indication
1586 1582 * that the QP has been freed already. In which case, we
1587 1583 * should not deliver this event.
1588 1584 *
1589 1585 * We also check that the QP number in the handle is the
1590 1586 * same as the QP number in the event queue entry. This
1591 1587 * extra check allows us to handle the case where a QP was
1592 1588 * freed and then allocated again in the time it took to
1593 1589 * handle the event queue processing. By constantly incrementing
1594 1590 * the non-constrained portion of the QP number every time
1595 1591 * a new QP is allocated, we mitigate (somewhat) the chance
1596 1592 * that a stale event could be passed to the client's QP
1597 1593 * handler.
1598 1594 *
1599 1595 * Lastly, we check if "ts_ibtfpriv" is NULL. If it is then it
1600 1596 * means that we've have either received this event before we
1601 1597 * finished attaching to the IBTF or we've received it while we
1602 1598 * are in the process of detaching.
1603 1599 */
1604 1600 if ((qp != NULL) && (qp->qp_qpnum == qpnum) &&
1605 1601 (state->ts_ibtfpriv != NULL)) {
1606 1602 event.ev_qp_hdl = (ibtl_qp_hdl_t)qp->qp_hdlrarg;
1607 1603 type = IBT_ERROR_INVALID_REQUEST_QP;
1608 1604
1609 1605 TAVOR_DO_IBTF_ASYNC_CALLB(state, type, &event);
1610 1606 } else {
1611 1607 TNF_PROBE_2(tavor_invreq_local_wq_err_handler_dropped_event,
1612 1608 TAVOR_TNF_ERROR, "", tnf_uint, ev_qpnum, qpnum,
1613 1609 tnf_uint, hdl_qpnum, qpnum);
1614 1610 }
1615 1611
1616 1612 TAVOR_TNF_EXIT(tavor_invreq_local_wq_err_handler);
1617 1613 return (DDI_SUCCESS);
1618 1614 }
1619 1615
1620 1616
1621 1617 /*
1622 1618 * tavor_local_acc_vio_wq_err_handler()
1623 1619 * Context: Only called from interrupt context
1624 1620 */
1625 1621 static int
1626 1622 tavor_local_acc_vio_wq_err_handler(tavor_state_t *state, tavor_eqhdl_t eq,
1627 1623 tavor_hw_eqe_t *eqe)
1628 1624 {
1629 1625 tavor_qphdl_t qp;
1630 1626 uint_t qpnum;
1631 1627 ibc_async_event_t event;
1632 1628 ibt_async_code_t type;
1633 1629 uint_t eqe_evttype;
1634 1630
1635 1631 TAVOR_TNF_ENTER(tavor_local_acc_vio_wq_err_handler);
1636 1632
1637 1633 eqe_evttype = TAVOR_EQE_EVTTYPE_GET(eq, eqe);
1638 1634
1639 1635 ASSERT(eqe_evttype == TAVOR_EVT_LOCAL_ACC_VIO_WQ_ERROR ||
1640 1636 eqe_evttype == TAVOR_EVT_EQ_OVERFLOW);
1641 1637
1642 1638 if (eqe_evttype == TAVOR_EVT_EQ_OVERFLOW) {
1643 1639 TNF_PROBE_0(tavor_local_acc_vio_wq_err_eq_overflow_condition,
1644 1640 TAVOR_TNF_ERROR, "");
1645 1641 tavor_eq_overflow_handler(state, eq, eqe);
1646 1642
1647 1643 TAVOR_TNF_EXIT(tavor_local_acc_vio_wq_err_handler);
1648 1644 return (DDI_FAILURE);
1649 1645 }
1650 1646
1651 1647 /* Get the QP handle from QP number in event descriptor */
1652 1648 qpnum = TAVOR_EQE_QPNUM_GET(eq, eqe);
1653 1649 qp = tavor_qphdl_from_qpnum(state, qpnum);
1654 1650
1655 1651 /*
1656 1652 * If the QP handle is NULL, this is probably an indication
1657 1653 * that the QP has been freed already. In which case, we
1658 1654 * should not deliver this event.
1659 1655 *
1660 1656 * We also check that the QP number in the handle is the
1661 1657 * same as the QP number in the event queue entry. This
1662 1658 * extra check allows us to handle the case where a QP was
1663 1659 * freed and then allocated again in the time it took to
1664 1660 * handle the event queue processing. By constantly incrementing
1665 1661 * the non-constrained portion of the QP number every time
1666 1662 * a new QP is allocated, we mitigate (somewhat) the chance
1667 1663 * that a stale event could be passed to the client's QP
1668 1664 * handler.
1669 1665 *
1670 1666 * Lastly, we check if "ts_ibtfpriv" is NULL. If it is then it
1671 1667 * means that we've have either received this event before we
1672 1668 * finished attaching to the IBTF or we've received it while we
1673 1669 * are in the process of detaching.
1674 1670 */
1675 1671 if ((qp != NULL) && (qp->qp_qpnum == qpnum) &&
1676 1672 (state->ts_ibtfpriv != NULL)) {
1677 1673 event.ev_qp_hdl = (ibtl_qp_hdl_t)qp->qp_hdlrarg;
1678 1674 type = IBT_ERROR_ACCESS_VIOLATION_QP;
1679 1675
1680 1676 TAVOR_DO_IBTF_ASYNC_CALLB(state, type, &event);
1681 1677 } else {
1682 1678 TNF_PROBE_2(tavor_local_acc_vio_wq_err_handler_dropped_event,
1683 1679 TAVOR_TNF_ERROR, "", tnf_uint, ev_qpnum, qpnum,
1684 1680 tnf_uint, hdl_qpnum, qpnum);
1685 1681 }
1686 1682
1687 1683 TAVOR_TNF_EXIT(tavor_local_acc_vio_wq_err_handler);
1688 1684 return (DDI_SUCCESS);
1689 1685 }
1690 1686
1691 1687
1692 1688 /*
1693 1689 * tavor_sendq_drained_handler()
1694 1690 * Context: Only called from interrupt context
1695 1691 */
1696 1692 static int
1697 1693 tavor_sendq_drained_handler(tavor_state_t *state, tavor_eqhdl_t eq,
1698 1694 tavor_hw_eqe_t *eqe)
1699 1695 {
1700 1696 tavor_qphdl_t qp;
1701 1697 uint_t qpnum;
1702 1698 ibc_async_event_t event;
1703 1699 uint_t forward_sqd_event;
1704 1700 ibt_async_code_t type;
1705 1701 uint_t eqe_evttype;
1706 1702
1707 1703 TAVOR_TNF_ENTER(tavor_sendq_drained_handler);
1708 1704
1709 1705 eqe_evttype = TAVOR_EQE_EVTTYPE_GET(eq, eqe);
1710 1706
1711 1707 ASSERT(eqe_evttype == TAVOR_EVT_SEND_QUEUE_DRAINED ||
1712 1708 eqe_evttype == TAVOR_EVT_EQ_OVERFLOW);
1713 1709
1714 1710 if (eqe_evttype == TAVOR_EVT_EQ_OVERFLOW) {
1715 1711 TNF_PROBE_0(tavor_sendq_drained_eq_overflow_condition,
1716 1712 TAVOR_TNF_ERROR, "");
1717 1713 tavor_eq_overflow_handler(state, eq, eqe);
1718 1714
1719 1715 TAVOR_TNF_EXIT(tavor_sendq_drained_handler);
1720 1716 return (DDI_FAILURE);
1721 1717 }
1722 1718
1723 1719 /* Get the QP handle from QP number in event descriptor */
1724 1720 qpnum = TAVOR_EQE_QPNUM_GET(eq, eqe);
1725 1721 qp = tavor_qphdl_from_qpnum(state, qpnum);
1726 1722
1727 1723 /*
1728 1724 * If the QP handle is NULL, this is probably an indication
1729 1725 * that the QP has been freed already. In which case, we
1730 1726 * should not deliver this event.
1731 1727 *
1732 1728 * We also check that the QP number in the handle is the
1733 1729 * same as the QP number in the event queue entry. This
1734 1730 * extra check allows us to handle the case where a QP was
1735 1731 * freed and then allocated again in the time it took to
1736 1732 * handle the event queue processing. By constantly incrementing
1737 1733 * the non-constrained portion of the QP number every time
1738 1734 * a new QP is allocated, we mitigate (somewhat) the chance
1739 1735 * that a stale event could be passed to the client's QP
1740 1736 * handler.
1741 1737 *
1742 1738 * And then we check if "ts_ibtfpriv" is NULL. If it is then it
1743 1739 * means that we've have either received this event before we
1744 1740 * finished attaching to the IBTF or we've received it while we
1745 1741 * are in the process of detaching.
1746 1742 */
1747 1743 if ((qp != NULL) && (qp->qp_qpnum == qpnum) &&
1748 1744 (state->ts_ibtfpriv != NULL)) {
1749 1745 event.ev_qp_hdl = (ibtl_qp_hdl_t)qp->qp_hdlrarg;
1750 1746 type = IBT_EVENT_SQD;
1751 1747
1752 1748 /*
1753 1749 * Grab the QP lock and update the QP state to reflect that
1754 1750 * the Send Queue Drained event has arrived. Also determine
1755 1751 * whether the event is intended to be forwarded on to the
1756 1752 * consumer or not. This information is used below in
1757 1753 * determining whether or not to call the IBTF.
1758 1754 */
1759 1755 mutex_enter(&qp->qp_lock);
1760 1756 forward_sqd_event = qp->qp_forward_sqd_event;
1761 1757 qp->qp_forward_sqd_event = 0;
1762 1758 qp->qp_sqd_still_draining = 0;
1763 1759 mutex_exit(&qp->qp_lock);
1764 1760
1765 1761 if (forward_sqd_event != 0) {
1766 1762 TAVOR_DO_IBTF_ASYNC_CALLB(state, type, &event);
1767 1763 }
1768 1764 } else {
1769 1765 TNF_PROBE_2(tavor_sendq_drained_handler_dropped_event,
1770 1766 TAVOR_TNF_ERROR, "", tnf_uint, ev_qpnum, qpnum,
1771 1767 tnf_uint, hdl_qpnum, qpnum);
1772 1768 }
1773 1769
1774 1770 TAVOR_TNF_EXIT(tavor_sendq_drained_handler);
1775 1771 return (DDI_SUCCESS);
1776 1772 }
1777 1773
1778 1774
1779 1775 /*
1780 1776 * tavor_path_mig_handler()
1781 1777 * Context: Only called from interrupt context
1782 1778 */
1783 1779 static int
1784 1780 tavor_path_mig_handler(tavor_state_t *state, tavor_eqhdl_t eq,
1785 1781 tavor_hw_eqe_t *eqe)
1786 1782 {
1787 1783 tavor_qphdl_t qp;
1788 1784 uint_t qpnum;
1789 1785 ibc_async_event_t event;
1790 1786 ibt_async_code_t type;
1791 1787 uint_t eqe_evttype;
1792 1788
1793 1789 TAVOR_TNF_ENTER(tavor_path_mig_handler);
1794 1790
1795 1791 eqe_evttype = TAVOR_EQE_EVTTYPE_GET(eq, eqe);
1796 1792
1797 1793 ASSERT(eqe_evttype == TAVOR_EVT_PATH_MIGRATED ||
1798 1794 eqe_evttype == TAVOR_EVT_EQ_OVERFLOW);
1799 1795
1800 1796 if (eqe_evttype == TAVOR_EVT_EQ_OVERFLOW) {
1801 1797 TNF_PROBE_0(tavor_path_mig_eq_overflow_condition,
1802 1798 TAVOR_TNF_ERROR, "");
1803 1799 tavor_eq_overflow_handler(state, eq, eqe);
1804 1800
1805 1801 TAVOR_TNF_EXIT(tavor_path_mig_handler);
1806 1802 return (DDI_FAILURE);
1807 1803 }
1808 1804
1809 1805 /* Get the QP handle from QP number in event descriptor */
1810 1806 qpnum = TAVOR_EQE_QPNUM_GET(eq, eqe);
1811 1807 qp = tavor_qphdl_from_qpnum(state, qpnum);
1812 1808
1813 1809 /*
1814 1810 * If the QP handle is NULL, this is probably an indication
1815 1811 * that the QP has been freed already. In which case, we
1816 1812 * should not deliver this event.
1817 1813 *
1818 1814 * We also check that the QP number in the handle is the
1819 1815 * same as the QP number in the event queue entry. This
1820 1816 * extra check allows us to handle the case where a QP was
1821 1817 * freed and then allocated again in the time it took to
1822 1818 * handle the event queue processing. By constantly incrementing
1823 1819 * the non-constrained portion of the QP number every time
1824 1820 * a new QP is allocated, we mitigate (somewhat) the chance
1825 1821 * that a stale event could be passed to the client's QP
1826 1822 * handler.
1827 1823 *
1828 1824 * Lastly, we check if "ts_ibtfpriv" is NULL. If it is then it
1829 1825 * means that we've have either received this event before we
1830 1826 * finished attaching to the IBTF or we've received it while we
1831 1827 * are in the process of detaching.
1832 1828 */
1833 1829 if ((qp != NULL) && (qp->qp_qpnum == qpnum) &&
1834 1830 (state->ts_ibtfpriv != NULL)) {
1835 1831 event.ev_qp_hdl = (ibtl_qp_hdl_t)qp->qp_hdlrarg;
1836 1832 type = IBT_EVENT_PATH_MIGRATED_QP;
1837 1833
1838 1834 TAVOR_DO_IBTF_ASYNC_CALLB(state, type, &event);
1839 1835 } else {
1840 1836 TNF_PROBE_2(tavor_path_mig_handler_dropped_event,
1841 1837 TAVOR_TNF_ERROR, "", tnf_uint, ev_qpnum, qpnum,
1842 1838 tnf_uint, hdl_qpnum, qpnum);
1843 1839 }
1844 1840
1845 1841 TAVOR_TNF_EXIT(tavor_path_mig_handler);
1846 1842 return (DDI_SUCCESS);
1847 1843 }
1848 1844
1849 1845
1850 1846 /*
1851 1847 * tavor_path_mig_err_handler()
1852 1848 * Context: Only called from interrupt context
1853 1849 */
1854 1850 static int
1855 1851 tavor_path_mig_err_handler(tavor_state_t *state, tavor_eqhdl_t eq,
1856 1852 tavor_hw_eqe_t *eqe)
1857 1853 {
1858 1854 tavor_qphdl_t qp;
1859 1855 uint_t qpnum;
1860 1856 ibc_async_event_t event;
1861 1857 ibt_async_code_t type;
1862 1858 uint_t eqe_evttype;
1863 1859
1864 1860 TAVOR_TNF_ENTER(tavor_path_mig_err_handler);
1865 1861
1866 1862 eqe_evttype = TAVOR_EQE_EVTTYPE_GET(eq, eqe);
1867 1863
1868 1864 ASSERT(eqe_evttype == TAVOR_EVT_PATH_MIGRATE_FAILED ||
1869 1865 eqe_evttype == TAVOR_EVT_EQ_OVERFLOW);
1870 1866
1871 1867 if (eqe_evttype == TAVOR_EVT_EQ_OVERFLOW) {
1872 1868 TNF_PROBE_0(tavor_path_mig_err_eq_overflow_condition,
1873 1869 TAVOR_TNF_ERROR, "");
1874 1870 tavor_eq_overflow_handler(state, eq, eqe);
1875 1871
1876 1872 TAVOR_TNF_EXIT(tavor_path_mig_err_handler);
1877 1873 return (DDI_FAILURE);
1878 1874 }
1879 1875
1880 1876 /* Get the QP handle from QP number in event descriptor */
1881 1877 qpnum = TAVOR_EQE_QPNUM_GET(eq, eqe);
1882 1878 qp = tavor_qphdl_from_qpnum(state, qpnum);
1883 1879
1884 1880 /*
1885 1881 * If the QP handle is NULL, this is probably an indication
1886 1882 * that the QP has been freed already. In which case, we
1887 1883 * should not deliver this event.
1888 1884 *
1889 1885 * We also check that the QP number in the handle is the
1890 1886 * same as the QP number in the event queue entry. This
1891 1887 * extra check allows us to handle the case where a QP was
1892 1888 * freed and then allocated again in the time it took to
1893 1889 * handle the event queue processing. By constantly incrementing
1894 1890 * the non-constrained portion of the QP number every time
1895 1891 * a new QP is allocated, we mitigate (somewhat) the chance
1896 1892 * that a stale event could be passed to the client's QP
1897 1893 * handler.
1898 1894 *
1899 1895 * Lastly, we check if "ts_ibtfpriv" is NULL. If it is then it
1900 1896 * means that we've have either received this event before we
1901 1897 * finished attaching to the IBTF or we've received it while we
1902 1898 * are in the process of detaching.
1903 1899 */
1904 1900 if ((qp != NULL) && (qp->qp_qpnum == qpnum) &&
1905 1901 (state->ts_ibtfpriv != NULL)) {
1906 1902 event.ev_qp_hdl = (ibtl_qp_hdl_t)qp->qp_hdlrarg;
1907 1903 type = IBT_ERROR_PATH_MIGRATE_REQ_QP;
1908 1904
1909 1905 TAVOR_DO_IBTF_ASYNC_CALLB(state, type, &event);
1910 1906 } else {
1911 1907 TNF_PROBE_2(tavor_path_mig_err_handler_dropped_event,
1912 1908 TAVOR_TNF_ERROR, "", tnf_uint, ev_qpnum, qpnum,
1913 1909 tnf_uint, hdl_qpnum, qpnum);
1914 1910 }
1915 1911
1916 1912 TAVOR_TNF_EXIT(tavor_path_mig_err_handler);
1917 1913 return (DDI_SUCCESS);
1918 1914 }
1919 1915
1920 1916
1921 1917 /*
1922 1918 * tavor_srq_catastrophic_handler()
1923 1919 * Context: Only called from interrupt context
1924 1920 */
1925 1921 static int
1926 1922 tavor_srq_catastrophic_handler(tavor_state_t *state, tavor_eqhdl_t eq,
1927 1923 tavor_hw_eqe_t *eqe)
1928 1924 {
1929 1925 tavor_qphdl_t qp;
1930 1926 uint_t qpnum;
1931 1927 ibc_async_event_t event;
1932 1928 ibt_async_code_t type;
1933 1929 uint_t eqe_evttype;
1934 1930
1935 1931 TAVOR_TNF_ENTER(tavor_srq_catastrophic_handler);
1936 1932
1937 1933 eqe_evttype = TAVOR_EQE_EVTTYPE_GET(eq, eqe);
1938 1934
1939 1935 ASSERT(eqe_evttype == TAVOR_EVT_SRQ_CATASTROPHIC_ERROR ||
1940 1936 eqe_evttype == TAVOR_EVT_EQ_OVERFLOW);
1941 1937
1942 1938 if (eqe_evttype == TAVOR_EVT_EQ_OVERFLOW) {
1943 1939 TNF_PROBE_0(tavor_srq_catastrophic_overflow_condition,
1944 1940 TAVOR_TNF_ERROR, "");
1945 1941 tavor_eq_overflow_handler(state, eq, eqe);
1946 1942
1947 1943 TAVOR_TNF_EXIT(tavor_srq_catastrophic_handler);
1948 1944 return (DDI_FAILURE);
1949 1945 }
1950 1946
1951 1947 /* Get the QP handle from QP number in event descriptor */
1952 1948 qpnum = TAVOR_EQE_QPNUM_GET(eq, eqe);
1953 1949 qp = tavor_qphdl_from_qpnum(state, qpnum);
1954 1950
1955 1951 /*
1956 1952 * If the QP handle is NULL, this is probably an indication
1957 1953 * that the QP has been freed already. In which case, we
1958 1954 * should not deliver this event.
1959 1955 *
1960 1956 * We also check that the QP number in the handle is the
1961 1957 * same as the QP number in the event queue entry. This
1962 1958 * extra check allows us to handle the case where a QP was
1963 1959 * freed and then allocated again in the time it took to
1964 1960 * handle the event queue processing. By constantly incrementing
1965 1961 * the non-constrained portion of the QP number every time
1966 1962 * a new QP is allocated, we mitigate (somewhat) the chance
1967 1963 * that a stale event could be passed to the client's QP
1968 1964 * handler.
1969 1965 *
1970 1966 * Lastly, we check if "ts_ibtfpriv" is NULL. If it is then it
1971 1967 * means that we've have either received this event before we
1972 1968 * finished attaching to the IBTF or we've received it while we
1973 1969 * are in the process of detaching.
1974 1970 */
1975 1971 if ((qp != NULL) && (qp->qp_qpnum == qpnum) &&
1976 1972 (state->ts_ibtfpriv != NULL)) {
1977 1973 event.ev_srq_hdl = (ibt_srq_hdl_t)qp->qp_srqhdl->srq_hdlrarg;
1978 1974 type = IBT_ERROR_CATASTROPHIC_SRQ;
1979 1975
1980 1976 mutex_enter(&qp->qp_srqhdl->srq_lock);
1981 1977 qp->qp_srqhdl->srq_state = TAVOR_SRQ_STATE_ERROR;
1982 1978 mutex_exit(&qp->qp_srqhdl->srq_lock);
1983 1979
1984 1980 TAVOR_DO_IBTF_ASYNC_CALLB(state, type, &event);
1985 1981 } else {
1986 1982 TNF_PROBE_2(tavor_srq_catastrophic_handler_dropped_event,
1987 1983 TAVOR_TNF_ERROR, "", tnf_uint, ev_qpnum, qpnum,
1988 1984 tnf_uint, hdl_qpnum, qpnum);
1989 1985 }
1990 1986
1991 1987 TAVOR_TNF_EXIT(tavor_srq_catastrophic_handler);
1992 1988 return (DDI_SUCCESS);
1993 1989 }
1994 1990
1995 1991
1996 1992 /*
1997 1993 * tavor_srq_last_wqe_reached_handler()
1998 1994 * Context: Only called from interrupt context
1999 1995 */
2000 1996 static int
2001 1997 tavor_srq_last_wqe_reached_handler(tavor_state_t *state, tavor_eqhdl_t eq,
2002 1998 tavor_hw_eqe_t *eqe)
2003 1999 {
2004 2000 tavor_qphdl_t qp;
2005 2001 uint_t qpnum;
2006 2002 ibc_async_event_t event;
2007 2003 ibt_async_code_t type;
2008 2004 uint_t eqe_evttype;
2009 2005
2010 2006 TAVOR_TNF_ENTER(tavor_srq_last_wqe_reached_handler);
2011 2007
2012 2008 eqe_evttype = TAVOR_EQE_EVTTYPE_GET(eq, eqe);
2013 2009
2014 2010 ASSERT(eqe_evttype == TAVOR_EVT_SRQ_LAST_WQE_REACHED ||
2015 2011 eqe_evttype == TAVOR_EVT_EQ_OVERFLOW);
2016 2012
2017 2013 if (eqe_evttype == TAVOR_EVT_EQ_OVERFLOW) {
2018 2014 TNF_PROBE_0(tavor_srq_last_wqe_reached_over_condition,
2019 2015 TAVOR_TNF_ERROR, "");
2020 2016 tavor_eq_overflow_handler(state, eq, eqe);
2021 2017
2022 2018 TAVOR_TNF_EXIT(tavor_srq_last_wqe_reached_handler);
2023 2019 return (DDI_FAILURE);
2024 2020 }
2025 2021
2026 2022 /* Get the QP handle from QP number in event descriptor */
2027 2023 qpnum = TAVOR_EQE_QPNUM_GET(eq, eqe);
2028 2024 qp = tavor_qphdl_from_qpnum(state, qpnum);
2029 2025
2030 2026 /*
2031 2027 * If the QP handle is NULL, this is probably an indication
2032 2028 * that the QP has been freed already. In which case, we
2033 2029 * should not deliver this event.
2034 2030 *
2035 2031 * We also check that the QP number in the handle is the
2036 2032 * same as the QP number in the event queue entry. This
2037 2033 * extra check allows us to handle the case where a QP was
2038 2034 * freed and then allocated again in the time it took to
2039 2035 * handle the event queue processing. By constantly incrementing
2040 2036 * the non-constrained portion of the QP number every time
2041 2037 * a new QP is allocated, we mitigate (somewhat) the chance
2042 2038 * that a stale event could be passed to the client's QP
2043 2039 * handler.
2044 2040 *
2045 2041 * Lastly, we check if "ts_ibtfpriv" is NULL. If it is then it
2046 2042 * means that we've have either received this event before we
2047 2043 * finished attaching to the IBTF or we've received it while we
2048 2044 * are in the process of detaching.
2049 2045 */
2050 2046 if ((qp != NULL) && (qp->qp_qpnum == qpnum) &&
2051 2047 (state->ts_ibtfpriv != NULL)) {
2052 2048 event.ev_qp_hdl = (ibtl_qp_hdl_t)qp->qp_hdlrarg;
2053 2049 type = IBT_EVENT_EMPTY_CHAN;
2054 2050
2055 2051 TAVOR_DO_IBTF_ASYNC_CALLB(state, type, &event);
2056 2052 } else {
2057 2053 TNF_PROBE_2(tavor_srq_last_wqe_reached_dropped_event,
2058 2054 TAVOR_TNF_ERROR, "", tnf_uint, ev_qpnum, qpnum,
2059 2055 tnf_uint, hdl_qpnum, qpnum);
2060 2056 }
2061 2057
2062 2058 TAVOR_TNF_EXIT(tavor_srq_last_wqe_reached_handler);
2063 2059 return (DDI_SUCCESS);
2064 2060 }
2065 2061
2066 2062
2067 2063 /*
2068 2064 * tavor_ecc_detection_handler()
2069 2065 * Context: Only called from interrupt context
2070 2066 */
2071 2067 static int
2072 2068 tavor_ecc_detection_handler(tavor_state_t *state, tavor_eqhdl_t eq,
2073 2069 tavor_hw_eqe_t *eqe)
2074 2070 {
2075 2071 uint_t eqe_evttype;
2076 2072 uint_t data;
2077 2073 int i;
2078 2074
2079 2075 TAVOR_TNF_ENTER(tavor_ecc_detection_handler);
2080 2076
2081 2077 eqe_evttype = TAVOR_EQE_EVTTYPE_GET(eq, eqe);
2082 2078
2083 2079 ASSERT(eqe_evttype == TAVOR_EVT_ECC_DETECTION ||
2084 2080 eqe_evttype == TAVOR_EVT_EQ_OVERFLOW);
2085 2081
2086 2082 if (eqe_evttype == TAVOR_EVT_EQ_OVERFLOW) {
2087 2083 TNF_PROBE_0(tavor_ecc_detection_eq_overflow_condition,
2088 2084 TAVOR_TNF_ERROR, "");
2089 2085 tavor_eq_overflow_handler(state, eq, eqe);
2090 2086
2091 2087 TAVOR_TNF_EXIT(tavor_ecc_detection_handler);
2092 2088 return (DDI_FAILURE);
2093 2089 }
2094 2090
2095 2091 /*
2096 2092 * The "ECC Detection Event" indicates that a correctable single-bit
2097 2093 * has occurred with the attached DDR. The EQE provides some
2098 2094 * additional information about the errored EQ. So we print a warning
2099 2095 * message here along with that additional information.
2100 2096 */
2101 2097 TAVOR_WARNING(state, "ECC Correctable Error Event Detected");
2102 2098 for (i = 0; i < sizeof (tavor_hw_eqe_t) >> 2; i++) {
2103 2099 data = ((uint_t *)eqe)[i];
2104 2100 cmn_err(CE_CONT, "! EQE[%02x]: %08x\n", i, data);
2105 2101 }
2106 2102
2107 2103 TAVOR_TNF_EXIT(tavor_ecc_detection_handler);
2108 2104 return (DDI_SUCCESS);
2109 2105 }
2110 2106
2111 2107
2112 2108 /*
2113 2109 * tavor_eq_overflow_handler()
2114 2110 * Context: Only called from interrupt context
2115 2111 */
2116 2112 void
2117 2113 tavor_eq_overflow_handler(tavor_state_t *state, tavor_eqhdl_t eq,
2118 2114 tavor_hw_eqe_t *eqe)
2119 2115 {
2120 2116 uint_t error_type, data;
2121 2117
2122 2118 TAVOR_TNF_ENTER(tavor_eq_overflow_handler);
2123 2119
2124 2120 ASSERT(TAVOR_EQE_EVTTYPE_GET(eq, eqe) == TAVOR_EVT_EQ_OVERFLOW);
2125 2121
2126 2122 /*
2127 2123 * The "Event Queue Overflow Event" indicates that something has
2128 2124 * probably gone seriously wrong with some hardware (or, perhaps,
2129 2125 * with the software... though it's unlikely in this case). The EQE
2130 2126 * provides some additional information about the errored EQ. So we
2131 2127 * print a warning message here along with that additional information.
2132 2128 */
2133 2129 error_type = TAVOR_EQE_OPERRTYPE_GET(eq, eqe);
2134 2130 data = TAVOR_EQE_OPERRDATA_GET(eq, eqe);
2135 2131
2136 2132 TAVOR_WARNING(state, "Event Queue overflow");
2137 2133 cmn_err(CE_CONT, " Error type: %02x, data: %08x\n", error_type, data);
2138 2134
2139 2135 TAVOR_TNF_EXIT(tavor_eq_overflow_handler);
2140 2136 }
2141 2137
2142 2138
2143 2139 /*
2144 2140 * tavor_no_eqhandler
2145 2141 * Context: Only called from interrupt context
2146 2142 */
2147 2143 /* ARGSUSED */
2148 2144 static int
2149 2145 tavor_no_eqhandler(tavor_state_t *state, tavor_eqhdl_t eq,
2150 2146 tavor_hw_eqe_t *eqe)
2151 2147 {
2152 2148 uint_t data;
2153 2149 int i;
2154 2150
2155 2151 TAVOR_TNF_ENTER(tavor_no_eqhandler);
2156 2152
2157 2153 /*
2158 2154 * This "unexpected event" handler (or "catch-all" handler) will
2159 2155 * receive all events for which no other handler has been registered.
2160 2156 * If we end up here, then something has probably gone seriously wrong
2161 2157 * with the Tavor hardware (or, perhaps, with the software... though
2162 2158 * it's unlikely in this case). The EQE provides all the information
2163 2159 * about the event. So we print a warning message here along with
2164 2160 * the contents of the EQE.
2165 2161 */
2166 2162 TAVOR_WARNING(state, "Unexpected Event handler");
2167 2163 cmn_err(CE_CONT, " Event type: %02x, subtype: %02x\n",
2168 2164 TAVOR_EQE_EVTTYPE_GET(eq, eqe), TAVOR_EQE_EVTSUBTYPE_GET(eq, eqe));
2169 2165 for (i = 0; i < sizeof (tavor_hw_eqe_t) >> 2; i++) {
2170 2166 data = ((uint_t *)eqe)[i];
2171 2167 cmn_err(CE_CONT, " EQE[%02x]: %08x\n", i, data);
2172 2168 }
2173 2169
2174 2170 TAVOR_TNF_EXIT(tavor_no_eqhandler);
2175 2171 return (DDI_SUCCESS);
2176 2172 }
↓ open down ↓ |
1151 lines elided |
↑ open up ↑ |
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX