Print this page
8368 remove warlock leftovers from usr/src/uts
Split |
Close |
Expand all |
Collapse all |
--- old/usr/src/uts/common/io/ib/adapters/hermon/hermon_cq.c
+++ new/usr/src/uts/common/io/ib/adapters/hermon/hermon_cq.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21
22 22 /*
23 23 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
24 24 */
25 25
26 26 /*
27 27 * hermon_cq.c
28 28 * Hermon Completion Queue Processing Routines
29 29 *
30 30 * Implements all the routines necessary for allocating, freeing, resizing,
31 31 * and handling the completion type events that the Hermon hardware can
32 32 * generate.
33 33 */
34 34
35 35 #include <sys/types.h>
36 36 #include <sys/conf.h>
37 37 #include <sys/ddi.h>
38 38 #include <sys/sunddi.h>
39 39 #include <sys/modctl.h>
40 40 #include <sys/bitmap.h>
41 41 #include <sys/sysmacros.h>
42 42
43 43 #include <sys/ib/adapters/hermon/hermon.h>
44 44
45 45 int hermon_should_panic = 0; /* debugging aid */
46 46
47 47 #define hermon_cq_update_ci_doorbell(cq) \
48 48 /* Build the doorbell record data (low 24 bits only) */ \
49 49 HERMON_UAR_DB_RECORD_WRITE(cq->cq_arm_ci_vdbr, \
50 50 cq->cq_consindx & 0x00FFFFFF)
51 51
52 52 static int hermon_cq_arm_doorbell(hermon_state_t *state, hermon_cqhdl_t cq,
53 53 uint_t cmd);
54 54 #pragma inline(hermon_cq_arm_doorbell)
55 55 static void hermon_arm_cq_dbr_init(hermon_dbr_t *cq_arm_dbr);
56 56 #pragma inline(hermon_arm_cq_dbr_init)
57 57 static void hermon_cq_cqe_consume(hermon_state_t *state, hermon_cqhdl_t cq,
58 58 hermon_hw_cqe_t *cqe, ibt_wc_t *wc);
59 59 static void hermon_cq_errcqe_consume(hermon_state_t *state, hermon_cqhdl_t cq,
60 60 hermon_hw_cqe_t *cqe, ibt_wc_t *wc);
61 61
62 62
63 63 /*
64 64 * hermon_cq_alloc()
65 65 * Context: Can be called only from user or kernel context.
66 66 */
67 67 int
68 68 hermon_cq_alloc(hermon_state_t *state, ibt_cq_hdl_t ibt_cqhdl,
69 69 ibt_cq_attr_t *cq_attr, uint_t *actual_size, hermon_cqhdl_t *cqhdl,
70 70 uint_t sleepflag)
71 71 {
72 72 hermon_rsrc_t *cqc, *rsrc;
73 73 hermon_umap_db_entry_t *umapdb;
74 74 hermon_hw_cqc_t cqc_entry;
75 75 hermon_cqhdl_t cq;
76 76 ibt_mr_attr_t mr_attr;
↓ open down ↓ |
76 lines elided |
↑ open up ↑ |
77 77 hermon_mr_options_t op;
78 78 hermon_pdhdl_t pd;
79 79 hermon_mrhdl_t mr;
80 80 hermon_hw_cqe_t *buf;
81 81 uint64_t value;
82 82 uint32_t log_cq_size, uarpg;
83 83 uint_t cq_is_umap;
84 84 uint32_t status, flag;
85 85 hermon_cq_sched_t *cq_schedp;
86 86
87 - _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*cq_attr))
88 -
89 87 /*
90 88 * Determine whether CQ is being allocated for userland access or
91 89 * whether it is being allocated for kernel access. If the CQ is
92 90 * being allocated for userland access, then lookup the UAR
93 91 * page number for the current process. Note: If this is not found
94 92 * (e.g. if the process has not previously open()'d the Hermon driver),
95 93 * then an error is returned.
96 94 */
97 95 cq_is_umap = (cq_attr->cq_flags & IBT_CQ_USER_MAP) ? 1 : 0;
98 96 if (cq_is_umap) {
99 97 status = hermon_umap_db_find(state->hs_instance, ddi_get_pid(),
100 98 MLNX_UMAP_UARPG_RSRC, &value, 0, NULL);
101 99 if (status != DDI_SUCCESS) {
102 100 status = IBT_INVALID_PARAM;
103 101 goto cqalloc_fail;
104 102 }
105 103 uarpg = ((hermon_rsrc_t *)(uintptr_t)value)->hr_indx;
106 104 } else {
107 105 uarpg = state->hs_kernel_uar_index;
108 106 }
109 107
110 108 /* Use the internal protection domain (PD) for setting up CQs */
111 109 pd = state->hs_pdhdl_internal;
112 110
113 111 /* Increment the reference count on the protection domain (PD) */
114 112 hermon_pd_refcnt_inc(pd);
115 113
116 114 /*
117 115 * Allocate an CQ context entry. This will be filled in with all
118 116 * the necessary parameters to define the Completion Queue. And then
119 117 * ownership will be passed to the hardware in the final step
120 118 * below. If we fail here, we must undo the protection domain
121 119 * reference count.
122 120 */
123 121 status = hermon_rsrc_alloc(state, HERMON_CQC, 1, sleepflag, &cqc);
124 122 if (status != DDI_SUCCESS) {
125 123 status = IBT_INSUFF_RESOURCE;
126 124 goto cqalloc_fail1;
127 125 }
128 126
129 127 /*
130 128 * Allocate the software structure for tracking the completion queue
↓ open down ↓ |
32 lines elided |
↑ open up ↑ |
131 129 * (i.e. the Hermon Completion Queue handle). If we fail here, we must
132 130 * undo the protection domain reference count and the previous
133 131 * resource allocation.
134 132 */
135 133 status = hermon_rsrc_alloc(state, HERMON_CQHDL, 1, sleepflag, &rsrc);
136 134 if (status != DDI_SUCCESS) {
137 135 status = IBT_INSUFF_RESOURCE;
138 136 goto cqalloc_fail2;
139 137 }
140 138 cq = (hermon_cqhdl_t)rsrc->hr_addr;
141 - _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*cq))
142 139 cq->cq_is_umap = cq_is_umap;
143 140 cq->cq_cqnum = cqc->hr_indx; /* just use index, implicit in Hermon */
144 141 cq->cq_intmod_count = 0;
145 142 cq->cq_intmod_usec = 0;
146 143
147 144 /*
148 145 * If this will be a user-mappable CQ, then allocate an entry for
149 146 * the "userland resources database". This will later be added to
150 147 * the database (after all further CQ operations are successful).
151 148 * If we fail here, we must undo the reference counts and the
152 149 * previous resource allocation.
153 150 */
154 151 if (cq->cq_is_umap) {
155 152 umapdb = hermon_umap_db_alloc(state->hs_instance, cq->cq_cqnum,
156 153 MLNX_UMAP_CQMEM_RSRC, (uint64_t)(uintptr_t)rsrc);
157 154 if (umapdb == NULL) {
158 155 status = IBT_INSUFF_RESOURCE;
159 156 goto cqalloc_fail3;
160 157 }
161 158 }
162 159
163 160
164 161 /*
165 162 * Allocate the doorbell record. We'll need one for the CQ, handling
166 163 * both consumer index (SET CI) and the CQ state (CQ ARM).
167 164 */
168 165
169 166 status = hermon_dbr_alloc(state, uarpg, &cq->cq_arm_ci_dbr_acchdl,
170 167 &cq->cq_arm_ci_vdbr, &cq->cq_arm_ci_pdbr, &cq->cq_dbr_mapoffset);
171 168 if (status != DDI_SUCCESS) {
172 169 status = IBT_INSUFF_RESOURCE;
173 170 goto cqalloc_fail4;
174 171 }
175 172
176 173 /*
177 174 * Calculate the appropriate size for the completion queue.
178 175 * Note: All Hermon CQs must be a power-of-2 minus 1 in size. Also
179 176 * they may not be any smaller than HERMON_CQ_MIN_SIZE. This step is
180 177 * to round the requested size up to the next highest power-of-2
181 178 */
182 179 cq_attr->cq_size = max(cq_attr->cq_size, HERMON_CQ_MIN_SIZE);
183 180 log_cq_size = highbit(cq_attr->cq_size);
184 181
185 182 /*
186 183 * Next we verify that the rounded-up size is valid (i.e. consistent
187 184 * with the device limits and/or software-configured limits)
188 185 */
189 186 if (log_cq_size > state->hs_cfg_profile->cp_log_max_cq_sz) {
190 187 status = IBT_HCA_CQ_EXCEEDED;
191 188 goto cqalloc_fail4a;
192 189 }
193 190
194 191 /*
195 192 * Allocate the memory for Completion Queue.
196 193 *
197 194 * Note: Although we use the common queue allocation routine, we
198 195 * always specify HERMON_QUEUE_LOCATION_NORMAL (i.e. CQ located in
199 196 * kernel system memory) for kernel CQs because it would be
200 197 * inefficient to have CQs located in DDR memory. This is primarily
201 198 * because CQs are read from (by software) more than they are written
202 199 * to. (We always specify HERMON_QUEUE_LOCATION_USERLAND for all
203 200 * user-mappable CQs for a similar reason.)
204 201 * It is also worth noting that, unlike Hermon QP work queues,
205 202 * completion queues do not have the same strict alignment
206 203 * requirements. It is sufficient for the CQ memory to be both
207 204 * aligned to and bound to addresses which are a multiple of CQE size.
208 205 */
209 206 cq->cq_cqinfo.qa_size = (1 << log_cq_size) * sizeof (hermon_hw_cqe_t);
210 207
211 208 cq->cq_cqinfo.qa_alloc_align = PAGESIZE;
212 209 cq->cq_cqinfo.qa_bind_align = PAGESIZE;
213 210 if (cq->cq_is_umap) {
214 211 cq->cq_cqinfo.qa_location = HERMON_QUEUE_LOCATION_USERLAND;
↓ open down ↓ |
63 lines elided |
↑ open up ↑ |
215 212 } else {
216 213 cq->cq_cqinfo.qa_location = HERMON_QUEUE_LOCATION_NORMAL;
217 214 hermon_arm_cq_dbr_init(cq->cq_arm_ci_vdbr);
218 215 }
219 216 status = hermon_queue_alloc(state, &cq->cq_cqinfo, sleepflag);
220 217 if (status != DDI_SUCCESS) {
221 218 status = IBT_INSUFF_RESOURCE;
222 219 goto cqalloc_fail4;
223 220 }
224 221 buf = (hermon_hw_cqe_t *)cq->cq_cqinfo.qa_buf_aligned;
225 - _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*buf))
226 222
227 223 /*
228 224	 * The ownership bit of the CQEs is set by the HW during the process
229 225	 * of transferring ownership of the CQ (PRM 09.35c, 14.2.1, note D1).
230 226 *
231 227 */
232 228
233 229 /*
234 230 * Register the memory for the CQ. The memory for the CQ must
235 231 * be registered in the Hermon TPT tables. This gives us the LKey
236 232 * to specify in the CQ context below. Note: If this is a user-
237 233 * mappable CQ, then we will force DDI_DMA_CONSISTENT mapping.
238 234 */
239 235 flag = (sleepflag == HERMON_SLEEP) ? IBT_MR_SLEEP : IBT_MR_NOSLEEP;
240 236 mr_attr.mr_vaddr = (uint64_t)(uintptr_t)buf;
241 237 mr_attr.mr_len = cq->cq_cqinfo.qa_size;
242 238 mr_attr.mr_as = NULL;
↓ open down ↓ |
7 lines elided |
↑ open up ↑ |
243 239 mr_attr.mr_flags = flag | IBT_MR_ENABLE_LOCAL_WRITE;
244 240 op.mro_bind_type = state->hs_cfg_profile->cp_iommu_bypass;
245 241 op.mro_bind_dmahdl = cq->cq_cqinfo.qa_dmahdl;
246 242 op.mro_bind_override_addr = 0;
247 243 status = hermon_mr_register(state, pd, &mr_attr, &mr, &op,
248 244 HERMON_CQ_CMPT);
249 245 if (status != DDI_SUCCESS) {
250 246 status = IBT_INSUFF_RESOURCE;
251 247 goto cqalloc_fail5;
252 248 }
253 - _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*mr))
254 249
255 250 cq->cq_erreqnum = HERMON_CQ_ERREQNUM_GET(state);
256 251 if (cq_attr->cq_flags & IBT_CQ_HID) {
257 252 if (!HERMON_HID_VALID(state, cq_attr->cq_hid)) {
258 253 IBTF_DPRINTF_L2("CQalloc", "bad handler id 0x%x",
259 254 cq_attr->cq_hid);
260 255 status = IBT_INVALID_PARAM;
261 256 goto cqalloc_fail5;
262 257 }
263 258 cq->cq_eqnum = HERMON_HID_TO_EQNUM(state, cq_attr->cq_hid);
264 259 IBTF_DPRINTF_L2("cqalloc", "hid: eqn %d", cq->cq_eqnum);
265 260 } else {
266 261 cq_schedp = (hermon_cq_sched_t *)cq_attr->cq_sched;
267 262 if (cq_schedp == NULL) {
268 263 cq_schedp = &state->hs_cq_sched_default;
269 264 } else if (cq_schedp != &state->hs_cq_sched_default) {
270 265 int i;
271 266 hermon_cq_sched_t *tmp;
272 267
273 268 tmp = state->hs_cq_sched_array;
274 269 for (i = 0; i < state->hs_cq_sched_array_size; i++)
275 270 if (cq_schedp == &tmp[i])
276 271 break; /* found it */
277 272 if (i >= state->hs_cq_sched_array_size) {
278 273 cmn_err(CE_CONT, "!Invalid cq_sched argument: "
279 274 "ignored\n");
280 275 cq_schedp = &state->hs_cq_sched_default;
281 276 }
282 277 }
283 278 cq->cq_eqnum = HERMON_HID_TO_EQNUM(state,
284 279 HERMON_CQSCHED_NEXT_HID(cq_schedp));
285 280 IBTF_DPRINTF_L2("cqalloc", "sched: first-1 %d, len %d, "
286 281 "eqn %d", cq_schedp->cqs_start_hid - 1,
287 282 cq_schedp->cqs_len, cq->cq_eqnum);
288 283 }
289 284
290 285 /*
291 286 * Fill in the CQC entry. This is the final step before passing
292 287 * ownership of the CQC entry to the Hermon hardware. We use all of
293 288 * the information collected/calculated above to fill in the
294 289 * requisite portions of the CQC. Note: If this CQ is going to be
295 290 * used for userland access, then we need to set the UAR page number
296 291 * appropriately (otherwise it's a "don't care")
297 292 */
298 293 bzero(&cqc_entry, sizeof (hermon_hw_cqc_t));
299 294
300 295 cqc_entry.state = HERMON_CQ_DISARMED;
301 296 cqc_entry.pg_offs = cq->cq_cqinfo.qa_pgoffs >> 5;
302 297 cqc_entry.log_cq_sz = log_cq_size;
303 298 cqc_entry.usr_page = uarpg;
304 299 cqc_entry.c_eqn = cq->cq_eqnum;
305 300 cqc_entry.log2_pgsz = mr->mr_log2_pgsz;
306 301 cqc_entry.mtt_base_addh = (uint32_t)((mr->mr_mttaddr >> 32) & 0xFF);
307 302 cqc_entry.mtt_base_addl = mr->mr_mttaddr >> 3;
308 303 cqc_entry.dbr_addrh = (uint32_t)((uint64_t)cq->cq_arm_ci_pdbr >> 32);
309 304 cqc_entry.dbr_addrl = (uint32_t)((uint64_t)cq->cq_arm_ci_pdbr >> 3);
310 305
311 306 /*
312 307 * Write the CQC entry to hardware - we pass ownership of
313 308 * the entry to the hardware (using the Hermon SW2HW_CQ firmware
314 309 * command). Note: In general, this operation shouldn't fail. But
315 310 * if it does, we have to undo everything we've done above before
316 311 * returning error.
317 312 */
318 313 status = hermon_cmn_ownership_cmd_post(state, SW2HW_CQ, &cqc_entry,
319 314 sizeof (hermon_hw_cqc_t), cq->cq_cqnum, sleepflag);
320 315 if (status != HERMON_CMD_SUCCESS) {
321 316 cmn_err(CE_CONT, "Hermon: SW2HW_CQ command failed: %08x\n",
322 317 status);
323 318 if (status == HERMON_CMD_INVALID_STATUS) {
324 319 hermon_fm_ereport(state, HCA_SYS_ERR, HCA_ERR_SRV_LOST);
325 320 }
326 321 status = ibc_get_ci_failure(0);
327 322 goto cqalloc_fail6;
328 323 }
329 324
330 325 /*
331 326 * Fill in the rest of the Hermon Completion Queue handle. Having
332 327 * successfully transferred ownership of the CQC, we can update the
333 328 * following fields for use in further operations on the CQ.
334 329 */
335 330 cq->cq_resize_hdl = 0;
336 331 cq->cq_cqcrsrcp = cqc;
337 332 cq->cq_rsrcp = rsrc;
338 333 cq->cq_consindx = 0;
339 334 /* least restrictive */
340 335 cq->cq_buf = buf;
341 336 cq->cq_bufsz = (1 << log_cq_size);
342 337 cq->cq_log_cqsz = log_cq_size;
343 338 cq->cq_mrhdl = mr;
344 339 cq->cq_refcnt = 0;
345 340 cq->cq_is_special = 0;
346 341 cq->cq_uarpg = uarpg;
347 342 cq->cq_umap_dhp = (devmap_cookie_t)NULL;
348 343 avl_create(&cq->cq_wrid_wqhdr_avl_tree, hermon_wrid_workq_compare,
349 344 sizeof (struct hermon_workq_avl_s),
350 345 offsetof(struct hermon_workq_avl_s, wqa_link));
351 346
352 347 cq->cq_hdlrarg = (void *)ibt_cqhdl;
353 348
354 349 /*
355 350 * Put CQ handle in Hermon CQNum-to-CQHdl list. Then fill in the
356 351 * "actual_size" and "cqhdl" and return success
357 352 */
358 353 hermon_icm_set_num_to_hdl(state, HERMON_CQC, cqc->hr_indx, cq);
359 354
360 355 /*
361 356 * If this is a user-mappable CQ, then we need to insert the previously
362 357 * allocated entry into the "userland resources database". This will
363 358 * allow for later lookup during devmap() (i.e. mmap()) calls.
364 359 */
365 360 if (cq->cq_is_umap) {
366 361 hermon_umap_db_add(umapdb);
367 362 }
368 363
369 364 /*
370 365 * Fill in the return arguments (if necessary). This includes the
371 366 * real completion queue size.
372 367 */
373 368 if (actual_size != NULL) {
374 369 *actual_size = (1 << log_cq_size) - 1;
375 370 }
376 371 *cqhdl = cq;
377 372
378 373 return (DDI_SUCCESS);
379 374
380 375 /*
381 376 * The following is cleanup for all possible failure cases in this routine
382 377 */
383 378 cqalloc_fail6:
384 379 if (hermon_mr_deregister(state, &mr, HERMON_MR_DEREG_ALL,
385 380 sleepflag) != DDI_SUCCESS) {
386 381 HERMON_WARNING(state, "failed to deregister CQ memory");
387 382 }
388 383 cqalloc_fail5:
389 384 hermon_queue_free(&cq->cq_cqinfo);
390 385 cqalloc_fail4a:
391 386 hermon_dbr_free(state, uarpg, cq->cq_arm_ci_vdbr);
392 387 cqalloc_fail4:
393 388 if (cq_is_umap) {
394 389 hermon_umap_db_free(umapdb);
395 390 }
396 391 cqalloc_fail3:
397 392 hermon_rsrc_free(state, &rsrc);
398 393 cqalloc_fail2:
399 394 hermon_rsrc_free(state, &cqc);
400 395 cqalloc_fail1:
401 396 hermon_pd_refcnt_dec(pd);
402 397 cqalloc_fail:
403 398 return (status);
404 399 }
405 400
406 401
407 402 /*
408 403 * hermon_cq_free()
409 404 * Context: Can be called only from user or kernel context.
410 405 */
411 406 /* ARGSUSED */
412 407 int
413 408 hermon_cq_free(hermon_state_t *state, hermon_cqhdl_t *cqhdl, uint_t sleepflag)
414 409 {
415 410 hermon_rsrc_t *cqc, *rsrc;
416 411 hermon_umap_db_entry_t *umapdb;
417 412 hermon_hw_cqc_t cqc_entry;
418 413 hermon_pdhdl_t pd;
419 414 hermon_mrhdl_t mr;
420 415 hermon_cqhdl_t cq, resize;
421 416 uint32_t cqnum;
422 417 uint64_t value;
423 418 uint_t maxprot;
424 419 int status;
425 420
426 421 /*
427 422 * Pull all the necessary information from the Hermon Completion Queue
428 423 * handle. This is necessary here because the resource for the
429 424 * CQ handle is going to be freed up as part of this operation.
430 425 */
431 426 cq = *cqhdl;
432 427 mutex_enter(&cq->cq_lock);
433 428 cqc = cq->cq_cqcrsrcp;
434 429 rsrc = cq->cq_rsrcp;
435 430 pd = state->hs_pdhdl_internal;
436 431 mr = cq->cq_mrhdl;
437 432 cqnum = cq->cq_cqnum;
438 433
439 434 resize = cq->cq_resize_hdl; /* save the handle for later */
440 435
441 436 /*
442 437 * If there are work queues still associated with the CQ, then return
443 438 * an error. Otherwise, we will be holding the CQ lock.
444 439 */
445 440 if (cq->cq_refcnt != 0) {
446 441 mutex_exit(&cq->cq_lock);
447 442 return (IBT_CQ_BUSY);
448 443 }
449 444
450 445 /*
451 446 * If this was a user-mappable CQ, then we need to remove its entry
452 447 * from the "userland resources database". If it is also currently
453 448 * mmap()'d out to a user process, then we need to call
454 449 * devmap_devmem_remap() to remap the CQ memory to an invalid mapping.
455 450 * We also need to invalidate the CQ tracking information for the
456 451 * user mapping.
457 452 */
458 453 if (cq->cq_is_umap) {
459 454 status = hermon_umap_db_find(state->hs_instance, cqnum,
460 455 MLNX_UMAP_CQMEM_RSRC, &value, HERMON_UMAP_DB_REMOVE,
461 456 &umapdb);
462 457 if (status != DDI_SUCCESS) {
463 458 mutex_exit(&cq->cq_lock);
464 459 HERMON_WARNING(state, "failed to find in database");
465 460 return (ibc_get_ci_failure(0));
466 461 }
467 462 hermon_umap_db_free(umapdb);
468 463 if (cq->cq_umap_dhp != NULL) {
469 464 maxprot = (PROT_READ | PROT_WRITE | PROT_USER);
470 465 status = devmap_devmem_remap(cq->cq_umap_dhp,
471 466 state->hs_dip, 0, 0, cq->cq_cqinfo.qa_size,
472 467 maxprot, DEVMAP_MAPPING_INVALID, NULL);
473 468 if (status != DDI_SUCCESS) {
474 469 mutex_exit(&cq->cq_lock);
475 470 HERMON_WARNING(state, "failed in CQ memory "
476 471 "devmap_devmem_remap()");
477 472 return (ibc_get_ci_failure(0));
478 473 }
479 474 cq->cq_umap_dhp = (devmap_cookie_t)NULL;
480 475 }
↓ open down ↓ |
217 lines elided |
↑ open up ↑ |
481 476 }
482 477
483 478 /*
484 479	 * Put NULL into the Hermon CQNum-to-CQHdl list. This will allow any
485 480 * in-progress events to detect that the CQ corresponding to this
486 481 * number has been freed.
487 482 */
488 483 hermon_icm_set_num_to_hdl(state, HERMON_CQC, cqc->hr_indx, NULL);
489 484
490 485 mutex_exit(&cq->cq_lock);
491 - _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*cq))
492 486
493 487 /*
494 488 * Reclaim CQC entry from hardware (using the Hermon HW2SW_CQ
495 489 * firmware command). If the ownership transfer fails for any reason,
496 490 * then it is an indication that something (either in HW or SW) has
497 491 * gone seriously wrong.
498 492 */
499 493 status = hermon_cmn_ownership_cmd_post(state, HW2SW_CQ, &cqc_entry,
500 494 sizeof (hermon_hw_cqc_t), cqnum, sleepflag);
501 495 if (status != HERMON_CMD_SUCCESS) {
502 496 HERMON_WARNING(state, "failed to reclaim CQC ownership");
503 497 cmn_err(CE_CONT, "Hermon: HW2SW_CQ command failed: %08x\n",
504 498 status);
505 499 if (status == HERMON_CMD_INVALID_STATUS) {
506 500 hermon_fm_ereport(state, HCA_SYS_ERR, HCA_ERR_SRV_LOST);
507 501 }
508 502 return (ibc_get_ci_failure(0));
509 503 }
510 504
511 505 /*
512 506	 * From here on, we start relinquishing resources - but check to see
513 507 * if a resize was in progress - if so, we need to relinquish those
514 508 * resources as well
515 509 */
516 510
517 511
518 512 /*
519 513 * Deregister the memory for the Completion Queue. If this fails
520 514 * for any reason, then it is an indication that something (either
521 515 * in HW or SW) has gone seriously wrong. So we print a warning
522 516 * message and return.
523 517 */
524 518 status = hermon_mr_deregister(state, &mr, HERMON_MR_DEREG_ALL,
525 519 sleepflag);
526 520 if (status != DDI_SUCCESS) {
527 521 HERMON_WARNING(state, "failed to deregister CQ memory");
528 522 return (ibc_get_ci_failure(0));
529 523 }
530 524
531 525 if (resize) { /* there was a pointer to a handle */
532 526 mr = resize->cq_mrhdl; /* reuse the pointer to the region */
533 527 status = hermon_mr_deregister(state, &mr, HERMON_MR_DEREG_ALL,
534 528 sleepflag);
535 529 if (status != DDI_SUCCESS) {
536 530 HERMON_WARNING(state, "failed to deregister resize CQ "
537 531 "memory");
538 532 return (ibc_get_ci_failure(0));
539 533 }
540 534 }
541 535
542 536 /* Free the memory for the CQ */
543 537 hermon_queue_free(&cq->cq_cqinfo);
544 538 if (resize) {
545 539 hermon_queue_free(&resize->cq_cqinfo);
546 540 /* and the temporary handle */
547 541 kmem_free(resize, sizeof (struct hermon_sw_cq_s));
548 542 }
549 543
550 544 /* everything else does not matter for the resize in progress */
551 545
552 546 /* Free the dbr */
553 547 hermon_dbr_free(state, cq->cq_uarpg, cq->cq_arm_ci_vdbr);
554 548
555 549 /* Free the Hermon Completion Queue handle */
556 550 hermon_rsrc_free(state, &rsrc);
557 551
558 552 /* Free up the CQC entry resource */
559 553 hermon_rsrc_free(state, &cqc);
560 554
561 555 /* Decrement the reference count on the protection domain (PD) */
562 556 hermon_pd_refcnt_dec(pd);
563 557
564 558 /* Set the cqhdl pointer to NULL and return success */
565 559 *cqhdl = NULL;
566 560
567 561 return (DDI_SUCCESS);
568 562 }
569 563
570 564
571 565 /*
572 566 * hermon_cq_resize()
573 567 * Context: Can be called only from user or kernel context.
574 568 */
575 569 int
576 570 hermon_cq_resize(hermon_state_t *state, hermon_cqhdl_t cq, uint_t req_size,
577 571 uint_t *actual_size, uint_t sleepflag)
578 572 {
579 573 hermon_hw_cqc_t cqc_entry;
580 574 hermon_cqhdl_t resize_hdl;
581 575 hermon_qalloc_info_t new_cqinfo;
582 576 ibt_mr_attr_t mr_attr;
583 577 hermon_mr_options_t op;
584 578 hermon_pdhdl_t pd;
585 579 hermon_mrhdl_t mr;
586 580 hermon_hw_cqe_t *buf;
587 581 uint32_t new_prod_indx;
588 582 uint_t log_cq_size;
589 583 int status, flag;
590 584
591 585 if (cq->cq_resize_hdl != 0) { /* already in process */
592 586 status = IBT_CQ_BUSY;
593 587 goto cqresize_fail;
594 588 }
595 589
596 590
597 591 /* Use the internal protection domain (PD) for CQs */
598 592 pd = state->hs_pdhdl_internal;
599 593
600 594 /*
601 595 * Calculate the appropriate size for the new resized completion queue.
602 596 * Note: All Hermon CQs must be a power-of-2 minus 1 in size. Also
603 597 * they may not be any smaller than HERMON_CQ_MIN_SIZE. This step is
604 598 * to round the requested size up to the next highest power-of-2
605 599 */
606 600 req_size = max(req_size, HERMON_CQ_MIN_SIZE);
607 601 log_cq_size = highbit(req_size);
608 602
609 603 /*
610 604 * Next we verify that the rounded-up size is valid (i.e. consistent
611 605 * with the device limits and/or software-configured limits)
612 606 */
613 607 if (log_cq_size > state->hs_cfg_profile->cp_log_max_cq_sz) {
614 608 status = IBT_HCA_CQ_EXCEEDED;
615 609 goto cqresize_fail;
616 610 }
617 611
618 612 /*
619 613 * Allocate the memory for newly resized Completion Queue.
620 614 *
621 615 * Note: Although we use the common queue allocation routine, we
622 616 * always specify HERMON_QUEUE_LOCATION_NORMAL (i.e. CQ located in
623 617 * kernel system memory) for kernel CQs because it would be
624 618 * inefficient to have CQs located in DDR memory. This is the same
625 619 * as we do when we first allocate completion queues primarily
626 620 * because CQs are read from (by software) more than they are written
627 621 * to. (We always specify HERMON_QUEUE_LOCATION_USERLAND for all
628 622 * user-mappable CQs for a similar reason.)
629 623 * It is also worth noting that, unlike Hermon QP work queues,
630 624 * completion queues do not have the same strict alignment
631 625 * requirements. It is sufficient for the CQ memory to be both
632 626 * aligned to and bound to addresses which are a multiple of CQE size.
633 627 */
634 628
635 629 /* first, alloc the resize_handle */
636 630 resize_hdl = kmem_zalloc(sizeof (struct hermon_sw_cq_s), KM_SLEEP);
637 631
638 632 new_cqinfo.qa_size = (1 << log_cq_size) * sizeof (hermon_hw_cqe_t);
639 633 new_cqinfo.qa_alloc_align = PAGESIZE;
640 634 new_cqinfo.qa_bind_align = PAGESIZE;
641 635 if (cq->cq_is_umap) {
642 636 new_cqinfo.qa_location = HERMON_QUEUE_LOCATION_USERLAND;
643 637 } else {
↓ open down ↓ |
142 lines elided |
↑ open up ↑ |
644 638 new_cqinfo.qa_location = HERMON_QUEUE_LOCATION_NORMAL;
645 639 }
646 640 status = hermon_queue_alloc(state, &new_cqinfo, sleepflag);
647 641 if (status != DDI_SUCCESS) {
648 642 /* free the resize handle */
649 643 kmem_free(resize_hdl, sizeof (struct hermon_sw_cq_s));
650 644 status = IBT_INSUFF_RESOURCE;
651 645 goto cqresize_fail;
652 646 }
653 647 buf = (hermon_hw_cqe_t *)new_cqinfo.qa_buf_aligned;
654 - _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*buf))
655 648
656 649 /*
657 650 * No initialization of the cq is needed - the command will do it
658 651 */
659 652
660 653 /*
661 654 * Register the memory for the CQ. The memory for the CQ must
662 655 * be registered in the Hermon TPT tables. This gives us the LKey
663 656 * to specify in the CQ context below.
664 657 */
665 658 flag = (sleepflag == HERMON_SLEEP) ? IBT_MR_SLEEP : IBT_MR_NOSLEEP;
666 659 mr_attr.mr_vaddr = (uint64_t)(uintptr_t)buf;
667 660 mr_attr.mr_len = new_cqinfo.qa_size;
668 661 mr_attr.mr_as = NULL;
669 662 mr_attr.mr_flags = flag | IBT_MR_ENABLE_LOCAL_WRITE;
670 663 op.mro_bind_type = state->hs_cfg_profile->cp_iommu_bypass;
671 664 op.mro_bind_dmahdl = new_cqinfo.qa_dmahdl;
↓ open down ↓ |
7 lines elided |
↑ open up ↑ |
672 665 op.mro_bind_override_addr = 0;
673 666 status = hermon_mr_register(state, pd, &mr_attr, &mr, &op,
674 667 HERMON_CQ_CMPT);
675 668 if (status != DDI_SUCCESS) {
676 669 hermon_queue_free(&new_cqinfo);
677 670 /* free the resize handle */
678 671 kmem_free(resize_hdl, sizeof (struct hermon_sw_cq_s));
679 672 status = IBT_INSUFF_RESOURCE;
680 673 goto cqresize_fail;
681 674 }
682 - _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*mr))
683 675
684 676 /*
685 677 * Now we grab the CQ lock. Since we will be updating the actual
686 678 * CQ location and the producer/consumer indexes, we should hold
687 679 * the lock.
688 680 *
689 681	 * We use HERMON_CMD_NOSLEEP_SPIN here (and below), though, because we are
690 682 * holding the "cq_lock" and if we got raised to interrupt level
691 683 * by priority inversion, we would not want to block in this routine
692 684 * waiting for success.
693 685 */
694 686 mutex_enter(&cq->cq_lock);
695 687
696 688 /*
697 689 * Fill in the CQC entry. For the resize operation this is the
698 690 * final step before attempting the resize operation on the CQC entry.
699 691 * We use all of the information collected/calculated above to fill
700 692 * in the requisite portions of the CQC.
701 693 */
702 694 bzero(&cqc_entry, sizeof (hermon_hw_cqc_t));
703 695 cqc_entry.log_cq_sz = log_cq_size;
704 696 cqc_entry.pg_offs = new_cqinfo.qa_pgoffs >> 5;
705 697 cqc_entry.log2_pgsz = mr->mr_log2_pgsz;
706 698 cqc_entry.mtt_base_addh = (uint32_t)((mr->mr_mttaddr >> 32) & 0xFF);
707 699 cqc_entry.mtt_base_addl = mr->mr_mttaddr >> 3;
708 700
709 701 /*
710 702 * Write the CQC entry to hardware. Lastly, we pass ownership of
711 703 * the entry to the hardware (using the Hermon RESIZE_CQ firmware
712 704 * command). Note: In general, this operation shouldn't fail. But
713 705 * if it does, we have to undo everything we've done above before
714 706 * returning error. Also note that the status returned may indicate
715 707 * the code to return to the IBTF.
716 708 */
717 709 status = hermon_resize_cq_cmd_post(state, &cqc_entry, cq->cq_cqnum,
718 710 &new_prod_indx, HERMON_CMD_NOSLEEP_SPIN);
719 711 if (status != HERMON_CMD_SUCCESS) {
720 712 /* Resize attempt has failed, drop CQ lock and cleanup */
721 713 mutex_exit(&cq->cq_lock);
722 714 if (hermon_mr_deregister(state, &mr, HERMON_MR_DEREG_ALL,
723 715 sleepflag) != DDI_SUCCESS) {
724 716 HERMON_WARNING(state, "failed to deregister CQ memory");
725 717 }
726 718 kmem_free(resize_hdl, sizeof (struct hermon_sw_cq_s));
727 719 hermon_queue_free(&new_cqinfo);
728 720 if (status == HERMON_CMD_BAD_SIZE) {
729 721 return (IBT_CQ_SZ_INSUFFICIENT);
730 722 } else {
731 723 cmn_err(CE_CONT, "Hermon: RESIZE_CQ command failed: "
732 724 "%08x\n", status);
733 725 if (status == HERMON_CMD_INVALID_STATUS) {
734 726 hermon_fm_ereport(state, HCA_SYS_ERR,
735 727 HCA_ERR_SRV_LOST);
736 728 }
737 729 return (ibc_get_ci_failure(0));
738 730 }
739 731 }
740 732
741 733 /*
742 734 * For Hermon, we've alloc'd another handle structure and save off the
743 735 * important things in it. Then, in polling we check to see if there's
744 736 * a "resizing handle" and if so we look for the "special CQE", opcode
745 737 * 0x16, that indicates the transition to the new buffer.
746 738 *
747 739 * At that point, we'll adjust everything - including dereg and
748 740 * freeing of the original buffer, updating all the necessary fields
749 741 * in the cq_hdl, and setting up for the next cqe polling
750 742 */
751 743
752 744 resize_hdl->cq_buf = buf;
753 745 resize_hdl->cq_bufsz = (1 << log_cq_size);
754 746 resize_hdl->cq_mrhdl = mr;
755 747 resize_hdl->cq_log_cqsz = log_cq_size;
756 748
757 749 bcopy(&new_cqinfo, &(resize_hdl->cq_cqinfo),
758 750 sizeof (struct hermon_qalloc_info_s));
759 751
760 752 /* now, save the address in the cq_handle */
761 753 cq->cq_resize_hdl = resize_hdl;
762 754
763 755 /*
764 756 * Drop the CQ lock now.
765 757 */
766 758
767 759 mutex_exit(&cq->cq_lock);
768 760 /*
769 761 * Fill in the return arguments (if necessary). This includes the
770 762 * real new completion queue size.
771 763 */
772 764 if (actual_size != NULL) {
773 765 *actual_size = (1 << log_cq_size) - 1;
774 766 }
775 767
776 768 return (DDI_SUCCESS);
777 769
778 770 cqresize_fail:
779 771 return (status);
780 772 }
781 773
782 774
783 775 /*
784 776 * hermon_cq_modify()
785 777	 * Context: Can be called from base context.
786 778 */
787 779 /* ARGSUSED */
788 780 int
789 781 hermon_cq_modify(hermon_state_t *state, hermon_cqhdl_t cq,
790 782 uint_t count, uint_t usec, ibt_cq_handler_id_t hid, uint_t sleepflag)
791 783 {
792 784 int status;
793 785 hermon_hw_cqc_t cqc_entry;
794 786
795 787 mutex_enter(&cq->cq_lock);
796 788 if (count != cq->cq_intmod_count ||
797 789 usec != cq->cq_intmod_usec) {
798 790 bzero(&cqc_entry, sizeof (hermon_hw_cqc_t));
799 791 cqc_entry.cq_max_cnt = count;
800 792 cqc_entry.cq_period = usec;
801 793 status = hermon_modify_cq_cmd_post(state, &cqc_entry,
802 794 cq->cq_cqnum, MODIFY_MODERATION_CQ, sleepflag);
803 795 if (status != HERMON_CMD_SUCCESS) {
804 796 mutex_exit(&cq->cq_lock);
805 797 cmn_err(CE_CONT, "Hermon: MODIFY_MODERATION_CQ "
806 798 "command failed: %08x\n", status);
807 799 if (status == HERMON_CMD_INVALID_STATUS) {
808 800 hermon_fm_ereport(state, HCA_SYS_ERR,
809 801 HCA_ERR_SRV_LOST);
810 802 }
811 803 return (ibc_get_ci_failure(0));
812 804 }
813 805 cq->cq_intmod_count = count;
814 806 cq->cq_intmod_usec = usec;
815 807 }
816 808 if (hid && (hid - 1 != cq->cq_eqnum)) {
817 809 bzero(&cqc_entry, sizeof (hermon_hw_cqc_t));
818 810 cqc_entry.c_eqn = HERMON_HID_TO_EQNUM(state, hid);
819 811 status = hermon_modify_cq_cmd_post(state, &cqc_entry,
820 812 cq->cq_cqnum, MODIFY_EQN, sleepflag);
821 813 if (status != HERMON_CMD_SUCCESS) {
822 814 mutex_exit(&cq->cq_lock);
823 815 cmn_err(CE_CONT, "Hermon: MODIFY_EQN command failed: "
824 816 "%08x\n", status);
825 817 if (status == HERMON_CMD_INVALID_STATUS) {
826 818 hermon_fm_ereport(state, HCA_SYS_ERR,
827 819 HCA_ERR_SRV_LOST);
828 820 }
829 821 return (ibc_get_ci_failure(0));
830 822 }
831 823 cq->cq_eqnum = hid - 1;
832 824 }
833 825 mutex_exit(&cq->cq_lock);
834 826 return (DDI_SUCCESS);
835 827 }
836 828
837 829 /*
838 830 * hermon_cq_notify()
839 831 * Context: Can be called from interrupt or base context.
840 832 */
841 833 int
842 834 hermon_cq_notify(hermon_state_t *state, hermon_cqhdl_t cq,
843 835 ibt_cq_notify_flags_t flags)
844 836 {
845 837 uint_t cmd;
846 838 ibt_status_t status;
847 839
848 840 /* Validate IBT flags and call doorbell routine. */
849 841 if (flags == IBT_NEXT_COMPLETION) {
850 842 cmd = HERMON_CQDB_NOTIFY_CQ;
851 843 } else if (flags == IBT_NEXT_SOLICITED) {
852 844 cmd = HERMON_CQDB_NOTIFY_CQ_SOLICIT;
853 845 } else {
854 846 return (IBT_CQ_NOTIFY_TYPE_INVALID);
855 847 }
856 848
857 849 status = hermon_cq_arm_doorbell(state, cq, cmd);
858 850 return (status);
859 851 }
860 852
861 853
/*
 * hermon_cq_poll()
 *    Context: Can be called from interrupt or base context.
 *
 * Pulls up to "num_wc" completed work requests off the CQ, translating
 * each CQE into an ibt_wc_t in "wc_p".  The number actually polled is
 * returned through "num_polled" (if non-NULL).  Returns DDI_SUCCESS if
 * at least one completion was consumed, IBT_CQ_EMPTY if none, or
 * IBT_CQ_HDL_INVALID for user-mappable CQs.
 */
int
hermon_cq_poll(hermon_state_t *state, hermon_cqhdl_t cq, ibt_wc_t *wc_p,
    uint_t num_wc, uint_t *num_polled)
{
	hermon_hw_cqe_t	*cqe;
	uint_t		opcode;
	uint32_t	cons_indx, wrap_around_mask, shift, mask;
	uint32_t	polled_cnt, spec_op = 0;
	int		status;

	/*
	 * Check for user-mappable CQ memory.  Note:  We do not allow kernel
	 * clients to poll CQ memory that is accessible directly by the user.
	 * If the CQ memory is user accessible, then return an error.
	 */
	if (cq->cq_is_umap) {
		return (IBT_CQ_HDL_INVALID);
	}

	mutex_enter(&cq->cq_lock);

	/* Get the consumer index */
	cons_indx = cq->cq_consindx;
	shift = cq->cq_log_cqsz;
	mask = cq->cq_bufsz;

	/*
	 * Calculate the wrap around mask.  Note: This operation only works
	 * because all Hermon completion queues have power-of-2 sizes
	 */
	wrap_around_mask = (cq->cq_bufsz - 1);

	/* Calculate the pointer to the first CQ entry */
	cqe = &cq->cq_buf[cons_indx & wrap_around_mask];

	/*
	 * Keep pulling entries from the CQ until we find an entry owned by
	 * the hardware.  As long as there the CQE's owned by SW, process
	 * each entry by calling hermon_cq_cqe_consume() and updating the CQ
	 * consumer index.  Note:  We only update the consumer index if
	 * hermon_cq_cqe_consume() returns HERMON_CQ_SYNC_AND_DB.  Otherwise,
	 * it indicates that we are going to "recycle" the CQE (probably
	 * because it is a error CQE and corresponds to more than one
	 * completion).
	 */
	polled_cnt = 0;
	while (HERMON_CQE_OWNER_IS_SW(cq, cqe, cons_indx, shift, mask)) {
		if (cq->cq_resize_hdl != 0) {	/* in midst of resize */
			/* peek at the opcode */
			opcode = HERMON_CQE_OPCODE_GET(cq, cqe);
			if (opcode == HERMON_CQE_RCV_RESIZE_CODE) {
				/*
				 * Special "resize complete" CQE: switch over
				 * to the new buffer before continuing.
				 */
				hermon_cq_resize_helper(state, cq);

				/* Increment the consumer index */
				cons_indx = (cons_indx + 1);
				spec_op = 1; /* plus one for the limiting CQE */

				/* Buffer (and size) changed; recompute mask */
				wrap_around_mask = (cq->cq_bufsz - 1);

				/* Update the pointer to the next CQ entry */
				cqe = &cq->cq_buf[cons_indx & wrap_around_mask];

				continue;
			}
		}	/* in resizing CQ */

		/*
		 * either resizing and not the special opcode, or
		 * not resizing at all
		 */
		hermon_cq_cqe_consume(state, cq, cqe, &wc_p[polled_cnt++]);

		/* Increment the consumer index */
		cons_indx = (cons_indx + 1);

		/* Update the pointer to the next CQ entry */
		cqe = &cq->cq_buf[cons_indx & wrap_around_mask];

		/*
		 * If we have run out of space to store work completions,
		 * then stop and return the ones we have pulled of the CQ.
		 */
		if (polled_cnt >= num_wc) {
			break;
		}
	}

	/*
	 * Now we only ring the doorbell (to update the consumer index) if
	 * we've actually consumed a CQ entry.
	 */
	if ((polled_cnt != 0) && (cq->cq_consindx != cons_indx)) {
		/*
		 * Update the consumer index in both the CQ handle and the
		 * doorbell record.
		 */
		cq->cq_consindx = cons_indx;
		hermon_cq_update_ci_doorbell(cq);

	} else if (polled_cnt == 0) {
		if (spec_op != 0) {
			/* if we got the special opcode, update the consindx */
			cq->cq_consindx = cons_indx;
			hermon_cq_update_ci_doorbell(cq);
		}
	}

	mutex_exit(&cq->cq_lock);

	/* Set "num_polled" (if necessary) */
	if (num_polled != NULL) {
		*num_polled = polled_cnt;
	}

	/* Set CQ_EMPTY condition if needed, otherwise return success */
	if (polled_cnt == 0) {
		status = IBT_CQ_EMPTY;
	} else {
		status = DDI_SUCCESS;
	}

	/*
	 * Check if the system is currently panicking.  If it is, then call
	 * the Hermon interrupt service routine.  This step is necessary here
	 * because we might be in a polled I/O mode and without the call to
	 * hermon_isr() - and its subsequent calls to poll and rearm each
	 * event queue - we might overflow our EQs and render the system
	 * unable to sync/dump.
	 */
	if (ddi_in_panic() != 0) {
		(void) hermon_isr((caddr_t)state, (caddr_t)NULL);
	}
	return (status);
}
1000 992
1001 993 /*
1002 994 * cmd_sn must be initialized to 1 to enable proper reenabling
1003 995 * by hermon_arm_cq_dbr_update().
1004 996 */
1005 997 static void
1006 998 hermon_arm_cq_dbr_init(hermon_dbr_t *cq_arm_dbr)
1007 999 {
1008 1000 uint32_t *target;
1009 1001
1010 1002 target = (uint32_t *)cq_arm_dbr + 1;
1011 1003 *target = htonl(1 << HERMON_CQDB_CMDSN_SHIFT);
1012 1004 }
1013 1005
1014 1006
/*
 * User cmd_sn needs help from this kernel function to know
 * when it should be incremented (modulo 4).  We do an atomic
 * update of the arm_cq dbr to communicate this fact.  We retry
 * in the case that user library is racing with us.  We zero
 * out the cmd field so that the user library can use the cmd
 * field to track the last command it issued (solicited verses any).
 */
static void
hermon_arm_cq_dbr_update(hermon_dbr_t *cq_arm_dbr)
{
	uint32_t	tmp, cmp, new;
	uint32_t	old_cmd_sn, new_cmd_sn;
	uint32_t	*target;
	int		retries = 0;

	/* The arm word is the second 32-bit word of the doorbell record */
	target = (uint32_t *)cq_arm_dbr + 1;
retry:
	/* Snapshot the current (big-endian) value and byte-swap to host */
	cmp = *target;
	tmp = htonl(cmp);
	/* Advance the 2-bit cmd_sn field (modulo 4) */
	old_cmd_sn = tmp & (0x3 << HERMON_CQDB_CMDSN_SHIFT);
	new_cmd_sn = (old_cmd_sn + (0x1 << HERMON_CQDB_CMDSN_SHIFT)) &
	    (0x3 << HERMON_CQDB_CMDSN_SHIFT);
	/*
	 * Clear both the cmd and cmd_sn fields (the 0x37 mask covers
	 * both bit ranges), then merge in the incremented cmd_sn.
	 */
	new = htonl((tmp & ~(0x37 << HERMON_CQDB_CMD_SHIFT)) | new_cmd_sn);
	tmp = atomic_cas_32(target, cmp, new);
	if (tmp != cmp) {	/* cas failed, so need to retry */
		drv_usecwait(retries & 0xff); /* avoid race */
		if (++retries > 100000) {
			/* Should not happen; log and keep trying */
			cmn_err(CE_CONT, "cas failed in hermon\n");
			retries = 0;
		}
		goto retry;
	}
}
1049 1041
1050 1042
1051 1043 /*
1052 1044 * hermon_cq_handler()
1053 1045 * Context: Only called from interrupt context
1054 1046 */
1055 1047 /* ARGSUSED */
1056 1048 int
1057 1049 hermon_cq_handler(hermon_state_t *state, hermon_eqhdl_t eq,
1058 1050 hermon_hw_eqe_t *eqe)
1059 1051 {
1060 1052 hermon_cqhdl_t cq;
1061 1053 uint_t cqnum;
1062 1054
1063 1055 /* Get the CQ handle from CQ number in event descriptor */
1064 1056 cqnum = HERMON_EQE_CQNUM_GET(eq, eqe);
1065 1057 cq = hermon_cqhdl_from_cqnum(state, cqnum);
1066 1058
1067 1059 /*
1068 1060 * If the CQ handle is NULL, this is probably an indication
1069 1061 * that the CQ has been freed already. In which case, we
1070 1062 * should not deliver this event.
1071 1063 *
1072 1064 * We also check that the CQ number in the handle is the
1073 1065 * same as the CQ number in the event queue entry. This
1074 1066 * extra check allows us to handle the case where a CQ was
1075 1067 * freed and then allocated again in the time it took to
1076 1068 * handle the event queue processing. By constantly incrementing
1077 1069 * the non-constrained portion of the CQ number every time
1078 1070 * a new CQ is allocated, we mitigate (somewhat) the chance
1079 1071 * that a stale event could be passed to the client's CQ
1080 1072 * handler.
1081 1073 *
1082 1074 * Lastly, we check if "hs_ibtfpriv" is NULL. If it is then it
1083 1075 * means that we've have either received this event before we
1084 1076 * finished attaching to the IBTF or we've received it while we
1085 1077 * are in the process of detaching.
1086 1078 */
1087 1079 if ((cq != NULL) && (cq->cq_cqnum == cqnum) &&
1088 1080 (state->hs_ibtfpriv != NULL)) {
1089 1081 hermon_arm_cq_dbr_update(cq->cq_arm_ci_vdbr);
1090 1082 HERMON_DO_IBTF_CQ_CALLB(state, cq);
1091 1083 }
1092 1084
1093 1085 return (DDI_SUCCESS);
1094 1086 }
1095 1087
1096 1088
1097 1089 /*
1098 1090 * hermon_cq_err_handler()
1099 1091 * Context: Only called from interrupt context
1100 1092 */
1101 1093 /* ARGSUSED */
1102 1094 int
1103 1095 hermon_cq_err_handler(hermon_state_t *state, hermon_eqhdl_t eq,
1104 1096 hermon_hw_eqe_t *eqe)
1105 1097 {
1106 1098 hermon_cqhdl_t cq;
1107 1099 uint_t cqnum;
1108 1100 ibc_async_event_t event;
1109 1101 ibt_async_code_t type;
1110 1102
1111 1103 HERMON_FMANOTE(state, HERMON_FMA_OVERRUN);
1112 1104 /* Get the CQ handle from CQ number in event descriptor */
1113 1105 cqnum = HERMON_EQE_CQNUM_GET(eq, eqe);
1114 1106 cq = hermon_cqhdl_from_cqnum(state, cqnum);
1115 1107
1116 1108 /*
1117 1109 * If the CQ handle is NULL, this is probably an indication
1118 1110 * that the CQ has been freed already. In which case, we
1119 1111 * should not deliver this event.
1120 1112 *
1121 1113 * We also check that the CQ number in the handle is the
1122 1114 * same as the CQ number in the event queue entry. This
1123 1115 * extra check allows us to handle the case where a CQ was
1124 1116 * freed and then allocated again in the time it took to
1125 1117 * handle the event queue processing. By constantly incrementing
1126 1118 * the non-constrained portion of the CQ number every time
1127 1119 * a new CQ is allocated, we mitigate (somewhat) the chance
1128 1120 * that a stale event could be passed to the client's CQ
1129 1121 * handler.
1130 1122 *
1131 1123 * And then we check if "hs_ibtfpriv" is NULL. If it is then it
1132 1124 * means that we've have either received this event before we
1133 1125 * finished attaching to the IBTF or we've received it while we
1134 1126 * are in the process of detaching.
1135 1127 */
1136 1128 if ((cq != NULL) && (cq->cq_cqnum == cqnum) &&
1137 1129 (state->hs_ibtfpriv != NULL)) {
1138 1130 event.ev_cq_hdl = (ibt_cq_hdl_t)cq->cq_hdlrarg;
1139 1131 type = IBT_ERROR_CQ;
1140 1132 HERMON_DO_IBTF_ASYNC_CALLB(state, type, &event);
1141 1133 }
1142 1134
1143 1135 return (DDI_SUCCESS);
1144 1136 }
1145 1137
1146 1138
1147 1139 /*
1148 1140 * hermon_cq_refcnt_inc()
1149 1141 * Context: Can be called from interrupt or base context.
1150 1142 */
1151 1143 int
1152 1144 hermon_cq_refcnt_inc(hermon_cqhdl_t cq, uint_t is_special)
1153 1145 {
1154 1146 /*
1155 1147 * Increment the completion queue's reference count. Note: In order
1156 1148 * to ensure compliance with IBA C11-15, we must ensure that a given
1157 1149 * CQ is not used for both special (SMI/GSI) QP and non-special QP.
1158 1150 * This is accomplished here by keeping track of how the referenced
1159 1151 * CQ is being used.
1160 1152 */
1161 1153 mutex_enter(&cq->cq_lock);
1162 1154 if (cq->cq_refcnt == 0) {
1163 1155 cq->cq_is_special = is_special;
1164 1156 } else {
1165 1157 if (cq->cq_is_special != is_special) {
1166 1158 mutex_exit(&cq->cq_lock);
1167 1159 return (DDI_FAILURE);
1168 1160 }
1169 1161 }
1170 1162 cq->cq_refcnt++;
1171 1163 mutex_exit(&cq->cq_lock);
1172 1164 return (DDI_SUCCESS);
1173 1165 }
1174 1166
1175 1167
/*
 * hermon_cq_refcnt_dec()
 *    Context: Can be called from interrupt or base context.
 *
 * Drops one reference taken by hermon_cq_refcnt_inc().  The caller is
 * responsible for never letting the count go below zero.
 */
void
hermon_cq_refcnt_dec(hermon_cqhdl_t cq)
{
	/* Decrement the completion queue's reference count */
	mutex_enter(&cq->cq_lock);
	cq->cq_refcnt--;
	mutex_exit(&cq->cq_lock);
}
1188 1180
1189 1181
/*
 * hermon_cq_arm_doorbell()
 *    Context: Can be called from interrupt or base context.
 *
 * Arms the CQ for event notification via the in-memory arm doorbell
 * record and the UAR doorbell register.  The arm word is updated with
 * an atomic compare-and-swap because userland libraries may race with
 * the kernel on the same record.  NOTIFY_CQ supersedes a pending
 * NOTIFY_CQ_SOLICIT, but not vice versa, so an already-armed CQ may be
 * left untouched.  Returns IBT_SUCCESS, or ibc_get_ci_failure(0) on a
 * PIO (FMA) error.
 */
static int
hermon_cq_arm_doorbell(hermon_state_t *state, hermon_cqhdl_t cq, uint_t cq_cmd)
{
	uint32_t	cq_num;
	uint32_t	*target;
	uint32_t	old_cmd, cmp, new, tmp, cmd_sn;
	ddi_acc_handle_t uarhdl = hermon_get_uarhdl(state);

	/* initialize the FMA retry loop */
	hermon_pio_init(fm_loop_cnt, fm_status, fm_test_num);

	cq_num = cq->cq_cqnum;
	/* The arm word is the second 32-bit word of the doorbell record */
	target = (uint32_t *)cq->cq_arm_ci_vdbr + 1;

	/* the FMA retry loop starts for Hermon doorbell register. */
	hermon_pio_start(state, uarhdl, pio_error, fm_loop_cnt, fm_status,
	    fm_test_num);
retry:
	/* Snapshot the arm word and extract the cmd and cmd_sn fields */
	cmp = *target;
	tmp = htonl(cmp);
	old_cmd = tmp & (0x7 << HERMON_CQDB_CMD_SHIFT);
	cmd_sn = tmp & (0x3 << HERMON_CQDB_CMDSN_SHIFT);
	if (cq_cmd == HERMON_CQDB_NOTIFY_CQ) {
		if (old_cmd != HERMON_CQDB_NOTIFY_CQ) {
			/* Record the new command in the doorbell record */
			cmd_sn |= (HERMON_CQDB_NOTIFY_CQ <<
			    HERMON_CQDB_CMD_SHIFT);
			new = htonl(cmd_sn | (cq->cq_consindx & 0xFFFFFF));
			tmp = atomic_cas_32(target, cmp, new);
			if (tmp != cmp)
				goto retry;
			/* Ring the UAR doorbell to actually arm the CQ */
			HERMON_UAR_DOORBELL(state, uarhdl, (uint64_t *)(void *)
			    &state->hs_uar->cq, (((uint64_t)cmd_sn | cq_num) <<
			    32) | (cq->cq_consindx & 0xFFFFFF));
		} /* else it's already armed */
	} else {
		ASSERT(cq_cmd == HERMON_CQDB_NOTIFY_CQ_SOLICIT);
		/*
		 * Solicit-only arming does not override a pending
		 * NOTIFY_CQ (which already covers solicited events).
		 */
		if (old_cmd != HERMON_CQDB_NOTIFY_CQ &&
		    old_cmd != HERMON_CQDB_NOTIFY_CQ_SOLICIT) {
			cmd_sn |= (HERMON_CQDB_NOTIFY_CQ_SOLICIT <<
			    HERMON_CQDB_CMD_SHIFT);
			new = htonl(cmd_sn | (cq->cq_consindx & 0xFFFFFF));
			tmp = atomic_cas_32(target, cmp, new);
			if (tmp != cmp)
				goto retry;
			HERMON_UAR_DOORBELL(state, uarhdl, (uint64_t *)(void *)
			    &state->hs_uar->cq, (((uint64_t)cmd_sn | cq_num) <<
			    32) | (cq->cq_consindx & 0xFFFFFF));
		} /* else it's already armed */
	}

	/* the FMA retry loop ends. */
	hermon_pio_end(state, uarhdl, pio_error, fm_loop_cnt, fm_status,
	    fm_test_num);

	return (IBT_SUCCESS);

pio_error:
	hermon_fm_ereport(state, HCA_SYS_ERR, HCA_ERR_SRV_LOST);
	return (ibc_get_ci_failure(0));
}
1254 1246
1255 1247
1256 1248 /*
1257 1249 * hermon_cqhdl_from_cqnum()
1258 1250 * Context: Can be called from interrupt or base context.
1259 1251 *
1260 1252 * This routine is important because changing the unconstrained
1261 1253 * portion of the CQ number is critical to the detection of a
1262 1254 * potential race condition in the CQ handler code (i.e. the case
1263 1255 * where a CQ is freed and alloc'd again before an event for the
1264 1256 * "old" CQ can be handled).
1265 1257 *
1266 1258 * While this is not a perfect solution (not sure that one exists)
1267 1259 * it does help to mitigate the chance that this race condition will
1268 1260 * cause us to deliver a "stale" event to the new CQ owner. Note:
1269 1261 * this solution does not scale well because the number of constrained
1270 1262 * bits increases (and, hence, the number of unconstrained bits
1271 1263 * decreases) as the number of supported CQs grows. For small and
1272 1264 * intermediate values, it should hopefully provide sufficient
1273 1265 * protection.
1274 1266 */
1275 1267 hermon_cqhdl_t
1276 1268 hermon_cqhdl_from_cqnum(hermon_state_t *state, uint_t cqnum)
1277 1269 {
1278 1270 uint_t cqindx, cqmask;
1279 1271
1280 1272 /* Calculate the CQ table index from the cqnum */
1281 1273 cqmask = (1 << state->hs_cfg_profile->cp_log_num_cq) - 1;
1282 1274 cqindx = cqnum & cqmask;
1283 1275 return (hermon_icm_num_to_hdl(state, HERMON_CQC, cqindx));
1284 1276 }
1285 1277
1286 1278 /*
1287 1279 * hermon_cq_cqe_consume()
1288 1280 * Context: Can be called from interrupt or base context.
1289 1281 */
1290 1282 static void
1291 1283 hermon_cq_cqe_consume(hermon_state_t *state, hermon_cqhdl_t cq,
1292 1284 hermon_hw_cqe_t *cqe, ibt_wc_t *wc)
1293 1285 {
1294 1286 uint_t opcode, qpnum, qp1_indx;
1295 1287 ibt_wc_flags_t flags;
1296 1288 ibt_wrc_opcode_t type;
1297 1289
1298 1290 /*
1299 1291 * Determine if this is an "error" CQE by examining "opcode". If it
1300 1292 * is an error CQE, then call hermon_cq_errcqe_consume() and return
1301 1293 * whatever status it returns. Otherwise, this is a successful
1302 1294 * completion.
1303 1295 */
1304 1296 opcode = HERMON_CQE_OPCODE_GET(cq, cqe);
1305 1297 if ((opcode == HERMON_CQE_SEND_ERR_OPCODE) ||
1306 1298 (opcode == HERMON_CQE_RECV_ERR_OPCODE)) {
1307 1299 hermon_cq_errcqe_consume(state, cq, cqe, wc);
1308 1300 return;
1309 1301 }
1310 1302
1311 1303 /*
1312 1304 * Fetch the Work Request ID using the information in the CQE.
↓ open down ↓ |
620 lines elided |
↑ open up ↑ |
1313 1305 * See hermon_wr.c for more details.
1314 1306 */
1315 1307 wc->wc_id = hermon_wrid_get_entry(cq, cqe);
1316 1308
1317 1309 /*
1318 1310 * Parse the CQE opcode to determine completion type. This will set
1319 1311 * not only the type of the completion, but also any flags that might
1320 1312 * be associated with it (e.g. whether immediate data is present).
1321 1313 */
1322 1314 flags = IBT_WC_NO_FLAGS;
1323 - _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(state->hs_fcoib_may_be_running))
1324 1315 if (HERMON_CQE_SENDRECV_GET(cq, cqe) != HERMON_COMPLETION_RECV) {
1325 1316
1326 1317 /* Send CQE */
1327 1318 switch (opcode) {
1328 1319 case HERMON_CQE_SND_RDMAWR_IMM:
1329 1320 case HERMON_CQE_SND_RDMAWR:
1330 1321 type = IBT_WRC_RDMAW;
1331 1322 break;
1332 1323
1333 1324 case HERMON_CQE_SND_SEND_INV:
1334 1325 case HERMON_CQE_SND_SEND_IMM:
1335 1326 case HERMON_CQE_SND_SEND:
1336 1327 type = IBT_WRC_SEND;
1337 1328 break;
1338 1329
1339 1330 case HERMON_CQE_SND_LSO:
1340 1331 type = IBT_WRC_SEND_LSO;
1341 1332 break;
1342 1333
1343 1334 case HERMON_CQE_SND_RDMARD:
1344 1335 type = IBT_WRC_RDMAR;
1345 1336 break;
1346 1337
1347 1338 case HERMON_CQE_SND_ATOMIC_CS:
1348 1339 type = IBT_WRC_CSWAP;
1349 1340 break;
1350 1341
1351 1342 case HERMON_CQE_SND_ATOMIC_FA:
1352 1343 type = IBT_WRC_FADD;
1353 1344 break;
1354 1345
1355 1346 case HERMON_CQE_SND_BIND_MW:
1356 1347 type = IBT_WRC_BIND;
1357 1348 break;
1358 1349
1359 1350 case HERMON_CQE_SND_FRWR:
1360 1351 type = IBT_WRC_FAST_REG_PMR;
1361 1352 break;
1362 1353
1363 1354 case HERMON_CQE_SND_LCL_INV:
1364 1355 type = IBT_WRC_LOCAL_INVALIDATE;
1365 1356 break;
1366 1357
1367 1358 default:
1368 1359 HERMON_WARNING(state, "unknown send CQE type");
1369 1360 wc->wc_status = IBT_WC_LOCAL_QP_OP_ERR;
1370 1361 return;
1371 1362 }
1372 1363 } else if ((state->hs_fcoib_may_be_running == B_TRUE) &&
1373 1364 hermon_fcoib_is_fexch_qpn(state, HERMON_CQE_QPNUM_GET(cq, cqe))) {
1374 1365 type = IBT_WRC_RECV;
1375 1366 if (HERMON_CQE_FEXCH_DIFE(cq, cqe))
1376 1367 flags |= IBT_WC_DIF_ERROR;
1377 1368 wc->wc_bytes_xfer = HERMON_CQE_BYTECNT_GET(cq, cqe);
1378 1369 wc->wc_fexch_seq_cnt = HERMON_CQE_FEXCH_SEQ_CNT(cq, cqe);
1379 1370 wc->wc_fexch_tx_bytes_xfer = HERMON_CQE_FEXCH_TX_BYTES(cq, cqe);
1380 1371 wc->wc_fexch_rx_bytes_xfer = HERMON_CQE_FEXCH_RX_BYTES(cq, cqe);
1381 1372 wc->wc_fexch_seq_id = HERMON_CQE_FEXCH_SEQ_ID(cq, cqe);
1382 1373 wc->wc_detail = HERMON_CQE_FEXCH_DETAIL(cq, cqe) &
1383 1374 IBT_WC_DETAIL_FC_MATCH_MASK;
1384 1375 wc->wc_rkey = HERMON_CQE_IMM_ETH_PKEY_CRED_GET(cq, cqe);
1385 1376 flags |= IBT_WC_FEXCH_FMT | IBT_WC_RKEY_INVALIDATED;
1386 1377 } else {
1387 1378 /*
1388 1379 * Parse the remaining contents of the CQE into the work
1389 1380 * completion. This means filling in SL, QP number, SLID,
1390 1381 * immediate data, etc.
1391 1382 *
1392 1383 * Note: Not all of these fields are valid in a given
1393 1384 * completion. Many of them depend on the actual type of
1394 1385 * completion. So we fill in all of the fields and leave
1395 1386 * it up to the IBTF and consumer to sort out which are
1396 1387 * valid based on their context.
1397 1388 */
1398 1389 wc->wc_sl = HERMON_CQE_SL_GET(cq, cqe);
1399 1390 wc->wc_qpn = HERMON_CQE_DQPN_GET(cq, cqe);
1400 1391 wc->wc_slid = HERMON_CQE_DLID_GET(cq, cqe);
1401 1392 wc->wc_immed_data =
1402 1393 HERMON_CQE_IMM_ETH_PKEY_CRED_GET(cq, cqe);
1403 1394 wc->wc_ethertype = (wc->wc_immed_data & 0xFFFF);
1404 1395 wc->wc_pkey_ix = (wc->wc_immed_data &
1405 1396 ((1 << state->hs_queryport.log_max_pkey) - 1));
1406 1397 /*
1407 1398 * Fill in "bytes transferred" as appropriate. Also,
1408 1399 * if necessary, fill in the "path bits" field.
1409 1400 */
1410 1401 wc->wc_path_bits = HERMON_CQE_PATHBITS_GET(cq, cqe);
1411 1402 wc->wc_bytes_xfer = HERMON_CQE_BYTECNT_GET(cq, cqe);
1412 1403
1413 1404 /*
1414 1405 * Check for GRH, update the flags, then fill in "wc_flags"
1415 1406 * field in the work completion
1416 1407 */
1417 1408 if (HERMON_CQE_GRH_GET(cq, cqe) != 0) {
1418 1409 flags |= IBT_WC_GRH_PRESENT;
1419 1410 }
1420 1411
1421 1412 /* Receive CQE */
1422 1413 switch (opcode) {
1423 1414 case HERMON_CQE_RCV_SEND_IMM:
1424 1415 /*
1425 1416 * Note: According to the PRM, all QP1 recv
1426 1417 * completions look like the result of a Send with
1427 1418 * Immediate. They are not, however, (MADs are Send
1428 1419 * Only) so we need to check the QP number and set
1429 1420 * the flag only if it is non-QP1.
1430 1421 */
1431 1422 qpnum = HERMON_CQE_QPNUM_GET(cq, cqe);
1432 1423 qp1_indx = state->hs_spec_qp1->hr_indx;
1433 1424 if ((qpnum < qp1_indx) || (qpnum > qp1_indx + 1)) {
1434 1425 flags |= IBT_WC_IMMED_DATA_PRESENT;
1435 1426 }
1436 1427 /* FALLTHROUGH */
1437 1428
1438 1429 case HERMON_CQE_RCV_SEND:
1439 1430 type = IBT_WRC_RECV;
1440 1431 if (HERMON_CQE_IS_IPOK(cq, cqe)) {
1441 1432 wc->wc_cksum = HERMON_CQE_CKSUM(cq, cqe);
1442 1433 flags |= IBT_WC_CKSUM_OK;
1443 1434 wc->wc_detail = IBT_WC_DETAIL_ALL_FLAGS_MASK &
1444 1435 HERMON_CQE_IPOIB_STATUS(cq, cqe);
1445 1436 }
1446 1437 break;
1447 1438
1448 1439 case HERMON_CQE_RCV_SEND_INV:
1449 1440 type = IBT_WRC_RECV;
1450 1441 flags |= IBT_WC_RKEY_INVALIDATED;
1451 1442 wc->wc_rkey = wc->wc_immed_data; /* same field in cqe */
1452 1443 break;
1453 1444
1454 1445 case HERMON_CQE_RCV_RDMAWR_IMM:
1455 1446 flags |= IBT_WC_IMMED_DATA_PRESENT;
1456 1447 type = IBT_WRC_RECV_RDMAWI;
1457 1448 break;
1458 1449
1459 1450 default:
1460 1451
1461 1452 HERMON_WARNING(state, "unknown recv CQE type");
1462 1453 wc->wc_status = IBT_WC_LOCAL_QP_OP_ERR;
1463 1454 return;
1464 1455 }
1465 1456 }
1466 1457 wc->wc_type = type;
1467 1458 wc->wc_flags = flags;
1468 1459 wc->wc_status = IBT_WC_SUCCESS;
1469 1460 }
1470 1461
/*
 * hermon_cq_errcqe_consume()
 *    Context: Can be called from interrupt or base context.
 *
 * Translates an error CQE into an ibt_wc_t: fetches the work request
 * ID and maps the hardware error syndrome onto the corresponding
 * ibt_wc_status_t.  Most error classes also raise a HERMON_WARNING;
 * WR_FLUSHED is the routine teardown case and is not logged.
 */
static void
hermon_cq_errcqe_consume(hermon_state_t *state, hermon_cqhdl_t cq,
    hermon_hw_cqe_t *cqe, ibt_wc_t *wc)
{
	uint32_t	imm_eth_pkey_cred;
	uint_t		status;
	ibt_wc_status_t	ibt_status;

	/*
	 * Fetch the Work Request ID using the information in the CQE.
	 * See hermon_wr.c for more details.
	 */
	wc->wc_id = hermon_wrid_get_entry(cq, cqe);

	/*
	 * Parse the CQE opcode to determine completion type.  We know that
	 * the CQE is an error completion, so we extract only the completion
	 * status/syndrome here.
	 */
	imm_eth_pkey_cred = HERMON_CQE_ERROR_SYNDROME_GET(cq, cqe);
	status = imm_eth_pkey_cred;
	/* Flushed WRs are expected during teardown; don't log those */
	if (status != HERMON_CQE_WR_FLUSHED_ERR)
		IBTF_DPRINTF_L2("CQE ERR", "cqe %p QPN %x indx %x status 0x%x "
		    "vendor syndrome %x", cqe, HERMON_CQE_QPNUM_GET(cq, cqe),
		    HERMON_CQE_WQECNTR_GET(cq, cqe), status,
		    HERMON_CQE_ERROR_VENDOR_SYNDROME_GET(cq, cqe));
	/* Map the hardware syndrome onto the IBT work completion status */
	switch (status) {
	case HERMON_CQE_LOC_LEN_ERR:
		HERMON_WARNING(state, HERMON_FMA_LOCLEN);
		ibt_status = IBT_WC_LOCAL_LEN_ERR;
		break;

	case HERMON_CQE_LOC_OP_ERR:
		HERMON_WARNING(state, HERMON_FMA_LOCQPOP);
		ibt_status = IBT_WC_LOCAL_QP_OP_ERR;
		break;

	case HERMON_CQE_LOC_PROT_ERR:
		HERMON_WARNING(state, HERMON_FMA_LOCPROT);
		ibt_status = IBT_WC_LOCAL_PROTECT_ERR;
		IBTF_DPRINTF_L2("ERRCQE", "is at %p", cqe);
		/* Debug aid: deliberately panic on protection errors */
		if (hermon_should_panic) {
			cmn_err(CE_PANIC, "Hermon intentional PANIC - "
			    "Local Protection Error\n");
		}
		break;

	case HERMON_CQE_WR_FLUSHED_ERR:
		ibt_status = IBT_WC_WR_FLUSHED_ERR;
		break;

	case HERMON_CQE_MW_BIND_ERR:
		HERMON_WARNING(state, HERMON_FMA_MWBIND);
		ibt_status = IBT_WC_MEM_WIN_BIND_ERR;
		break;

	case HERMON_CQE_BAD_RESPONSE_ERR:
		HERMON_WARNING(state, HERMON_FMA_RESP);
		ibt_status = IBT_WC_BAD_RESPONSE_ERR;
		break;

	case HERMON_CQE_LOCAL_ACCESS_ERR:
		HERMON_WARNING(state, HERMON_FMA_LOCACC);
		ibt_status = IBT_WC_LOCAL_ACCESS_ERR;
		break;

	case HERMON_CQE_REM_INV_REQ_ERR:
		HERMON_WARNING(state, HERMON_FMA_REMREQ);
		ibt_status = IBT_WC_REMOTE_INVALID_REQ_ERR;
		break;

	case HERMON_CQE_REM_ACC_ERR:
		HERMON_WARNING(state, HERMON_FMA_REMACC);
		ibt_status = IBT_WC_REMOTE_ACCESS_ERR;
		break;

	case HERMON_CQE_REM_OP_ERR:
		HERMON_WARNING(state, HERMON_FMA_REMOP);
		ibt_status = IBT_WC_REMOTE_OP_ERR;
		break;

	case HERMON_CQE_TRANS_TO_ERR:
		HERMON_WARNING(state, HERMON_FMA_XPORTCNT);
		ibt_status = IBT_WC_TRANS_TIMEOUT_ERR;
		break;

	case HERMON_CQE_RNRNAK_TO_ERR:
		HERMON_WARNING(state, HERMON_FMA_RNRCNT);
		ibt_status = IBT_WC_RNR_NAK_TIMEOUT_ERR;
		break;

	/*
	 * The following error codes are not supported in the Hermon driver
	 * as they relate only to Reliable Datagram completion statuses:
	 *    case HERMON_CQE_LOCAL_RDD_VIO_ERR:
	 *    case HERMON_CQE_REM_INV_RD_REQ_ERR:
	 *    case HERMON_CQE_EEC_REM_ABORTED_ERR:
	 *    case HERMON_CQE_INV_EEC_NUM_ERR:
	 *    case HERMON_CQE_INV_EEC_STATE_ERR:
	 *    case HERMON_CQE_LOC_EEC_ERR:
	 */

	default:
		HERMON_WARNING(state, "unknown error CQE status");
		HERMON_FMANOTE(state, HERMON_FMA_UNKN);
		ibt_status = IBT_WC_LOCAL_QP_OP_ERR;
		break;
	}

	wc->wc_status = ibt_status;
}
1586 1577
1587 1578
1588 1579 /*
1589 1580 * hermon_cq_resize_helper()
1590 1581 * Context: Can be called only from user or kernel context.
1591 1582 */
1592 1583 void
1593 1584 hermon_cq_resize_helper(hermon_state_t *state, hermon_cqhdl_t cq)
1594 1585 {
1595 1586 hermon_cqhdl_t resize_hdl;
1596 1587 int status;
1597 1588
1598 1589 /*
1599 1590 * we're here because we found the special cqe opcode, so we have
1600 1591 * to update the cq_handle, release the old resources, clear the
1601 1592 * flag in the cq_hdl, and release the resize_hdl. When we return
1602 1593 * above, it will take care of the rest
1603 1594 */
1604 1595 ASSERT(MUTEX_HELD(&cq->cq_lock));
1605 1596
1606 1597 resize_hdl = cq->cq_resize_hdl;
1607 1598
1608 1599 /*
1609 1600 * Deregister the memory for the old Completion Queue. Note: We
1610 1601 * really can't return error here because we have no good way to
1611 1602 * cleanup. Plus, the deregistration really shouldn't ever happen.
1612 1603 * So, if it does, it is an indication that something has gone
1613 1604 * seriously wrong. So we print a warning message and return error
1614 1605 * (knowing, of course, that the "old" CQ memory will be leaked)
1615 1606 */
1616 1607 status = hermon_mr_deregister(state, &cq->cq_mrhdl, HERMON_MR_DEREG_ALL,
1617 1608 HERMON_SLEEP);
1618 1609 if (status != DDI_SUCCESS) {
1619 1610 HERMON_WARNING(state, "failed to deregister old CQ memory");
1620 1611 }
1621 1612
1622 1613 /* Next, free the memory from the old CQ buffer */
1623 1614 hermon_queue_free(&cq->cq_cqinfo);
1624 1615
1625 1616 /* now we can update the cq_hdl with the new things saved */
1626 1617
1627 1618 cq->cq_buf = resize_hdl->cq_buf;
1628 1619 cq->cq_mrhdl = resize_hdl->cq_mrhdl;
1629 1620 cq->cq_bufsz = resize_hdl->cq_bufsz;
1630 1621 cq->cq_log_cqsz = resize_hdl->cq_log_cqsz;
1631 1622 cq->cq_umap_dhp = cq->cq_resize_hdl->cq_umap_dhp;
1632 1623 cq->cq_resize_hdl = 0;
1633 1624 bcopy(&resize_hdl->cq_cqinfo, &cq->cq_cqinfo,
1634 1625 sizeof (struct hermon_qalloc_info_s));
1635 1626
1636 1627 /* finally, release the resizing handle */
1637 1628 kmem_free(resize_hdl, sizeof (struct hermon_sw_cq_s));
1638 1629 }
1639 1630
1640 1631
/*
 * hermon_cq_entries_flush()
 *    Context: Can be called from interrupt or base context.
 *
 * Purge all CQEs belonging to the given QP from the QP's receive CQ and
 * then from its send CQ (skipping the second pass when both handles refer
 * to the same CQ).  CQEs owned by other QPs are compacted toward the tail
 * of the queue so that none are lost.  If the QP is attached to an SRQ,
 * each flushed receive completion also links its work request entry back
 * onto the SRQ free list.  The consumer index doorbell is rung only when
 * at least one CQE was actually removed.
 */
/* ARGSUSED */
void
hermon_cq_entries_flush(hermon_state_t *state, hermon_qphdl_t qp)
{
	hermon_cqhdl_t		cq;
	hermon_hw_cqe_t		*cqe, *next_cqe;
	hermon_srqhdl_t		srq;
	hermon_workq_hdr_t	*wq;
	uint32_t		cons_indx, tail_cons_indx, wrap_around_mask;
	uint32_t		new_indx, check_indx, qpnum;
	uint32_t		shift, mask;
	int			outstanding_cqes;

	qpnum = qp->qp_qpnum;
	/* SRQ work queue header is only needed to recycle receive WQEs */
	if ((srq = qp->qp_srqhdl) != NULL)
		wq = qp->qp_srqhdl->srq_wq_wqhdr;
	else
		wq = NULL;
	cq = qp->qp_rq_cqhdl;

	/* No receive CQ: start directly with the send CQ */
	if (cq == NULL) {
		cq = qp->qp_sq_cqhdl;
	}

do_send_cq:	/* loop back to here if send_cq is not the same as recv_cq */
	if (cq == NULL)
		return;

	cons_indx = cq->cq_consindx;
	shift = cq->cq_log_cqsz;
	mask = cq->cq_bufsz;
	/* cq_bufsz is a power of two, so this masks the index into range */
	wrap_around_mask = mask - 1;

	/* Calculate the pointer to the first CQ entry */
	cqe = &cq->cq_buf[cons_indx & wrap_around_mask];

	/*
	 * Loop through the CQ looking for entries owned by software.  If an
	 * entry is owned by software then we increment an 'outstanding_cqes'
	 * count to know how many entries total we have on our CQ.  We use this
	 * value further down to know how many entries to loop through looking
	 * for our same QP number.
	 */
	outstanding_cqes = 0;
	tail_cons_indx = cons_indx;
	while (HERMON_CQE_OWNER_IS_SW(cq, cqe, tail_cons_indx, shift, mask)) {
		/* increment total cqes count */
		outstanding_cqes++;

		/* increment the consumer index */
		tail_cons_indx++;

		/* update the pointer to the next cq entry */
		cqe = &cq->cq_buf[tail_cons_indx & wrap_around_mask];
	}

	/*
	 * Using the 'tail_cons_indx' that was just set, we now know how many
	 * total CQEs possible there are.  Set the 'check_indx' and the
	 * 'new_indx' to the last entry identified by 'tail_cons_indx'.
	 * We scan backward from the tail: 'check_indx' walks every SW-owned
	 * CQE, while 'new_indx' trails it, marking where the next CQE that
	 * must be preserved will be copied to.
	 */
	check_indx = new_indx = (tail_cons_indx - 1);

	while (--outstanding_cqes >= 0) {
		cqe = &cq->cq_buf[check_indx & wrap_around_mask];

		/*
		 * If the QP number is the same in the CQE as the QP, then
		 * we must "consume" it.  If it is for an SRQ wqe, then we
		 * also must free the wqe back onto the free list of the SRQ.
		 */
		if (qpnum == HERMON_CQE_QPNUM_GET(cq, cqe)) {
			if (srq && (HERMON_CQE_SENDRECV_GET(cq, cqe) ==
			    HERMON_COMPLETION_RECV)) {
				uint64_t *desc;
				int indx;

				/* Add wqe back to SRQ free list */
				indx = HERMON_CQE_WQEADDRSZ_GET(cq, cqe) &
				    wq->wq_mask;
				desc = HERMON_SRQ_WQE_ADDR(srq, wq->wq_tail);
				/*
				 * Link the current free-list tail to this
				 * WQE (index stored big-endian in the
				 * descriptor) and make it the new tail.
				 */
				((uint16_t *)desc)[1] = htons(indx);
				wq->wq_tail = indx;
			}
		} else {	/* CQEs for other QPNs need to remain */
			if (check_indx != new_indx) {
				next_cqe =
				    &cq->cq_buf[new_indx & wrap_around_mask];
				/* Copy the CQE into the "next_cqe" pointer. */
				bcopy(cqe, next_cqe, sizeof (hermon_hw_cqe_t));
			}
			new_indx--;	/* move index to next CQE to fill */
		}
		check_indx--;	/* move index to next CQE to check */
	}

	/*
	 * Update consumer index to be the 'new_indx'.  This moves it past all
	 * removed entries.  Because 'new_indx' is pointing to the last
	 * previously valid SW owned entry, we add 1 to point the cons_indx to
	 * the first HW owned entry.
	 */
	cons_indx = (new_indx + 1);

	/*
	 * Now we only ring the doorbell (to update the consumer index) if
	 * we've actually consumed a CQ entry.  If we found no QP number
	 * matches above, then we would not have removed anything.  So only if
	 * something was removed do we ring the doorbell.
	 */
	if (cq->cq_consindx != cons_indx) {
		/*
		 * Update the consumer index in both the CQ handle and the
		 * doorbell record.
		 */
		cq->cq_consindx = cons_indx;

		hermon_cq_update_ci_doorbell(cq);

	}
	/* Repeat the whole flush for the send CQ if it is a distinct CQ */
	if (cq != qp->qp_sq_cqhdl) {
		cq = qp->qp_sq_cqhdl;
		goto do_send_cq;
	}
}
1770 1761
1771 1762 /*
1772 1763 * hermon_get_cq_sched_list()
1773 1764 * Context: Only called from attach() path context
1774 1765 *
1775 1766 * Read properties, creating entries in hs_cq_sched_list with
1776 1767 * information about the requested "expected" and "minimum"
1777 1768 * number of MSI-X interrupt vectors per list entry.
1778 1769 */
1779 1770 static int
1780 1771 hermon_get_cq_sched_list(hermon_state_t *state)
1781 1772 {
1782 1773 char **listp, ulp_prop[HERMON_CQH_MAX + 4];
1783 1774 uint_t nlist, i, j, ndata;
1784 1775 int *data;
1785 1776 size_t len;
1786 1777 hermon_cq_sched_t *cq_schedp;
1787 1778
1788 1779 if (ddi_prop_lookup_string_array(DDI_DEV_T_ANY, state->hs_dip,
1789 1780 DDI_PROP_DONTPASS, "cqh-group-list", &listp, &nlist) !=
1790 1781 DDI_PROP_SUCCESS)
1791 1782 return (0);
1792 1783
1793 1784 state->hs_cq_sched_array_size = nlist;
1794 1785 state->hs_cq_sched_array = cq_schedp = kmem_zalloc(nlist *
1795 1786 sizeof (hermon_cq_sched_t), KM_SLEEP);
1796 1787 for (i = 0; i < nlist; i++) {
1797 1788 if ((len = strlen(listp[i])) >= HERMON_CQH_MAX) {
1798 1789 cmn_err(CE_CONT, "'cqh' property name too long\n");
1799 1790 goto game_over;
1800 1791 }
1801 1792 for (j = 0; j < i; j++) {
1802 1793 if (strcmp(listp[j], listp[i]) == 0) {
1803 1794 cmn_err(CE_CONT, "Duplicate 'cqh' property\n");
1804 1795 goto game_over;
1805 1796 }
1806 1797 }
1807 1798 (void) strncpy(cq_schedp[i].cqs_name, listp[i], HERMON_CQH_MAX);
1808 1799 ulp_prop[0] = 'c';
1809 1800 ulp_prop[1] = 'q';
1810 1801 ulp_prop[2] = 'h';
1811 1802 ulp_prop[3] = '-';
1812 1803 (void) strncpy(ulp_prop + 4, listp[i], len + 1);
1813 1804 if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, state->hs_dip,
1814 1805 DDI_PROP_DONTPASS, ulp_prop, &data, &ndata) !=
1815 1806 DDI_PROP_SUCCESS) {
1816 1807 cmn_err(CE_CONT, "property '%s' not found\n", ulp_prop);
1817 1808 goto game_over;
1818 1809 }
1819 1810 if (ndata != 2) {
1820 1811 cmn_err(CE_CONT, "property '%s' does not "
1821 1812 "have 2 integers\n", ulp_prop);
1822 1813 goto game_over_free_data;
1823 1814 }
1824 1815 cq_schedp[i].cqs_desired = data[0];
1825 1816 cq_schedp[i].cqs_minimum = data[1];
1826 1817 cq_schedp[i].cqs_refcnt = 0;
1827 1818 ddi_prop_free(data);
1828 1819 }
1829 1820 if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, state->hs_dip,
1830 1821 DDI_PROP_DONTPASS, "cqh-default", &data, &ndata) !=
1831 1822 DDI_PROP_SUCCESS) {
1832 1823 cmn_err(CE_CONT, "property 'cqh-default' not found\n");
1833 1824 goto game_over;
1834 1825 }
1835 1826 if (ndata != 2) {
1836 1827 cmn_err(CE_CONT, "property 'cqh-default' does not "
1837 1828 "have 2 integers\n");
1838 1829 goto game_over_free_data;
1839 1830 }
1840 1831 cq_schedp = &state->hs_cq_sched_default;
1841 1832 cq_schedp->cqs_desired = data[0];
1842 1833 cq_schedp->cqs_minimum = data[1];
1843 1834 cq_schedp->cqs_refcnt = 0;
1844 1835 ddi_prop_free(data);
1845 1836 ddi_prop_free(listp);
1846 1837 return (1); /* game on */
1847 1838
1848 1839 game_over_free_data:
1849 1840 ddi_prop_free(data);
1850 1841 game_over:
1851 1842 cmn_err(CE_CONT, "Error in 'cqh' properties in hermon.conf\n");
1852 1843 cmn_err(CE_CONT, "completion handler groups not being used\n");
1853 1844 kmem_free(cq_schedp, nlist * sizeof (hermon_cq_sched_t));
1854 1845 state->hs_cq_sched_array_size = 0;
1855 1846 ddi_prop_free(listp);
1856 1847 return (0);
1857 1848 }
1858 1849
1859 1850 /*
1860 1851 * hermon_cq_sched_init()
1861 1852 * Context: Only called from attach() path context
1862 1853 *
1863 1854 * Read the hermon.conf properties looking for cq_sched info,
1864 1855 * creating reserved pools of MSI-X interrupt ranges for the
1865 1856 * specified ULPs.
1866 1857 */
1867 1858 int
1868 1859 hermon_cq_sched_init(hermon_state_t *state)
1869 1860 {
1870 1861 hermon_cq_sched_t *cq_schedp, *defp;
1871 1862 int i, desired, array_size;
1872 1863
1873 1864 mutex_init(&state->hs_cq_sched_lock, NULL, MUTEX_DRIVER,
1874 1865 DDI_INTR_PRI(state->hs_intrmsi_pri));
1875 1866
1876 1867 mutex_enter(&state->hs_cq_sched_lock);
1877 1868 state->hs_cq_sched_array = NULL;
1878 1869
1879 1870 /* initialize cq_sched_default */
1880 1871 defp = &state->hs_cq_sched_default;
1881 1872 defp->cqs_start_hid = 1;
1882 1873 defp->cqs_len = state->hs_intrmsi_allocd;
1883 1874 defp->cqs_next_alloc = defp->cqs_len - 1;
1884 1875 (void) strncpy(defp->cqs_name, "default", 8);
1885 1876
1886 1877 /* Read properties to determine which ULPs use cq_sched */
1887 1878 if (hermon_get_cq_sched_list(state) == 0)
1888 1879 goto done;
1889 1880
1890 1881 /* Determine if we have enough vectors, or if we have to scale down */
1891 1882 desired = defp->cqs_desired; /* default desired (from hermon.conf) */
1892 1883 if (desired <= 0)
1893 1884 goto done; /* all interrupts in the default pool */
1894 1885 cq_schedp = state->hs_cq_sched_array;
1895 1886 array_size = state->hs_cq_sched_array_size;
1896 1887 for (i = 0; i < array_size; i++)
1897 1888 desired += cq_schedp[i].cqs_desired;
1898 1889 if (desired > state->hs_intrmsi_allocd) {
1899 1890 cmn_err(CE_CONT, "#interrupts allocated (%d) is less than "
1900 1891 "the #interrupts desired (%d)\n",
1901 1892 state->hs_intrmsi_allocd, desired);
1902 1893 cmn_err(CE_CONT, "completion handler groups not being used\n");
1903 1894 goto done; /* all interrupts in the default pool */
1904 1895 }
1905 1896 /* Game on. For each cq_sched group, reserve the MSI-X range */
1906 1897 for (i = 0; i < array_size; i++) {
1907 1898 desired = cq_schedp[i].cqs_desired;
1908 1899 cq_schedp[i].cqs_start_hid = defp->cqs_start_hid;
1909 1900 cq_schedp[i].cqs_len = desired;
1910 1901 cq_schedp[i].cqs_next_alloc = desired - 1;
1911 1902 defp->cqs_len -= desired;
1912 1903 defp->cqs_start_hid += desired;
1913 1904 }
1914 1905 /* reset default's start allocation seed */
1915 1906 state->hs_cq_sched_default.cqs_next_alloc =
1916 1907 state->hs_cq_sched_default.cqs_len - 1;
1917 1908
1918 1909 done:
1919 1910 mutex_exit(&state->hs_cq_sched_lock);
1920 1911 return (IBT_SUCCESS);
1921 1912 }
1922 1913
1923 1914 void
1924 1915 hermon_cq_sched_fini(hermon_state_t *state)
1925 1916 {
1926 1917 mutex_enter(&state->hs_cq_sched_lock);
1927 1918 if (state->hs_cq_sched_array_size) {
1928 1919 kmem_free(state->hs_cq_sched_array, sizeof (hermon_cq_sched_t) *
1929 1920 state->hs_cq_sched_array_size);
1930 1921 state->hs_cq_sched_array_size = 0;
1931 1922 state->hs_cq_sched_array = NULL;
1932 1923 }
1933 1924 mutex_exit(&state->hs_cq_sched_lock);
1934 1925 mutex_destroy(&state->hs_cq_sched_lock);
1935 1926 }
1936 1927
1937 1928 int
1938 1929 hermon_cq_sched_alloc(hermon_state_t *state, ibt_cq_sched_attr_t *attr,
1939 1930 hermon_cq_sched_t **cq_sched_pp)
1940 1931 {
1941 1932 hermon_cq_sched_t *cq_schedp;
1942 1933 int i;
1943 1934 char *name;
1944 1935 ibt_cq_sched_flags_t flags;
1945 1936
1946 1937 flags = attr->cqs_flags;
1947 1938 if ((flags & (IBT_CQS_SCHED_GROUP | IBT_CQS_EXACT_SCHED_GROUP)) == 0) {
1948 1939 *cq_sched_pp = NULL;
1949 1940 return (IBT_SUCCESS);
1950 1941 }
1951 1942 name = attr->cqs_pool_name;
1952 1943
1953 1944 mutex_enter(&state->hs_cq_sched_lock);
1954 1945 cq_schedp = state->hs_cq_sched_array;
1955 1946 for (i = 0; i < state->hs_cq_sched_array_size; i++, cq_schedp++) {
1956 1947 if (strcmp(name, cq_schedp->cqs_name) == 0) {
1957 1948 if (cq_schedp->cqs_len != 0)
1958 1949 cq_schedp->cqs_refcnt++;
1959 1950 break; /* found it */
1960 1951 }
1961 1952 }
1962 1953 if ((i == state->hs_cq_sched_array_size) || /* not found, or */
1963 1954 (cq_schedp->cqs_len == 0)) /* defined, but no dedicated intr's */
1964 1955 cq_schedp = NULL;
1965 1956 mutex_exit(&state->hs_cq_sched_lock);
1966 1957
1967 1958 *cq_sched_pp = cq_schedp; /* set to valid hdl, or to NULL */
1968 1959 if ((cq_schedp == NULL) &&
1969 1960 (attr->cqs_flags & IBT_CQS_EXACT_SCHED_GROUP))
1970 1961 return (IBT_CQ_NO_SCHED_GROUP);
1971 1962 else
1972 1963 return (IBT_SUCCESS);
1973 1964 }
1974 1965
1975 1966 int
1976 1967 hermon_cq_sched_free(hermon_state_t *state, hermon_cq_sched_t *cq_schedp)
1977 1968 {
1978 1969 if (cq_schedp != NULL) {
1979 1970 /* Just decrement refcnt */
1980 1971 mutex_enter(&state->hs_cq_sched_lock);
1981 1972 if (cq_schedp->cqs_refcnt == 0)
1982 1973 HERMON_WARNING(state, "cq_sched free underflow\n");
1983 1974 else
1984 1975 cq_schedp->cqs_refcnt--;
1985 1976 mutex_exit(&state->hs_cq_sched_lock);
1986 1977 }
1987 1978 return (IBT_SUCCESS);
1988 1979 }
↓ open down ↓ |
655 lines elided |
↑ open up ↑ |
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX