int
hermon_cq_alloc(hermon_state_t *state, ibt_cq_hdl_t ibt_cqhdl,
    ibt_cq_attr_t *cq_attr, uint_t *actual_size, hermon_cqhdl_t *cqhdl,
    uint_t sleepflag)
{
        hermon_rsrc_t           *cqc, *rsrc;
        hermon_umap_db_entry_t  *umapdb;
        hermon_hw_cqc_t         cqc_entry;
        hermon_cqhdl_t          cq;
        ibt_mr_attr_t           mr_attr;
        hermon_mr_options_t     op;
        hermon_pdhdl_t          pd;
        hermon_mrhdl_t          mr;
        hermon_hw_cqe_t         *buf;
        uint64_t                value;
        uint32_t                log_cq_size, uarpg;
        uint_t                  cq_is_umap;
        uint32_t                status, flag;
        hermon_cq_sched_t       *cq_schedp;

        _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*cq_attr))

        /*
         * Determine whether the CQ is being allocated for userland access
         * or for kernel access.  If it is being allocated for userland
         * access, then look up the UAR page number for the current process.
         * Note: if no matching entry is found (e.g. the process has not
         * previously open()'d the Hermon driver), an error is returned.
         */
        cq_is_umap = (cq_attr->cq_flags & IBT_CQ_USER_MAP) ? 1 : 0;
        if (cq_is_umap) {
                status = hermon_umap_db_find(state->hs_instance, ddi_get_pid(),
                    MLNX_UMAP_UARPG_RSRC, &value, 0, NULL);
                if (status != DDI_SUCCESS) {
                        status = IBT_INVALID_PARAM;
                        goto cqalloc_fail;
                }
                uarpg = ((hermon_rsrc_t *)(uintptr_t)value)->hr_indx;
        } else {
                uarpg = state->hs_kernel_uar_index;
        }
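
        /*
         * At this point uarpg identifies the UAR (doorbell) page to use for
         * this CQ: either the per-process page found in the userland
         * resources database or the kernel's own UAR page.  It is
         * presumably the value that gets programmed into the CQ context
         * later in this routine.
         */
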
         * reference count.
         */
        status = hermon_rsrc_alloc(state, HERMON_CQC, 1, sleepflag, &cqc);
        if (status != DDI_SUCCESS) {
                status = IBT_INSUFF_RESOURCE;
                goto cqalloc_fail1;
        }

        /*
         * Allocate the software structure for tracking the completion queue
         * (i.e. the Hermon Completion Queue handle).  If we fail here, we
         * must undo the protection domain reference count and the previous
         * resource allocation.
         */
        status = hermon_rsrc_alloc(state, HERMON_CQHDL, 1, sleepflag, &rsrc);
        if (status != DDI_SUCCESS) {
                status = IBT_INSUFF_RESOURCE;
                goto cqalloc_fail2;
        }
        cq = (hermon_cqhdl_t)rsrc->hr_addr;
        _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*cq))
        cq->cq_is_umap = cq_is_umap;
        cq->cq_cqnum = cqc->hr_indx;    /* just use index, implicit in Hermon */
        cq->cq_intmod_count = 0;
        cq->cq_intmod_usec = 0;

        /*
         * If this will be a user-mappable CQ, then allocate an entry for
         * the "userland resources database".  This will later be added to
         * the database (after all further CQ operations are successful).
         * If we fail here, we must undo the reference counts and the
         * previous resource allocation.
         */
        if (cq->cq_is_umap) {
                umapdb = hermon_umap_db_alloc(state->hs_instance, cq->cq_cqnum,
                    MLNX_UMAP_CQMEM_RSRC, (uint64_t)(uintptr_t)rsrc);
                if (umapdb == NULL) {
                        status = IBT_INSUFF_RESOURCE;
                        goto cqalloc_fail3;
                }
        }
         * completion queues do not have the same strict alignment
         * requirements.  It is sufficient for the CQ memory to be both
         * aligned to and bound to addresses which are a multiple of the
         * CQE size.
         */
        cq->cq_cqinfo.qa_size = (1 << log_cq_size) * sizeof (hermon_hw_cqe_t);
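        /*
         * For example, log_cq_size == 8 gives (1 << 8) == 256 CQEs; if
         * sizeof (hermon_hw_cqe_t) is 32 bytes (the usual Hermon CQE size),
         * qa_size works out to 8 KB of queue memory.
         */
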
        cq->cq_cqinfo.qa_alloc_align = PAGESIZE;
        cq->cq_cqinfo.qa_bind_align = PAGESIZE;
        if (cq->cq_is_umap) {
                cq->cq_cqinfo.qa_location = HERMON_QUEUE_LOCATION_USERLAND;
        } else {
                cq->cq_cqinfo.qa_location = HERMON_QUEUE_LOCATION_NORMAL;
                hermon_arm_cq_dbr_init(cq->cq_arm_ci_vdbr);
        }
        status = hermon_queue_alloc(state, &cq->cq_cqinfo, sleepflag);
        if (status != DDI_SUCCESS) {
                status = IBT_INSUFF_RESOURCE;
                goto cqalloc_fail4;
        }
        buf = (hermon_hw_cqe_t *)cq->cq_cqinfo.qa_buf_aligned;
        _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*buf))

        /*
         * The ownership bit of the CQEs is set by the HW during the process
         * of transferring ownership of the CQ (PRM 09.35c, 14.2.1, note D1),
         * so the CQEs need no software initialization here.
         */

        /*
         * Register the memory for the CQ.  The memory for the CQ must
         * be registered in the Hermon TPT tables.  This gives us the LKey
         * to specify in the CQ context below.  Note: If this is a user-
         * mappable CQ, then we will force DDI_DMA_CONSISTENT mapping.
         */
        flag = (sleepflag == HERMON_SLEEP) ? IBT_MR_SLEEP : IBT_MR_NOSLEEP;
        mr_attr.mr_vaddr = (uint64_t)(uintptr_t)buf;
        mr_attr.mr_len = cq->cq_cqinfo.qa_size;
        mr_attr.mr_as = NULL;
        mr_attr.mr_flags = flag | IBT_MR_ENABLE_LOCAL_WRITE;
        op.mro_bind_type = state->hs_cfg_profile->cp_iommu_bypass;
        op.mro_bind_dmahdl = cq->cq_cqinfo.qa_dmahdl;
        op.mro_bind_override_addr = 0;
        status = hermon_mr_register(state, pd, &mr_attr, &mr, &op,
            HERMON_CQ_CMPT);
        if (status != DDI_SUCCESS) {
                status = IBT_INSUFF_RESOURCE;
                goto cqalloc_fail5;
        }
        _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*mr))
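
        /*
         * The LKey produced by this registration (mr->mr_lkey) is what the
         * CQ context is expected to reference when the CQC entry is filled
         * in later in this routine; that code is not part of this excerpt.
         */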

        cq->cq_erreqnum = HERMON_CQ_ERREQNUM_GET(state);
        if (cq_attr->cq_flags & IBT_CQ_HID) {
                if (!HERMON_HID_VALID(state, cq_attr->cq_hid)) {
                        IBTF_DPRINTF_L2("CQalloc", "bad handler id 0x%x",
                            cq_attr->cq_hid);
                        status = IBT_INVALID_PARAM;
                        goto cqalloc_fail5;
                }
                cq->cq_eqnum = HERMON_HID_TO_EQNUM(state, cq_attr->cq_hid);
                IBTF_DPRINTF_L2("cqalloc", "hid: eqn %d", cq->cq_eqnum);
        } else {
                cq_schedp = (hermon_cq_sched_t *)cq_attr->cq_sched;
                if (cq_schedp == NULL) {
                        cq_schedp = &state->hs_cq_sched_default;
                } else if (cq_schedp != &state->hs_cq_sched_default) {
                        int i;
                        hermon_cq_sched_t *tmp;

                        tmp = state->hs_cq_sched_array;
                            state->hs_dip, 0, 0, cq->cq_cqinfo.qa_size,
                            maxprot, DEVMAP_MAPPING_INVALID, NULL);
                        if (status != DDI_SUCCESS) {
                                mutex_exit(&cq->cq_lock);
                                HERMON_WARNING(state, "failed in CQ memory "
                                    "devmap_devmem_remap()");
                                return (ibc_get_ci_failure(0));
                        }
                        cq->cq_umap_dhp = (devmap_cookie_t)NULL;
                }
        }

        /*
         * Put NULL into the Hermon CQNum-to-CQHdl list.  This will allow
         * any in-progress events to detect that the CQ corresponding to
         * this number has been freed.
         */
        hermon_icm_set_num_to_hdl(state, HERMON_CQC, cqc->hr_indx, NULL);

        mutex_exit(&cq->cq_lock);
        _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*cq))

        /*
         * Reclaim the CQC entry from hardware (using the Hermon HW2SW_CQ
         * firmware command).  If the ownership transfer fails for any
         * reason, it is an indication that something (either in HW or SW)
         * has gone seriously wrong.
         */
        status = hermon_cmn_ownership_cmd_post(state, HW2SW_CQ, &cqc_entry,
            sizeof (hermon_hw_cqc_t), cqnum, sleepflag);
        if (status != HERMON_CMD_SUCCESS) {
                HERMON_WARNING(state, "failed to reclaim CQC ownership");
                cmn_err(CE_CONT, "Hermon: HW2SW_CQ command failed: %08x\n",
                    status);
                if (status == HERMON_CMD_INVALID_STATUS) {
                        hermon_fm_ereport(state, HCA_SYS_ERR, HCA_ERR_SRV_LOST);
                }
                return (ibc_get_ci_failure(0));
        }
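
        /*
         * Once HW2SW_CQ succeeds, the CQC is back under software ownership;
         * the rest of the teardown (deregistering the CQ memory and freeing
         * the queue buffer and the CQC/handle resources) presumably follows
         * below, outside this excerpt.
         */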

        /*

        /* first, alloc the resize_handle */
        resize_hdl = kmem_zalloc(sizeof (struct hermon_sw_cq_s), KM_SLEEP);

        new_cqinfo.qa_size = (1 << log_cq_size) * sizeof (hermon_hw_cqe_t);
        new_cqinfo.qa_alloc_align = PAGESIZE;
        new_cqinfo.qa_bind_align = PAGESIZE;
        if (cq->cq_is_umap) {
                new_cqinfo.qa_location = HERMON_QUEUE_LOCATION_USERLAND;
        } else {
                new_cqinfo.qa_location = HERMON_QUEUE_LOCATION_NORMAL;
        }
        status = hermon_queue_alloc(state, &new_cqinfo, sleepflag);
        if (status != DDI_SUCCESS) {
                /* free the resize handle */
                kmem_free(resize_hdl, sizeof (struct hermon_sw_cq_s));
                status = IBT_INSUFF_RESOURCE;
                goto cqresize_fail;
        }
        buf = (hermon_hw_cqe_t *)new_cqinfo.qa_buf_aligned;
        _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*buf))

        /*
         * No software initialization of the CQ buffer is needed here; the
         * resize command will take care of it.
         */

        /*
         * Register the memory for the CQ.  The memory for the CQ must
         * be registered in the Hermon TPT tables.  This gives us the LKey
         * to specify in the CQ context below.
         */
        flag = (sleepflag == HERMON_SLEEP) ? IBT_MR_SLEEP : IBT_MR_NOSLEEP;
        mr_attr.mr_vaddr = (uint64_t)(uintptr_t)buf;
        mr_attr.mr_len = new_cqinfo.qa_size;
        mr_attr.mr_as = NULL;
        mr_attr.mr_flags = flag | IBT_MR_ENABLE_LOCAL_WRITE;
        op.mro_bind_type = state->hs_cfg_profile->cp_iommu_bypass;
        op.mro_bind_dmahdl = new_cqinfo.qa_dmahdl;
        op.mro_bind_override_addr = 0;
        status = hermon_mr_register(state, pd, &mr_attr, &mr, &op,
            HERMON_CQ_CMPT);
        if (status != DDI_SUCCESS) {
                hermon_queue_free(&new_cqinfo);
                /* free the resize handle */
                kmem_free(resize_hdl, sizeof (struct hermon_sw_cq_s));
                status = IBT_INSUFF_RESOURCE;
                goto cqresize_fail;
        }
        _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*mr))

        /*
         * Now we grab the CQ lock.  Since we will be updating the actual
         * CQ location and the producer/consumer indexes, we should hold
         * the lock.
         *
         * We use HERMON_NOSLEEP here (and below), though, because we are
         * holding the "cq_lock" and, if we were raised to interrupt level
         * by priority inversion, we would not want to block in this
         * routine waiting for success.
         */
        mutex_enter(&cq->cq_lock);

        /*
         * Fill in the CQC entry.  For the resize operation this is the
         * final step before attempting the resize operation on the CQC
         * entry; we use all of the information collected/calculated above
         * to fill in the requisite portions of the CQC.
         */
        bzero(&cqc_entry, sizeof (hermon_hw_cqc_t));
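
        /*
         * The zeroed CQC is then presumably populated with the new queue's
         * parameters (log size, buffer address/LKey, and the current
         * consumer index) before the resize command is posted; that code
         * falls outside this excerpt.
         */
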
         */
        opcode = HERMON_CQE_OPCODE_GET(cq, cqe);
        if ((opcode == HERMON_CQE_SEND_ERR_OPCODE) ||
            (opcode == HERMON_CQE_RECV_ERR_OPCODE)) {
                hermon_cq_errcqe_consume(state, cq, cqe, wc);
                return;
        }

        /*
         * Fetch the Work Request ID using the information in the CQE.
         * See hermon_wr.c for more details.
         */
        wc->wc_id = hermon_wrid_get_entry(cq, cqe);

        /*
         * Parse the CQE opcode to determine completion type.  This will set
         * not only the type of the completion, but also any flags that might
         * be associated with it (e.g. whether immediate data is present).
         */
        flags = IBT_WC_NO_FLAGS;
        _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(state->hs_fcoib_may_be_running))
        if (HERMON_CQE_SENDRECV_GET(cq, cqe) != HERMON_COMPLETION_RECV) {

                /* Send CQE */
                switch (opcode) {
                case HERMON_CQE_SND_RDMAWR_IMM:
                case HERMON_CQE_SND_RDMAWR:
                        type = IBT_WRC_RDMAW;
                        break;

                case HERMON_CQE_SND_SEND_INV:
                case HERMON_CQE_SND_SEND_IMM:
                case HERMON_CQE_SND_SEND:
                        type = IBT_WRC_SEND;
                        break;

                case HERMON_CQE_SND_LSO:
                        type = IBT_WRC_SEND_LSO;
                        break;

                case HERMON_CQE_SND_RDMARD: