8368 remove warlock leftovers from usr/src/uts


  82         uint64_t                        *desc;
  83         uint32_t                        desc_sz;
  84         uint32_t                        signaled_dbd, solicited;
  85         uint32_t                        head, tail, next_tail, qsize_msk;
  86         uint32_t                        hdrmwqes;
  87         uint32_t                        nopcode, fence, immed_data = 0;
  88         hermon_hw_wqe_sgl_t             *ds, *old_ds;
  89         ibt_wr_ds_t                     *sgl;
  90         int                             nds;
  91         int                             i, j, last_ds, num_ds, status;
  92         uint32_t                        *wqe_start;
  93         int                             sectperwqe;
  94         uint_t                          posted_cnt = 0;
  95         int                             total_len, strong_order, fc_bits, cksum;
  96 
  97 
  98         /* initialize the FMA retry loop */
  99         hermon_pio_init(fm_loop_cnt, fm_status, fm_test_num);
 100 
 101         ASSERT(MUTEX_HELD(&qp->qp_sq_lock));
 102         _NOTE(LOCK_RELEASED_AS_SIDE_EFFECT(&qp->qp_sq_lock))
 103 
 104         /* Grab the lock for the WRID list */
 105         membar_consumer();
 106 
 107         /* Save away some initial QP state */
 108         wq = qp->qp_sq_wqhdr;
 109         qsize_msk = wq->wq_mask;
 110         hdrmwqes  = qp->qp_sq_hdrmwqes;              /* in WQEs  */
 111         sectperwqe = 1 << (qp->qp_sq_log_wqesz - 2);
 112 
 113         tail      = wq->wq_tail;
 114         head      = wq->wq_head;
 115         status    = DDI_SUCCESS;
 116 
 117 post_next:
 118         /*
 119          * Check for "queue full" condition.  If the queue
 120          * is already full, then no more WQEs can be posted.
 121          * So break out, ring a doorbell (if necessary) and
 122          * return an error
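
The "queue full" test both posting routines perform at post_next is the standard power-of-two ring check. A minimal standalone sketch of that arithmetic, assuming head and tail are masked indices; the helper name is hypothetical, and the driver folds this into its posting loop alongside the wq_full flag seen later in this review:

	#include <stdint.h>

	/*
	 * Occupancy test for a power-of-two ring.  qsize_msk is
	 * (queue size - 1); hdrm is the headroom, in WQEs, that must
	 * stay free (qp_sq_hdrmwqes above).  All names are illustrative.
	 */
	static int
	ring_is_full(uint32_t head, uint32_t tail, uint32_t qsize_msk,
	    uint32_t hdrm)
	{
		uint32_t used = (tail - head) & qsize_msk;

		/* full once one more post would eat into the headroom */
		return (used + hdrm >= qsize_msk);
	}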


 439         hermon_hw_snd_wqe_bind_t        *bn;
 440         hermon_hw_snd_wqe_frwr_t        *frwr;
 441         hermon_hw_snd_wqe_local_inv_t   *li;
 442         hermon_hw_wqe_sgl_t             *ds;
 443         ibt_wr_ds_t                     *sgl;
 444         int                             nds;
 445         int                             i, last_ds, num_ds;
 446         uint32_t                        *wqe_start;
 447         int                             sectperwqe;
 448         uint_t                          posted_cnt = 0;
 449         int                             strong_order;
 450         int                             print_rdma;
 451         int                             rlen;
 452         uint32_t                        rkey;
 453         uint64_t                        raddr;
 454 
 455         /* initialize the FMA retry loop */
 456         hermon_pio_init(fm_loop_cnt, fm_status, fm_test_num);
 457 
 458         ASSERT(MUTEX_HELD(&qp->qp_sq_lock));
 459         _NOTE(LOCK_RELEASED_AS_SIDE_EFFECT(&qp->qp_sq_lock))
 460 
 461         /* Save away some initial QP state */
 462         wq = qp->qp_sq_wqhdr;
 463         qsize_msk = wq->wq_mask;
 464         hdrmwqes  = qp->qp_sq_hdrmwqes;              /* in WQEs  */
 465         sectperwqe = 1 << (qp->qp_sq_log_wqesz - 2);
 466 
 467         tail      = wq->wq_tail;
 468         head      = wq->wq_head;
 469         status    = DDI_SUCCESS;
 470 
 471 post_next:
 472         print_rdma = 0;
 473         rlen = 0;
 474         strong_order = 0;
 475 
 476         /*
 477          * Check for "queue full" condition.  If the queue
 478          * is already full, then no more WQEs can be posted.
 479          * So break out, ring a doorbell (if necessary) and


2203 
2204         mutex_exit(&mr->mr_lock);
2205         mutex_exit(&mw->mr_lock);
2206         return (DDI_SUCCESS);
2207 }
2208 
2209 
2210 /*
2211  * hermon_wrid_from_reset_handling()
2212  *    Context: Can be called from interrupt or base context.
2213  */
2214 /* ARGSUSED */
2215 int
2216 hermon_wrid_from_reset_handling(hermon_state_t *state, hermon_qphdl_t qp)
2217 {
2218         hermon_workq_hdr_t      *swq, *rwq;
2219 
2220         if (qp->qp_alloc_flags & IBT_QP_USER_MAP)
2221                 return (DDI_SUCCESS);
2222 
2223 #ifdef __lock_lint
2224         mutex_enter(&qp->qp_rq_cqhdl->cq_lock);
2225         mutex_enter(&qp->qp_sq_cqhdl->cq_lock);
2226 #else
2227         /* grab the cq lock(s) to modify the wqavl tree */
2228         if (qp->qp_rq_cqhdl)
2229                 mutex_enter(&qp->qp_rq_cqhdl->cq_lock);
2230         if (qp->qp_rq_cqhdl != qp->qp_sq_cqhdl &&
2231             qp->qp_sq_cqhdl != NULL)
2232                 mutex_enter(&qp->qp_sq_cqhdl->cq_lock);
2233 #endif
2234 
2235         /* Chain the newly allocated work queue header to the CQ's list */
2236         if (qp->qp_sq_cqhdl)
2237                 hermon_cq_workq_add(qp->qp_sq_cqhdl, &qp->qp_sq_wqavl);
2238 
2239         swq = qp->qp_sq_wqhdr;
2240         swq->wq_head = 0;
2241         swq->wq_tail = 0;
2242         swq->wq_full = 0;
2243 
2244         /*
2245          * Now we repeat all the above operations for the receive work queue,
2246          * or shared receive work queue.
2247          *
2248          * Note: We still use the 'qp_rq_cqhdl' even in the SRQ case.
2249          */
2250 
2251 #ifdef __lock_lint
2252         mutex_enter(&qp->qp_srqhdl->srq_lock);
2253 #else
2254         if (qp->qp_alloc_flags & IBT_QP_USES_SRQ) {
2255                 mutex_enter(&qp->qp_srqhdl->srq_lock);
2256         } else {
2257                 rwq = qp->qp_rq_wqhdr;
2258                 rwq->wq_head = 0;
2259                 rwq->wq_tail = 0;
2260                 rwq->wq_full = 0;
2261                 qp->qp_rq_wqecntr = 0;
2262         }
2263 #endif
2264         hermon_cq_workq_add(qp->qp_rq_cqhdl, &qp->qp_rq_wqavl);
2265 
2266 #ifdef __lock_lint
2267         mutex_exit(&qp->qp_srqhdl->srq_lock);
2268 #else
2269         if (qp->qp_alloc_flags & IBT_QP_USES_SRQ) {
2270                 mutex_exit(&qp->qp_srqhdl->srq_lock);
2271         }
2272 #endif
2273 
2274 #ifdef __lock_lint
2275         mutex_exit(&qp->qp_sq_cqhdl->cq_lock);
2276         mutex_exit(&qp->qp_rq_cqhdl->cq_lock);
2277 #else
2278         if (qp->qp_rq_cqhdl != qp->qp_sq_cqhdl &&
2279             qp->qp_sq_cqhdl != NULL)
2280                 mutex_exit(&qp->qp_sq_cqhdl->cq_lock);
2281         if (qp->qp_rq_cqhdl)
2282                 mutex_exit(&qp->qp_rq_cqhdl->cq_lock);
2283 #endif
2284         return (DDI_SUCCESS);
2285 }
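
The CQ lock choreography in this function (take the RQ CQ lock first, take the SQ CQ lock only when it is a distinct CQ, release in reverse order) guards against a double mutex_enter() when a QP's send and receive queues share one completion queue. The same sequence repeats in hermon_wrid_to_reset_handling() below; a hypothetical helper pair capturing the ordering:

	static void
	qp_cq_locks_enter(hermon_qphdl_t qp)
	{
		/* RQ CQ first; SQ CQ only if it is a different CQ */
		if (qp->qp_rq_cqhdl)
			mutex_enter(&qp->qp_rq_cqhdl->cq_lock);
		if (qp->qp_rq_cqhdl != qp->qp_sq_cqhdl &&
		    qp->qp_sq_cqhdl != NULL)
			mutex_enter(&qp->qp_sq_cqhdl->cq_lock);
	}

	static void
	qp_cq_locks_exit(hermon_qphdl_t qp)
	{
		/* release in the reverse of the acquisition order */
		if (qp->qp_rq_cqhdl != qp->qp_sq_cqhdl &&
		    qp->qp_sq_cqhdl != NULL)
			mutex_exit(&qp->qp_sq_cqhdl->cq_lock);
		if (qp->qp_rq_cqhdl)
			mutex_exit(&qp->qp_rq_cqhdl->cq_lock);
	}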
2286 
2287 
2288 /*
2289  * hermon_wrid_to_reset_handling()
2290  *    Context: Can be called from interrupt or base context.
2291  */
2292 int
2293 hermon_wrid_to_reset_handling(hermon_state_t *state, hermon_qphdl_t qp)
2294 {
2295         if (qp->qp_alloc_flags & IBT_QP_USER_MAP)
2296                 return (DDI_SUCCESS);
2297 
2298         /*
2299          * If there are unpolled entries in these CQs, they are
2300          * polled/flushed.
2301          * Grab the CQ lock(s) before manipulating the lists.
2302          */
2303 #ifdef __lock_lint
2304         mutex_enter(&qp->qp_rq_cqhdl->cq_lock);
2305         mutex_enter(&qp->qp_sq_cqhdl->cq_lock);
2306 #else
2307         /* grab the cq lock(s) to modify the wqavl tree */
2308         if (qp->qp_rq_cqhdl)
2309                 mutex_enter(&qp->qp_rq_cqhdl->cq_lock);
2310         if (qp->qp_rq_cqhdl != qp->qp_sq_cqhdl &&
2311             qp->qp_sq_cqhdl != NULL)
2312                 mutex_enter(&qp->qp_sq_cqhdl->cq_lock);
2313 #endif
2314 
2315 #ifdef __lock_lint
2316         mutex_enter(&qp->qp_srqhdl->srq_lock);
2317 #else
2318         if (qp->qp_alloc_flags & IBT_QP_USES_SRQ) {
2319                 mutex_enter(&qp->qp_srqhdl->srq_lock);
2320         }
2321 #endif
2322         /*
2323          * Flush the entries on the CQ for this QP's QPN.
2324          */
2325         hermon_cq_entries_flush(state, qp);
2326 
2327 #ifdef __lock_lint
2328         mutex_exit(&qp->qp_srqhdl->srq_lock);
2329 #else
2330         if (qp->qp_alloc_flags & IBT_QP_USES_SRQ) {
2331                 mutex_exit(&qp->qp_srqhdl->srq_lock);
2332         }
2333 #endif
2334 
2335         hermon_cq_workq_remove(qp->qp_rq_cqhdl, &qp->qp_rq_wqavl);
2336         if (qp->qp_sq_cqhdl != NULL)
2337                 hermon_cq_workq_remove(qp->qp_sq_cqhdl, &qp->qp_sq_wqavl);
2338 
2339 #ifdef __lock_lint
2340         mutex_exit(&qp->qp_sq_cqhdl->cq_lock);
2341         mutex_exit(&qp->qp_rq_cqhdl->cq_lock);
2342 #else
2343         if (qp->qp_rq_cqhdl != qp->qp_sq_cqhdl &&
2344             qp->qp_sq_cqhdl != NULL)
2345                 mutex_exit(&qp->qp_sq_cqhdl->cq_lock);
2346         if (qp->qp_rq_cqhdl)
2347                 mutex_exit(&qp->qp_rq_cqhdl->cq_lock);
2348 #endif
2349 
2350         return (IBT_SUCCESS);
2351 }
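
hermon_cq_entries_flush() itself is not in this excerpt. Conceptually, moving a QP to reset must complete every outstanding WQE with a "flushed" indication so consumers can reclaim their WRIDs. A purely illustrative sketch of that notion, using only the wq_* fields visible in this review; everything else here is hypothetical:

	/*
	 * Conceptual flush: hand every outstanding WRID between head
	 * and tail to a completion callback, then drain the queue.
	 */
	static void
	workq_flush(hermon_workq_hdr_t *wq, void (*complete)(uint64_t wrid))
	{
		uint32_t indx;

		for (indx = wq->wq_head; indx != wq->wq_tail;
		    indx = (indx + 1) & wq->wq_mask)
			complete(wq->wq_wrid[indx]);
		wq->wq_head = wq->wq_tail;
	}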
2352 
2353 
2354 /*
2355  * hermon_wrid_get_entry()
2356  *    Context: Can be called from interrupt or base context.
2357  */
2358 uint64_t
2359 hermon_wrid_get_entry(hermon_cqhdl_t cq, hermon_hw_cqe_t *cqe)
2360 {
2361         hermon_workq_avl_t      *wqa;
2362         hermon_workq_hdr_t      *wq;
2363         uint64_t                wrid;
2364         uint_t                  send_or_recv, qpnum;
2365         uint32_t                indx;
2366 
2367         /*
2368          * Determine whether this CQE is a send or receive completion.


2429 
2430 
2431 /*
2432  * hermon_wrid_wqavl_find()
2433  *    Context: Can be called from interrupt or base context.
2434  */
2435 static hermon_workq_avl_t *
2436 hermon_wrid_wqavl_find(hermon_cqhdl_t cq, uint_t qpn, uint_t wq_type)
2437 {
2438         hermon_workq_avl_t      *curr;
2439         hermon_workq_compare_t  cmp;
2440 
2441         /*
2442          * Walk the CQ's work queue list, trying to find a send or recv queue
2443          * with the same QP number.  We do this even if we are going to later
2444          * create a new entry because it helps us easily find the end of the
2445          * list.
2446          */
2447         cmp.cmp_qpn = qpn;
2448         cmp.cmp_type = wq_type;
2449 #ifdef __lock_lint
2450         hermon_wrid_workq_compare(NULL, NULL);
2451 #endif
2452         curr = avl_find(&cq->cq_wrid_wqhdr_avl_tree, &cmp, NULL);
2453 
2454         return (curr);
2455 }
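
The avl_find() here passes a hermon_workq_compare_t as the search value while the tree nodes are hermon_workq_avl_t structures, so the tree's comparator must bridge the two. The driver's real comparator (hermon_wrid_workq_compare()) is not in this excerpt; a sketch consistent with the usage above, with the node field names taken from hermon_cq_workq_add() below (illumos AVL comparators must return -1, 0, or +1):

	static int
	wqavl_compare(const void *q, const void *e)
	{
		const hermon_workq_compare_t	*cmp = q;
		const hermon_workq_avl_t	*node = e;

		/* order by QP number first, then by queue type */
		if (cmp->cmp_qpn < node->wqa_qpn)
			return (-1);
		if (cmp->cmp_qpn > node->wqa_qpn)
			return (+1);
		if (cmp->cmp_type < node->wqa_type)
			return (-1);
		if (cmp->cmp_type > node->wqa_type)
			return (+1);
		return (0);
	}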
2456 
2457 
2458 /*
2459  * hermon_wrid_wqhdr_create()
2460  *    Context: Can be called from base context.
2461  */
2462 /* ARGSUSED */
2463 hermon_workq_hdr_t *
2464 hermon_wrid_wqhdr_create(int bufsz)
2465 {
2466         hermon_workq_hdr_t      *wqhdr;
2467 
2468         /*
2469          * Allocate space for the wqhdr, and an array to record all the wrids.
2470          */
2471         wqhdr = (hermon_workq_hdr_t *)kmem_zalloc(sizeof (*wqhdr), KM_NOSLEEP);
2472         if (wqhdr == NULL) {
2473                 return (NULL);
2474         }
2475         _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*wqhdr))
2476         wqhdr->wq_wrid = kmem_zalloc(bufsz * sizeof (uint64_t), KM_NOSLEEP);
2477         if (wqhdr->wq_wrid == NULL) {
2478                 kmem_free(wqhdr, sizeof (*wqhdr));
2479                 return (NULL);
2480         }
2481         wqhdr->wq_size = bufsz;
2482         wqhdr->wq_mask = bufsz - 1;
2483 
2484         return (wqhdr);
2485 }
2486 
2487 void
2488 hermon_wrid_wqhdr_destroy(hermon_workq_hdr_t *wqhdr)
2489 {
2490         kmem_free(wqhdr->wq_wrid, wqhdr->wq_size * sizeof (uint64_t));
2491         kmem_free(wqhdr, sizeof (*wqhdr));
2492 }
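
hermon_wrid_wqhdr_create() depends on bufsz being a power of two (the callers, not shown here, are expected to guarantee that), which is what lets wq_mask = bufsz - 1 replace a modulo with a single AND whenever the WRID array is indexed. A sketch, with the lookup helper name assumed:

	static uint64_t
	wrid_lookup(hermon_workq_hdr_t *wqhdr, uint32_t wqe_counter)
	{
		/* wq_mask == wq_size - 1; valid only for power-of-two sizes */
		return (wqhdr->wq_wrid[wqe_counter & wqhdr->wq_mask]);
	}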
2493 
2494 
2495 /*
2496  * hermon_cq_workq_add()
2497  *    Context: Can be called from interrupt or base context.
2498  */
2499 static void
2500 hermon_cq_workq_add(hermon_cqhdl_t cq, hermon_workq_avl_t *wqavl)
2501 {
2502         hermon_workq_compare_t  cmp;
2503         avl_index_t             where;
2504 
2505         cmp.cmp_qpn = wqavl->wqa_qpn;
2506         cmp.cmp_type = wqavl->wqa_type;
2507 #ifdef __lock_lint
2508         hermon_wrid_workq_compare(NULL, NULL);
2509 #endif
2510         (void) avl_find(&cq->cq_wrid_wqhdr_avl_tree, &cmp, &where);
2511         avl_insert(&cq->cq_wrid_wqhdr_avl_tree, wqavl, where);
2512 }
2513 
2514 
2515 /*
2516  * hermon_cq_workq_remove()
2517  *    Context: Can be called from interrupt or base context.
2518  */
2519 static void
2520 hermon_cq_workq_remove(hermon_cqhdl_t cq, hermon_workq_avl_t *wqavl)
2521 {
2522 #ifdef __lock_lint
2523         hermon_wrid_workq_compare(NULL, NULL);
2524 #endif
2525         avl_remove(&cq->cq_wrid_wqhdr_avl_tree, wqavl);
2526 }
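
hermon_cq_workq_add() uses the usual illumos AVL insertion idiom: an avl_find() with a non-NULL 'where' computes the insertion point even on a miss, and avl_insert() places the node there. Together with hermon_cq_workq_remove(), the pair brackets a QP's time on the CQ's wqavl tree, as the reset-handling functions above show; an illustrative wrapper pair (names hypothetical, the CQ locking shown earlier elided):

	static void
	qp_workq_attach(hermon_qphdl_t qp)
	{
		hermon_cq_workq_add(qp->qp_rq_cqhdl, &qp->qp_rq_wqavl);
		if (qp->qp_sq_cqhdl != NULL)
			hermon_cq_workq_add(qp->qp_sq_cqhdl,
			    &qp->qp_sq_wqavl);
	}

	static void
	qp_workq_detach(hermon_qphdl_t qp)
	{
		hermon_cq_workq_remove(qp->qp_rq_cqhdl, &qp->qp_rq_wqavl);
		if (qp->qp_sq_cqhdl != NULL)
			hermon_cq_workq_remove(qp->qp_sq_cqhdl,
			    &qp->qp_sq_wqavl);
	}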


  82         uint64_t                        *desc;
  83         uint32_t                        desc_sz;
  84         uint32_t                        signaled_dbd, solicited;
  85         uint32_t                        head, tail, next_tail, qsize_msk;
  86         uint32_t                        hdrmwqes;
  87         uint32_t                        nopcode, fence, immed_data = 0;
  88         hermon_hw_wqe_sgl_t             *ds, *old_ds;
  89         ibt_wr_ds_t                     *sgl;
  90         int                             nds;
  91         int                             i, j, last_ds, num_ds, status;
  92         uint32_t                        *wqe_start;
  93         int                             sectperwqe;
  94         uint_t                          posted_cnt = 0;
  95         int                             total_len, strong_order, fc_bits, cksum;
  96 
  97 
  98         /* initialize the FMA retry loop */
  99         hermon_pio_init(fm_loop_cnt, fm_status, fm_test_num);
 100 
 101         ASSERT(MUTEX_HELD(&qp->qp_sq_lock));

 102 
 103         /* Grab the lock for the WRID list */
 104         membar_consumer();
 105 
 106         /* Save away some initial QP state */
 107         wq = qp->qp_sq_wqhdr;
 108         qsize_msk = wq->wq_mask;
 109         hdrmwqes  = qp->qp_sq_hdrmwqes;              /* in WQEs  */
 110         sectperwqe = 1 << (qp->qp_sq_log_wqesz - 2);
 111 
 112         tail      = wq->wq_tail;
 113         head      = wq->wq_head;
 114         status    = DDI_SUCCESS;
 115 
 116 post_next:
 117         /*
 118          * Check for "queue full" condition.  If the queue
 119          * is already full, then no more WQEs can be posted.
 120          * So break out, ring a doorbell (if necessary) and
 121          * return an error
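
Note that both the old and new code keep membar_consumer() under a comment that still speaks of grabbing the WRID list lock; that lock is evidently gone and only the barrier remains. A minimal sketch of the publish/consume pairing such a barrier implies, using the illumos primitives from <sys/atomic.h>; the shared variables are illustrative, not driver state:

	#include <sys/types.h>
	#include <sys/atomic.h>

	static volatile uint64_t	wrid_slot;	/* illustrative */
	static volatile uint_t		wrid_ready;	/* illustrative */

	static void
	wrid_publish(uint64_t wrid)
	{
		wrid_slot = wrid;
		membar_producer();	/* payload visible before the flag */
		wrid_ready = 1;
	}

	static uint64_t
	wrid_consume(void)
	{
		while (wrid_ready == 0)
			continue;
		membar_consumer();	/* flag read before payload read */
		return (wrid_slot);
	}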


 438         hermon_hw_snd_wqe_bind_t        *bn;
 439         hermon_hw_snd_wqe_frwr_t        *frwr;
 440         hermon_hw_snd_wqe_local_inv_t   *li;
 441         hermon_hw_wqe_sgl_t             *ds;
 442         ibt_wr_ds_t                     *sgl;
 443         int                             nds;
 444         int                             i, last_ds, num_ds;
 445         uint32_t                        *wqe_start;
 446         int                             sectperwqe;
 447         uint_t                          posted_cnt = 0;
 448         int                             strong_order;
 449         int                             print_rdma;
 450         int                             rlen;
 451         uint32_t                        rkey;
 452         uint64_t                        raddr;
 453 
 454         /* initialize the FMA retry loop */
 455         hermon_pio_init(fm_loop_cnt, fm_status, fm_test_num);
 456 
 457         ASSERT(MUTEX_HELD(&qp->qp_sq_lock));

 458 
 459         /* Save away some initial QP state */
 460         wq = qp->qp_sq_wqhdr;
 461         qsize_msk = wq->wq_mask;
 462         hdrmwqes  = qp->qp_sq_hdrmwqes;              /* in WQEs  */
 463         sectperwqe = 1 << (qp->qp_sq_log_wqesz - 2);
 464 
 465         tail      = wq->wq_tail;
 466         head      = wq->wq_head;
 467         status    = DDI_SUCCESS;
 468 
 469 post_next:
 470         print_rdma = 0;
 471         rlen = 0;
 472         strong_order = 0;
 473 
 474         /*
 475          * Check for "queue full" condition.  If the queue
 476          * is already full, then no more WQEs can be posted.
 477          * So break out, ring a doorbell (if necessary) and


2201 
2202         mutex_exit(&mr->mr_lock);
2203         mutex_exit(&mw->mr_lock);
2204         return (DDI_SUCCESS);
2205 }
2206 
2207 
2208 /*
2209  * hermon_wrid_from_reset_handling()
2210  *    Context: Can be called from interrupt or base context.
2211  */
2212 /* ARGSUSED */
2213 int
2214 hermon_wrid_from_reset_handling(hermon_state_t *state, hermon_qphdl_t qp)
2215 {
2216         hermon_workq_hdr_t      *swq, *rwq;
2217 
2218         if (qp->qp_alloc_flags & IBT_QP_USER_MAP)
2219                 return (DDI_SUCCESS);
2220 




2221         /* grab the cq lock(s) to modify the wqavl tree */
2222         if (qp->qp_rq_cqhdl)
2223                 mutex_enter(&qp->qp_rq_cqhdl->cq_lock);
2224         if (qp->qp_rq_cqhdl != qp->qp_sq_cqhdl &&
2225             qp->qp_sq_cqhdl != NULL)
2226                 mutex_enter(&qp->qp_sq_cqhdl->cq_lock);

2227 
2228         /* Chain the newly allocated work queue header to the CQ's list */
2229         if (qp->qp_sq_cqhdl)
2230                 hermon_cq_workq_add(qp->qp_sq_cqhdl, &qp->qp_sq_wqavl);
2231 
2232         swq = qp->qp_sq_wqhdr;
2233         swq->wq_head = 0;
2234         swq->wq_tail = 0;
2235         swq->wq_full = 0;
2236 
2237         /*
2238          * Now we repeat all the above operations for the receive work queue,
2239          * or shared receive work queue.
2240          *
2241          * Note: We still use the 'qp_rq_cqhdl' even in the SRQ case.
2242          */
2243 



2244         if (qp->qp_alloc_flags & IBT_QP_USES_SRQ) {
2245                 mutex_enter(&qp->qp_srqhdl->srq_lock);
2246         } else {
2247                 rwq = qp->qp_rq_wqhdr;
2248                 rwq->wq_head = 0;
2249                 rwq->wq_tail = 0;
2250                 rwq->wq_full = 0;
2251                 qp->qp_rq_wqecntr = 0;
2252         }

2253         hermon_cq_workq_add(qp->qp_rq_cqhdl, &qp->qp_rq_wqavl);
2254 



2255         if (qp->qp_alloc_flags & IBT_QP_USES_SRQ) {
2256                 mutex_exit(&qp->qp_srqhdl->srq_lock);
2257         }

2258 




2259         if (qp->qp_rq_cqhdl != qp->qp_sq_cqhdl &&
2260             qp->qp_sq_cqhdl != NULL)
2261                 mutex_exit(&qp->qp_sq_cqhdl->cq_lock);
2262         if (qp->qp_rq_cqhdl)
2263                 mutex_exit(&qp->qp_rq_cqhdl->cq_lock);

2264         return (DDI_SUCCESS);
2265 }
2266 
2267 
2268 /*
2269  * hermon_wrid_to_reset_handling()
2270  *    Context: Can be called from interrupt or base context.
2271  */
2272 int
2273 hermon_wrid_to_reset_handling(hermon_state_t *state, hermon_qphdl_t qp)
2274 {
2275         if (qp->qp_alloc_flags & IBT_QP_USER_MAP)
2276                 return (DDI_SUCCESS);
2277 
2278         /*
2279          * If there are unpolled entries in these CQs, they are
2280          * polled/flushed.
2281          * Grab the CQ lock(s) before manipulating the lists.
2282          */




2283         /* grab the cq lock(s) to modify the wqavl tree */
2284         if (qp->qp_rq_cqhdl)
2285                 mutex_enter(&qp->qp_rq_cqhdl->cq_lock);
2286         if (qp->qp_rq_cqhdl != qp->qp_sq_cqhdl &&
2287             qp->qp_sq_cqhdl != NULL)
2288                 mutex_enter(&qp->qp_sq_cqhdl->cq_lock);

2289 



2290         if (qp->qp_alloc_flags & IBT_QP_USES_SRQ) {
2291                 mutex_enter(&qp->qp_srqhdl->srq_lock);
2292         }

2293         /*
2294          * Flush the entries on the CQ for this QP's QPN.
2295          */
2296         hermon_cq_entries_flush(state, qp);
2297 



2298         if (qp->qp_alloc_flags & IBT_QP_USES_SRQ) {
2299                 mutex_exit(&qp->qp_srqhdl->srq_lock);
2300         }

2301 
2302         hermon_cq_workq_remove(qp->qp_rq_cqhdl, &qp->qp_rq_wqavl);
2303         if (qp->qp_sq_cqhdl != NULL)
2304                 hermon_cq_workq_remove(qp->qp_sq_cqhdl, &qp->qp_sq_wqavl);
2305 




2306         if (qp->qp_rq_cqhdl != qp->qp_sq_cqhdl &&
2307             qp->qp_sq_cqhdl != NULL)
2308                 mutex_exit(&qp->qp_sq_cqhdl->cq_lock);
2309         if (qp->qp_rq_cqhdl)
2310                 mutex_exit(&qp->qp_rq_cqhdl->cq_lock);

2311 
2312         return (IBT_SUCCESS);
2313 }
2314 
2315 
2316 /*
2317  * hermon_wrid_get_entry()
2318  *    Context: Can be called from interrupt or base context.
2319  */
2320 uint64_t
2321 hermon_wrid_get_entry(hermon_cqhdl_t cq, hermon_hw_cqe_t *cqe)
2322 {
2323         hermon_workq_avl_t      *wqa;
2324         hermon_workq_hdr_t      *wq;
2325         uint64_t                wrid;
2326         uint_t                  send_or_recv, qpnum;
2327         uint32_t                indx;
2328 
2329         /*
2330          * Determine whether this CQE is a send or receive completion.


2391 
2392 
2393 /*
2394  * hermon_wrid_wqavl_find()
2395  *    Context: Can be called from interrupt or base context.
2396  */
2397 static hermon_workq_avl_t *
2398 hermon_wrid_wqavl_find(hermon_cqhdl_t cq, uint_t qpn, uint_t wq_type)
2399 {
2400         hermon_workq_avl_t      *curr;
2401         hermon_workq_compare_t  cmp;
2402 
2403         /*
2404          * Walk the CQ's work queue list, trying to find a send or recv queue
2405          * with the same QP number.  We do this even if we are going to later
2406          * create a new entry because it helps us easily find the end of the
2407          * list.
2408          */
2409         cmp.cmp_qpn = qpn;
2410         cmp.cmp_type = wq_type;



2411         curr = avl_find(&cq->cq_wrid_wqhdr_avl_tree, &cmp, NULL);
2412 
2413         return (curr);
2414 }
2415 
2416 
2417 /*
2418  * hermon_wrid_wqhdr_create()
2419  *    Context: Can be called from base context.
2420  */
2421 /* ARGSUSED */
2422 hermon_workq_hdr_t *
2423 hermon_wrid_wqhdr_create(int bufsz)
2424 {
2425         hermon_workq_hdr_t      *wqhdr;
2426 
2427         /*
2428          * Allocate space for the wqhdr, and an array to record all the wrids.
2429          */
2430         wqhdr = (hermon_workq_hdr_t *)kmem_zalloc(sizeof (*wqhdr), KM_NOSLEEP);
2431         if (wqhdr == NULL) {
2432                 return (NULL);
2433         }

2434         wqhdr->wq_wrid = kmem_zalloc(bufsz * sizeof (uint64_t), KM_NOSLEEP);
2435         if (wqhdr->wq_wrid == NULL) {
2436                 kmem_free(wqhdr, sizeof (*wqhdr));
2437                 return (NULL);
2438         }
2439         wqhdr->wq_size = bufsz;
2440         wqhdr->wq_mask = bufsz - 1;
2441 
2442         return (wqhdr);
2443 }
2444 
2445 void
2446 hermon_wrid_wqhdr_destroy(hermon_workq_hdr_t *wqhdr)
2447 {
2448         kmem_free(wqhdr->wq_wrid, wqhdr->wq_size * sizeof (uint64_t));
2449         kmem_free(wqhdr, sizeof (*wqhdr));
2450 }
2451 
2452 
2453 /*
2454  * hermon_cq_workq_add()
2455  *    Context: Can be called from interrupt or base context.
2456  */
2457 static void
2458 hermon_cq_workq_add(hermon_cqhdl_t cq, hermon_workq_avl_t *wqavl)
2459 {
2460         hermon_workq_compare_t  cmp;
2461         avl_index_t             where;
2462 
2463         cmp.cmp_qpn = wqavl->wqa_qpn;
2464         cmp.cmp_type = wqavl->wqa_type;



2465         (void) avl_find(&cq->cq_wrid_wqhdr_avl_tree, &cmp, &where);
2466         avl_insert(&cq->cq_wrid_wqhdr_avl_tree, wqavl, where);
2467 }
2468 
2469 
2470 /*
2471  * hermon_cq_workq_remove()
2472  *    Context: Can be called from interrupt or base context.
2473  */
2474 static void
2475 hermon_cq_workq_remove(hermon_cqhdl_t cq, hermon_workq_avl_t *wqavl)
2476 {



2477         avl_remove(&cq->cq_wrid_wqhdr_avl_tree, wqavl);
2478 }