Print this page
8368 remove warlock leftovers from usr/src/uts

Split Close
Expand all
Collapse all
          --- old/usr/src/uts/common/io/ib/adapters/tavor/tavor_wr.c
          +++ new/usr/src/uts/common/io/ib/adapters/tavor/tavor_wr.c
↓ open down ↓ 2353 lines elided ↑ open up ↑
2354 2354                           * If we couldn't find/allocate space for the workq
2355 2355                           * header, then drop the lock(s) and return failure.
2356 2356                           */
2357 2357                          tavor_wrid_wqhdr_unlock_both(qp);
2358 2358                          TNF_PROBE_0(tavor_wrid_from_reset_handling_wqhdr_fail,
2359 2359                              TAVOR_TNF_ERROR, "");
2360 2360                          TAVOR_TNF_EXIT(tavor_wrid_from_reset_handling);
2361 2361                          return (ibc_get_ci_failure(0));
2362 2362                  }
2363 2363          }
2364      -        _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*swq))
2365 2364          qp->qp_sq_wqhdr = swq;
2366 2365          swq->wq_size = qp->qp_sq_bufsz;
2367 2366          swq->wq_head = 0;
2368 2367          swq->wq_tail = 0;
2369 2368          swq->wq_full = 0;
2370 2369  
2371 2370          /*
2372 2371           * Allocate space for the tavor_wrid_entry_t container
2373 2372           */
2374 2373          s_wridlist = tavor_wrid_get_list(swq->wq_size);
↓ open down ↓ 7 lines elided ↑ open up ↑
2382 2381                  if (create_new_swq) {
2383 2382                          tavor_cq_wqhdr_remove(qp->qp_sq_cqhdl, swq);
2384 2383                  }
2385 2384  
2386 2385                  tavor_wrid_wqhdr_unlock_both(qp);
2387 2386                  TNF_PROBE_0(tavor_wrid_from_reset_handling_wridlist_fail,
2388 2387                      TAVOR_TNF_ERROR, "");
2389 2388                  TAVOR_TNF_EXIT(tavor_wrid_from_reset_handling);
2390 2389                  return (ibc_get_ci_failure(0));
2391 2390          }
2392      -        _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*s_wridlist))
2393 2391          s_wridlist->wl_wqhdr = swq;
2394 2392  
2395 2393          /* Chain the new WRID list container to the workq hdr list */
2396 2394          mutex_enter(&swq->wq_wrid_wql->wql_lock);
2397 2395          tavor_wrid_wqhdr_add(swq, s_wridlist);
2398 2396          mutex_exit(&swq->wq_wrid_wql->wql_lock);
2399 2397  
2400 2398          qp_srq_en = qp->qp_srq_en;
2401 2399  
2402      -#ifdef __lock_lint
2403      -        mutex_enter(&qp->qp_srqhdl->srq_lock);
2404      -#else
2405 2400          if (qp_srq_en == TAVOR_QP_SRQ_ENABLED) {
2406 2401                  mutex_enter(&qp->qp_srqhdl->srq_lock);
2407 2402          }
2408      -#endif
     2403 +
2409 2404          /*
2410 2405           * Now we repeat all the above operations for the receive work queue,
2411 2406           * or shared receive work queue.
2412 2407           *
2413 2408           * Note: We still use the 'qp_rq_cqhdl' even in the SRQ case.
2414 2409           */
2415 2410          rwq = tavor_wrid_wqhdr_find(qp->qp_rq_cqhdl, qp->qp_qpnum,
2416 2411              TAVOR_WR_RECV);
2417 2412          if (rwq == NULL) {
2418 2413                  create_new_rwq = create_wql = 1;
↓ open down ↓ 21 lines elided ↑ open up ↑
2440 2435                           * and return failure.
2441 2436                           */
2442 2437                          mutex_enter(&swq->wq_wrid_wql->wql_lock);
2443 2438                          tavor_wrid_wqhdr_remove(swq, s_wridlist);
2444 2439                          mutex_exit(&swq->wq_wrid_wql->wql_lock);
2445 2440                          if (create_new_swq) {
2446 2441                                  tavor_cq_wqhdr_remove(qp->qp_sq_cqhdl,
2447 2442                                      swq);
2448 2443                          }
2449 2444  
2450      -#ifdef __lock_lint
2451      -                        mutex_exit(&qp->qp_srqhdl->srq_lock);
2452      -#else
2453 2445                          if (qp_srq_en == TAVOR_QP_SRQ_ENABLED) {
2454 2446                                  mutex_exit(&qp->qp_srqhdl->srq_lock);
2455 2447                          }
2456      -#endif
2457 2448  
2458 2449                          tavor_wrid_wqhdr_unlock_both(qp);
2459 2450                          TNF_PROBE_0(tavor_wrid_from_reset_handling_wqhdr_fail,
2460 2451                              TAVOR_TNF_ERROR, "");
2461 2452                          TAVOR_TNF_EXIT(tavor_wrid_from_reset_handling);
2462 2453                          return (ibc_get_ci_failure(0));
2463 2454                  }
2464 2455          }
2465      -        _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*rwq))
2466 2456  
2467 2457          /*
2468 2458           * Setup receive workq hdr
2469 2459           *
2470 2460           * If the QP is on an SRQ, we set up the SRQ specific fields,
2471 2461           * keeping a copy of the rwq pointer, setting the rwq bufsize
2472 2462           * appropriately, and initializing our part of the WQLock.
2473 2463           *
2474 2464           * In the normal QP case, the QP recv queue bufsize is used.
2475 2465           */
↓ open down ↓ 48 lines elided ↑ open up ↑
2524 2514                  mutex_enter(&swq->wq_wrid_wql->wql_lock);
2525 2515                  tavor_wrid_wqhdr_remove(swq, s_wridlist);
2526 2516                  mutex_exit(&swq->wq_wrid_wql->wql_lock);
2527 2517                  if (create_new_swq) {
2528 2518                          tavor_cq_wqhdr_remove(qp->qp_sq_cqhdl, swq);
2529 2519                  }
2530 2520                  if (create_new_rwq) {
2531 2521                          tavor_cq_wqhdr_remove(qp->qp_rq_cqhdl, rwq);
2532 2522                  }
2533 2523  
2534      -#ifdef __lock_lint
2535      -                mutex_exit(&qp->qp_srqhdl->srq_lock);
2536      -#else
2537 2524                  if (qp_srq_en == TAVOR_QP_SRQ_ENABLED) {
2538 2525                          mutex_exit(&qp->qp_srqhdl->srq_lock);
2539 2526                  }
2540      -#endif
2541 2527  
2542 2528                  tavor_wrid_wqhdr_unlock_both(qp);
2543 2529                  TNF_PROBE_0(tavor_wrid_from_reset_handling_wridlist_fail,
2544 2530                      TAVOR_TNF_ERROR, "");
2545 2531                  TAVOR_TNF_EXIT(tavor_wrid_from_reset_handling);
2546 2532                  return (ibc_get_ci_failure(0));
2547 2533          }
2548      -        _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*r_wridlist))
2549 2534  
2550 2535          /*
2551 2536           * Initialize the wridlist
2552 2537           *
2553 2538           * In the normal QP case, there is no special initialization needed.
2554 2539           * We simply setup the wridlist backpointer to be the receive wqhdr
2555 2540           * (rwq).
2556 2541           *
2557 2542           * But in the SRQ case, there is no backpointer to the wqhdr possible.
2558 2543           * Instead we set 'wl_srq_en', specifying this wridlist is on an SRQ
↓ open down ↓ 20 lines elided ↑ open up ↑
2579 2564                  }
2580 2565          } else {
2581 2566                  r_wridlist->wl_wqhdr = rwq;
2582 2567          }
2583 2568  
2584 2569          /* Chain the WRID list "container" to the workq hdr list */
2585 2570          mutex_enter(&rwq->wq_wrid_wql->wql_lock);
2586 2571          tavor_wrid_wqhdr_add(rwq, r_wridlist);
2587 2572          mutex_exit(&rwq->wq_wrid_wql->wql_lock);
2588 2573  
2589      -#ifdef __lock_lint
2590      -        mutex_exit(&qp->qp_srqhdl->srq_lock);
2591      -#else
2592 2574          if (qp_srq_en == TAVOR_QP_SRQ_ENABLED) {
2593 2575                  mutex_exit(&qp->qp_srqhdl->srq_lock);
2594 2576          }
2595      -#endif
2596 2577  
2597      -        _NOTE(NOW_VISIBLE_TO_OTHER_THREADS(*r_wridlist))
2598      -        _NOTE(NOW_VISIBLE_TO_OTHER_THREADS(*rwq))
2599      -        _NOTE(NOW_VISIBLE_TO_OTHER_THREADS(*s_wridlist))
2600      -        _NOTE(NOW_VISIBLE_TO_OTHER_THREADS(*swq))
2601      -
2602 2578          tavor_wrid_wqhdr_unlock_both(qp);
2603 2579          TAVOR_TNF_EXIT(tavor_wrid_from_reset_handling);
2604 2580          return (DDI_SUCCESS);
2605 2581  }
2606 2582  
2607 2583  
2608 2584  /*
2609 2585   * tavor_wrid_to_reset_handling()
2610 2586   *    Context: Can be called from interrupt or base context.
2611 2587   */
↓ open down ↓ 504 lines elided ↑ open up ↑
3116 3092  
3117 3093          /*
3118 3094           * Walk the list of work queue headers and free up all the WRID list
3119 3095           * containers chained to it.  Note: We don't need to grab the locks
3120 3096           * for each of the individual WRID lists here because the only way
3121 3097           * things can be added or removed from the list at this point would be
3122 3098           * through posting a work request to a QP.  But if we've come this far,
3123 3099           * then we can be assured that there are no longer any QPs associated
3124 3100           * with the CQ that we are trying to free.
3125 3101           */
3126      -#ifdef __lock_lint
3127      -        tavor_wrid_wqhdr_compare(NULL, NULL);
3128      -#endif
3129 3102          treep = &cq->cq_wrid_wqhdr_avl_tree;
3130 3103          while ((curr = avl_destroy_nodes(treep, &cookie)) != NULL) {
3131      -                _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*curr))
3132 3104                  container = curr->wq_wrid_poll;
3133 3105                  while (container != NULL) {
3134 3106                          to_free   = container;
3135 3107                          container = container->wl_next;
3136 3108                          /*
3137 3109                           * If reaping the WRID list containers pulls the last
3138 3110                           * container from the given work queue header, then
3139 3111                           * we free the work queue header as well.  Note: we
3140 3112                           * ignore the return value because we know that the
3141 3113                           * work queue header should always be freed once the
↓ open down ↓ 36 lines elided ↑ open up ↑
3178 3150  
3179 3151          /*
3180 3152           * Note that this allocation has to be a NOSLEEP operation here
3181 3153           * because we are holding the "wqhdr_list_lock" and, therefore,
3182 3154           * could get raised to the interrupt level.
3183 3155           */
3184 3156          wridlist = (tavor_wrid_list_hdr_t *)kmem_zalloc(size, KM_NOSLEEP);
3185 3157          if (wridlist == NULL) {
3186 3158                  return (NULL);
3187 3159          }
3188      -        _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*wridlist))
3189 3160  
3190 3161          /* Complete the "container" initialization */
3191 3162          wridlist->wl_size = qsize;
3192 3163          wridlist->wl_full = 0;
3193 3164          wridlist->wl_head = 0;
3194 3165          wridlist->wl_tail = 0;
3195 3166          wridlist->wl_wre = (tavor_wrid_entry_t *)kmem_zalloc(qsize *
3196 3167              sizeof (tavor_wrid_entry_t), KM_NOSLEEP);
3197 3168          if (wridlist->wl_wre == NULL) {
3198 3169                  kmem_free(wridlist, size);
↓ open down ↓ 121 lines elided ↑ open up ↑
3320 3291          ASSERT(MUTEX_HELD(&cq->cq_wrid_wqhdr_lock));
3321 3292  
3322 3293          /*
3323 3294           * Walk the CQ's work queue list, trying to find a send or recv queue
3324 3295           * with the same QP number.  We do this even if we are going to later
3325 3296           * create a new entry because it helps us easily find the end of the
3326 3297           * list.
3327 3298           */
3328 3299          cmp.cmp_qpn = qpn;
3329 3300          cmp.cmp_type = wq_type;
3330      -#ifdef __lock_lint
3331      -        tavor_wrid_wqhdr_compare(NULL, NULL);
3332      -#endif
3333 3301          curr = avl_find(&cq->cq_wrid_wqhdr_avl_tree, &cmp, NULL);
3334 3302  
3335 3303          TAVOR_TNF_EXIT(tavor_wrid_wqhdr_find);
3336 3304          return (curr);
3337 3305  }
3338 3306  
3339 3307  
3340 3308  /*
3341 3309   * tavor_wrid_wqhdr_create()
3342 3310   *    Context: Can be called from interrupt or base context.
↓ open down ↓ 14 lines elided ↑ open up ↑
3357 3325           * which needs to be initialized.  Note that this allocation has to be
3358 3326           * a NOSLEEP operation because we are holding the "cq_wrid_wqhdr_lock"
3359 3327           * and, therefore, could get raised to the interrupt level.
3360 3328           */
3361 3329          wqhdr_tmp = (tavor_workq_hdr_t *)kmem_zalloc(
3362 3330              sizeof (tavor_workq_hdr_t), KM_NOSLEEP);
3363 3331          if (wqhdr_tmp == NULL) {
3364 3332                  TAVOR_TNF_EXIT(tavor_wrid_wqhdr_create);
3365 3333                  return (NULL);
3366 3334          }
3367      -        _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*wqhdr_tmp))
3368 3335          wqhdr_tmp->wq_qpn       = qpn;
3369 3336          wqhdr_tmp->wq_type      = wq_type;
3370 3337  
3371 3338          if (create_wql) {
3372 3339                  wqhdr_tmp->wq_wrid_wql = tavor_wrid_wql_create(state);
3373 3340                  if (wqhdr_tmp->wq_wrid_wql == NULL) {
3374 3341                          kmem_free(wqhdr_tmp, sizeof (tavor_workq_hdr_t));
3375 3342                          TAVOR_TNF_EXIT(tavor_wrid_wqhdr_create);
3376 3343                          return (NULL);
3377 3344                  }
↓ open down ↓ 202 lines elided ↑ open up ↑
3580 3547   *    Context: Can be called from interrupt or base context.
3581 3548   */
3582 3549  static void
3583 3550  tavor_wrid_wqhdr_lock_both(tavor_qphdl_t qp)
3584 3551  {
3585 3552          tavor_cqhdl_t   sq_cq, rq_cq;
3586 3553  
3587 3554          sq_cq = qp->qp_sq_cqhdl;
3588 3555          rq_cq = qp->qp_rq_cqhdl;
3589 3556  
3590      -_NOTE(MUTEX_ACQUIRED_AS_SIDE_EFFECT(&sq_cq->cq_wrid_wqhdr_lock))
3591      -_NOTE(MUTEX_ACQUIRED_AS_SIDE_EFFECT(&rq_cq->cq_wrid_wqhdr_lock))
3592      -
3593 3557          /*
3594 3558           * If both work queues (send and recv) share a completion queue, then
3595 3559           * grab the common lock.  If they use different CQs (hence different
3596 3560           * "cq_wrid_wqhdr_list" locks), then grab the send one first, then the
3597 3561           * receive.  We do this consistently and correctly in
3598 3562           * tavor_wrid_wqhdr_unlock_both() below to avoid introducing any kind
3599      -         * of dead lock condition.  Note:  We add the "__lock_lint" code here
3600      -         * to fake out warlock into thinking we've grabbed both locks (when,
3601      -         * in fact, we only needed the one).
     3563 +         * of deadlock condition.
3602 3564           */
3603 3565          if (sq_cq == rq_cq) {
3604 3566                  mutex_enter(&sq_cq->cq_wrid_wqhdr_lock);
3605      -#ifdef  __lock_lint
3606      -                mutex_enter(&rq_cq->cq_wrid_wqhdr_lock);
3607      -#endif
3608 3567          } else {
3609 3568                  mutex_enter(&sq_cq->cq_wrid_wqhdr_lock);
3610 3569                  mutex_enter(&rq_cq->cq_wrid_wqhdr_lock);
3611 3570          }
3612 3571  }
3613 3572  
3614 3573  /*
3615 3574   * tavor_wrid_wqhdr_unlock_both()
3616 3575   *    Context: Can be called from interrupt or base context.
3617 3576   */
3618 3577  static void
3619 3578  tavor_wrid_wqhdr_unlock_both(tavor_qphdl_t qp)
3620 3579  {
3621 3580          tavor_cqhdl_t   sq_cq, rq_cq;
3622 3581  
3623 3582          sq_cq = qp->qp_sq_cqhdl;
3624 3583          rq_cq = qp->qp_rq_cqhdl;
3625 3584  
3626      -_NOTE(LOCK_RELEASED_AS_SIDE_EFFECT(&rq_cq->cq_wrid_wqhdr_lock))
3627      -_NOTE(LOCK_RELEASED_AS_SIDE_EFFECT(&sq_cq->cq_wrid_wqhdr_lock))
3628      -
3629 3585          /*
3630 3586           * See tavor_wrid_wqhdr_lock_both() above for more detail
3631 3587           */
3632 3588          if (sq_cq == rq_cq) {
3633      -#ifdef  __lock_lint
3634      -                mutex_exit(&rq_cq->cq_wrid_wqhdr_lock);
3635      -#endif
3636 3589                  mutex_exit(&sq_cq->cq_wrid_wqhdr_lock);
3637 3590          } else {
3638 3591                  mutex_exit(&rq_cq->cq_wrid_wqhdr_lock);
3639 3592                  mutex_exit(&sq_cq->cq_wrid_wqhdr_lock);
3640 3593          }
3641 3594  }
3642 3595  
3643 3596  
3644 3597  /*
3645 3598   * tavor_cq_wqhdr_add()
↓ open down ↓ 2 lines elided ↑ open up ↑
3648 3601  static void
3649 3602  tavor_cq_wqhdr_add(tavor_cqhdl_t cq, tavor_workq_hdr_t *wqhdr)
3650 3603  {
3651 3604          tavor_workq_compare_t   cmp;
3652 3605          avl_index_t             where;
3653 3606  
3654 3607          ASSERT(MUTEX_HELD(&cq->cq_wrid_wqhdr_lock));
3655 3608  
3656 3609          cmp.cmp_qpn = wqhdr->wq_qpn;
3657 3610          cmp.cmp_type = wqhdr->wq_type;
3658      -#ifdef __lock_lint
3659      -        tavor_wrid_wqhdr_compare(NULL, NULL);
3660      -#endif
3661 3611          (void) avl_find(&cq->cq_wrid_wqhdr_avl_tree, &cmp, &where);
3662 3612          /*
3663 3613           * If the CQ's work queue list is empty, then just add it.
3664 3614           * Otherwise, chain it to the beginning of the list.
3665 3615           */
3666 3616          avl_insert(&cq->cq_wrid_wqhdr_avl_tree, wqhdr, where);
3667 3617  }
3668 3618  
3669 3619  
3670 3620  /*
3671 3621   * tavor_cq_wqhdr_remove()
3672 3622   *    Context: Can be called from interrupt or base context.
3673 3623   */
3674 3624  static void
3675 3625  tavor_cq_wqhdr_remove(tavor_cqhdl_t cq, tavor_workq_hdr_t *wqhdr)
3676 3626  {
3677 3627          ASSERT(MUTEX_HELD(&cq->cq_wrid_wqhdr_lock));
3678 3628  
3679      -#ifdef __lock_lint
3680      -        tavor_wrid_wqhdr_compare(NULL, NULL);
3681      -#endif
3682 3629          /* Remove "wqhdr" from the work queue header list on "cq" */
3683 3630          avl_remove(&cq->cq_wrid_wqhdr_avl_tree, wqhdr);
3684 3631  
3685 3632          /*
3686 3633           * Release reference to WQL; If this is the last reference, this call
3687 3634           * also has the side effect of freeing up the 'wq_wrid_wql' memory.
3688 3635           */
3689 3636          tavor_wql_refcnt_dec(wqhdr->wq_wrid_wql);
3690 3637  
3691 3638          /* Free the memory associated with "wqhdr" */
↓ open down ↓ 44 lines elided ↑ open up ↑
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX