8368 remove warlock leftovers from usr/src/uts

          --- old/usr/src/uts/common/io/ib/adapters/tavor/tavor_srq.c
          +++ new/usr/src/uts/common/io/ib/adapters/tavor/tavor_srq.c
... 70 lines elided ...
  71   71          uint64_t                value, srq_desc_off;
  72   72          uint32_t                lkey;
  73   73          uint32_t                log_srq_size;
  74   74          uint32_t                uarpg;
  75   75          uint_t                  wq_location, dma_xfer_mode, srq_is_umap;
  76   76          int                     flag, status;
  77   77          char                    *errormsg;
  78   78          uint_t                  max_sgl;
  79   79          uint_t                  wqesz;
  80   80  
  81      -        _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*sizes))
  82      -
  83   81          TAVOR_TNF_ENTER(tavor_srq_alloc);
  84   82  
  85   83          /*
  86   84           * Check the "options" flag.  Currently this flag tells the driver
  87   85           * whether or not the SRQ's work queues should come from normal
  88   86           * system memory or whether they should be allocated from DDR memory.
  89   87           */
  90   88          if (op == NULL) {
  91   89                  wq_location = TAVOR_QUEUE_LOCATION_NORMAL;
  92   90          } else {
... 43 lines elided ...
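
For reference, the check above simply defaults the work-queue location when the caller passes no option structure. A minimal standalone sketch of that pattern, with hypothetical names (srq_options_t, QUEUE_LOCATION_NORMAL, QUEUE_LOCATION_DDR), not the driver's actual types:

#include <stddef.h>

typedef enum { QUEUE_LOCATION_NORMAL, QUEUE_LOCATION_DDR } queue_location_t;

typedef struct srq_options {
        queue_location_t        srqo_wq_loc;    /* where to place the WQs */
} srq_options_t;

/*
 * Pick the work-queue location: fall back to normal system memory
 * when the caller supplies no option structure.
 */
static queue_location_t
srq_wq_location(const srq_options_t *op)
{
        if (op == NULL)
                return (QUEUE_LOCATION_NORMAL);
        return (op->srqo_wq_loc);
}
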
 136  134  
 137  135          /* Allocate the SRQ Handle entry */
 138  136          status = tavor_rsrc_alloc(state, TAVOR_SRQHDL, 1, sleepflag, &rsrc);
 139  137          if (status != DDI_SUCCESS) {
 140  138                  /* Set "status" and "errormsg" and goto failure */
 141  139                  TAVOR_TNF_FAIL(IBT_INSUFF_RESOURCE, "failed SRQ handle");
 142  140                  goto srqalloc_fail2;
 143  141          }
 144  142  
 145  143          srq = (tavor_srqhdl_t)rsrc->tr_addr;
 146      -        _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*srq))
 147  144  
 148  145          srq->srq_srqnum = srqc->tr_indx;        /* just use index */
 149  146  
 150  147          /*
 151  148           * If this will be a user-mappable SRQ, then allocate an entry for
 152  149           * the "userland resources database".  This will later be added to
 153  150           * the database (after all further SRQ operations are successful).
 154  151           * If we fail here, we must undo the reference counts and the
 155  152           * previous resource allocation.
 156  153           */
... 93 lines elided ...
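
The allocation path above follows the function's staged goto-unwind convention: each failure branches to a numbered label (srqalloc_fail2, srqalloc_fail4, ...) that releases everything acquired so far, in reverse order. A compressed, self-contained sketch of that error-handling shape, using hypothetical malloc-based stages rather than the driver's resource allocators:

#include <stdlib.h>

/*
 * Staged allocation with goto-based unwind: on any failure, fall
 * through the labels in reverse order of acquisition.
 */
static int
staged_alloc(void **a, void **b)
{
        *a = malloc(64);
        if (*a == NULL)
                goto fail1;
        *b = malloc(64);
        if (*b == NULL)
                goto fail2;
        return (0);

fail2:
        free(*a);
        *a = NULL;
fail1:
        return (-1);
}
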
 250  247          } else {
 251  248                  srq->srq_wqinfo.qa_location = wq_location;
 252  249          }
 253  250          status = tavor_queue_alloc(state, &srq->srq_wqinfo, sleepflag);
 254  251          if (status != DDI_SUCCESS) {
 255  252                  /* Set "status" and "errormsg" and goto failure */
 256  253                  TAVOR_TNF_FAIL(IBT_INSUFF_RESOURCE, "failed srq");
 257  254                  goto srqalloc_fail4;
 258  255          }
 259  256          buf = (uint32_t *)srq->srq_wqinfo.qa_buf_aligned;
 260      -        _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*buf))
 261  257  
 262  258          /*
 263  259           * Register the memory for the SRQ work queues.  The memory for the SRQ
 264  260           * must be registered in the Tavor TPT tables.  This gives us the LKey
 265  261           * to specify in the SRQ context later.  Note: If the work queue is to
 266  262           * be allocated from DDR memory, then only a "bypass" mapping is
 267  263           * appropriate.  And if the SRQ memory is user-mappable, then we force
 268  264           * DDI_DMA_CONSISTENT mapping.  Also, in order to meet the alignment
 269  265           * restriction, we pass the "mro_bind_override_addr" flag in the call
 270  266           * to tavor_mr_register().  This guarantees that the resulting IB vaddr
... 23 lines elided ...
 294  290                  }
 295  291          }
 296  292          mr_op.mro_bind_dmahdl = srq->srq_wqinfo.qa_dmahdl;
 297  293          mr_op.mro_bind_override_addr = 1;
 298  294          status = tavor_mr_register(state, pd, &mr_attr, &mr, &mr_op);
 299  295          if (status != DDI_SUCCESS) {
 300  296                  /* Set "status" and "errormsg" and goto failure */
 301  297                  TAVOR_TNF_FAIL(IBT_INSUFF_RESOURCE, "failed register mr");
 302  298                  goto srqalloc_fail5;
 303  299          }
 304      -        _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*mr))
 305  300          addr = mr->mr_bindinfo.bi_addr;
 306  301          lkey = mr->mr_lkey;
 307  302  
 308  303          /*
 309  304           * Calculate the offset between the kernel virtual address space
 310  305           * and the IB virtual address space.  This will be used when
 311  306           * posting work requests to properly initialize each WQE.
 312  307           */
 313  308          srq_desc_off = (uint64_t)(uintptr_t)srq->srq_wqinfo.qa_buf_aligned -
 314  309              (uint64_t)mr->mr_bindinfo.bi_addr;
 315  310  
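
The offset computed above lets the driver translate a kernel virtual address inside the work queue into the matching IB virtual address (IB vaddr = kernel vaddr - srq_desc_off). A standalone sketch of that translation, with illustrative example addresses and a hypothetical 64-byte WQE stride:

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
        /* Hypothetical example values for the two address spaces. */
        uint64_t kernel_base = 0xffffff0012340000ULL;   /* qa_buf_aligned */
        uint64_t ib_base     = 0x0000000080000000ULL;   /* bi_addr (IB vaddr) */

        /* Offset between the kernel and IB virtual address spaces. */
        uint64_t desc_off = kernel_base - ib_base;

        /* Translate the kernel address of one WQE into its IB address. */
        uint64_t wqe_kva = kernel_base + 3 * 64;        /* third 64-byte WQE */
        uint64_t wqe_iba = wqe_kva - desc_off;

        (void) printf("desc_off=0x%llx wqe IB addr=0x%llx\n",
            (unsigned long long)desc_off, (unsigned long long)wqe_iba);
        return (0);
}
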
 316  311          /*
 317  312           * Create WQL and Wridlist for use by this SRQ
 318  313           */
 319  314          srq->srq_wrid_wql = tavor_wrid_wql_create(state);
 320  315          if (srq->srq_wrid_wql == NULL) {
 321  316                  /* Set "status" and "errormsg" and goto failure */
 322  317                  TAVOR_TNF_FAIL(IBT_INSUFF_RESOURCE, "failed wql create");
 323  318                  goto srqalloc_fail6;
 324  319          }
 325      -        _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*(srq->srq_wrid_wql)))
 326  320  
 327  321          srq->srq_wridlist = tavor_wrid_get_list(1 << log_srq_size);
 328  322          if (srq->srq_wridlist == NULL) {
 329  323                  /* Set "status" and "errormsg" and goto failure */
 330  324                  TAVOR_TNF_FAIL(IBT_INSUFF_RESOURCE, "failed wridlist create");
 331  325                  goto srqalloc_fail7;
 332  326          }
 333      -        _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*(srq->srq_wridlist)))
 334  327  
 335  328          srq->srq_wridlist->wl_srq_en = 1;
 336  329          srq->srq_wridlist->wl_free_list_indx = -1;
 337  330  
 338  331          /*
 339  332           * Fill in all the return arguments (if necessary).  This includes
 340  333           * real queue size and real SGLs.
 341  334           */
 342  335          if (real_sizes != NULL) {
 343  336                  real_sizes->srq_wr_sz = (1 << log_srq_size);
... 209 lines elided ...
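
The real_sizes block above reports the queue depth the hardware actually uses, expressed as 1 << log_srq_size. Assuming log_srq_size is derived by rounding the requested depth up to the next power of two (the derivation itself is elided above), a small sketch of that reporting:

#include <stdint.h>
#include <stdio.h>

/*
 * Round a requested queue depth up to a power of two and return its
 * log2 (an assumed derivation; the driver stores only log_srq_size).
 */
static uint32_t
log2_roundup(uint32_t req)
{
        uint32_t log = 0;

        while ((1u << log) < req)
                log++;
        return (log);
}

int
main(void)
{
        uint32_t log_srq_size = log2_roundup(1000);

        /* Reported back to the caller as the "real" queue size. */
        (void) printf("requested 1000, real srq_wr_sz = %u\n",
            1u << log_srq_size);
        return (0);
}
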
 553  546          }
 554  547  
 555  548          /*
 556  549           * Put NULL into the Tavor SRQNum-to-SRQHdl list.  This will allow any
 557  550           * in-progress events to detect that the SRQ corresponding to this
 558  551           * number has been freed.
 559  552           */
 560  553          state->ts_srqhdl[srqc->tr_indx] = NULL;
 561  554  
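
Clearing the SRQNum-to-handle slot above is what lets asynchronous event handlers notice the SRQ is gone: a lookup by SRQ number now returns NULL and the event can simply be dropped. A toy sketch of that lookup pattern, with hypothetical names and a fixed-size table:

#include <stddef.h>

#define NUM_SRQ 256

typedef struct srq_handle srq_handle_t;

/* SRQ-number-to-handle table; freed SRQs leave a NULL slot behind. */
static srq_handle_t *srqhdl_tbl[NUM_SRQ];

/*
 * Event path: returns NULL when the SRQ was already freed, so the
 * caller knows to discard the in-progress event.
 */
static srq_handle_t *
srqhdl_from_num(unsigned int srqnum)
{
        if (srqnum >= NUM_SRQ)
                return (NULL);
        return (srqhdl_tbl[srqnum]);
}
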
 562  555          mutex_exit(&srq->srq_lock);
 563      -        _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*srq));
 564      -        _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*srq->srq_wridlist));
 565  556  
 566  557          /*
 567  558           * Reclaim SRQC entry from hardware (using the Tavor HW2SW_SRQ
 568  559           * firmware command).  If the ownership transfer fails for any reason,
 569  560           * then it is an indication that something (either in HW or SW) has
 570  561           * gone seriously wrong.
 571  562           */
 572  563          status = tavor_cmn_ownership_cmd_post(state, HW2SW_SRQ, &srqc_entry,
 573  564              sizeof (tavor_hw_srqc_t), srqnum, sleepflag);
 574  565          if (status != TAVOR_CMD_SUCCESS) {
... 147 lines elided ...
 722  713          } else {
 723  714                  new_srqinfo.qa_location = wq_location;
 724  715          }
 725  716          status = tavor_queue_alloc(state, &new_srqinfo, sleepflag);
 726  717          if (status != DDI_SUCCESS) {
 727  718                  /* Set "status" and "errormsg" and goto failure */
 728  719                  TAVOR_TNF_FAIL(IBT_INSUFF_RESOURCE, "failed srq");
 729  720                  goto srqmodify_fail;
 730  721          }
 731  722          buf = (uint32_t *)new_srqinfo.qa_buf_aligned;
 732      -        _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*buf))
 733  723  
 734  724          /*
 735  725           * Allocate the memory for the new WRE list.  This will be used later
 736  726           * when we resize the wridlist based on the new SRQ size.
 737  727           */
 738  728          wre_new = (tavor_wrid_entry_t *)kmem_zalloc((1 << log_srq_size) *
 739  729              sizeof (tavor_wrid_entry_t), sleepflag);
 740  730          if (wre_new == NULL) {
 741  731                  /* Set "status" and "errormsg" and goto failure */
 742  732                  TAVOR_TNF_FAIL(IBT_INSUFF_RESOURCE,
... 2 lines elided ...
 745  735          }
 746  736  
 747  737          /*
 748  738           * Fill in the "bind" struct.  This struct provides the majority
 749  739           * of the information that will be used to distinguish between an
 750  740           * "addr" binding (as is the case here) and a "buf" binding (see
 751  741           * below).  The "bind" struct is later passed to tavor_mr_mem_bind()
 752  742           * which does most of the "heavy lifting" for the Tavor memory
 753  743           * registration routines.
 754  744           */
 755      -        _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(bind))
 756  745          bzero(&bind, sizeof (tavor_bind_info_t));
 757  746          bind.bi_type  = TAVOR_BINDHDL_VADDR;
 758  747          bind.bi_addr  = (uint64_t)(uintptr_t)buf;
 759  748          bind.bi_len   = new_srqinfo.qa_size;
 760  749          bind.bi_as    = NULL;
 761  750          bind.bi_flags = sleepflag == TAVOR_SLEEP ? IBT_MR_SLEEP :
 762  751              IBT_MR_NOSLEEP | IBT_MR_ENABLE_LOCAL_WRITE;
 763  752          if (srq->srq_is_umap) {
 764  753                  bind.bi_bypass = state->ts_cfg_profile->cp_iommu_bypass;
 765  754          } else {
... 126 lines elided ...
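
The bind description above is a plain "addr" binding: the structure is zeroed, then typed as a virtual-address binding with the buffer address, length, and sleep/no-sleep access flags filled in. A hedged standalone sketch of that shape, with all type and flag names hypothetical rather than the driver's or IBTF's own:

#include <stdint.h>
#include <string.h>

typedef enum { BINDHDL_VADDR, BINDHDL_BUF } bind_type_t;

typedef struct bind_info {
        bind_type_t     bi_type;        /* "addr" vs. "buf" style binding */
        uint64_t        bi_addr;        /* start of the region to bind */
        uint64_t        bi_len;         /* length of the region */
        uint32_t        bi_flags;       /* sleep/no-sleep, access rights */
} bind_info_t;

#define MR_SLEEP                0x01
#define MR_NOSLEEP              0x02
#define MR_ENABLE_LOCAL_WRITE   0x04

/* Describe an address-style binding over an already-allocated queue. */
static void
bind_vaddr(bind_info_t *bind, void *buf, uint64_t len, int can_sleep)
{
        (void) memset(bind, 0, sizeof (*bind));
        bind->bi_type  = BINDHDL_VADDR;
        bind->bi_addr  = (uint64_t)(uintptr_t)buf;
        bind->bi_len   = len;
        bind->bi_flags = (can_sleep ? MR_SLEEP : MR_NOSLEEP) |
            MR_ENABLE_LOCAL_WRITE;
}
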
 892  881          srq->srq_wq_bufsz  = (1 << log_srq_size);
 893  882          bcopy(&bind, &srq->srq_mrhdl->mr_bindinfo, sizeof (tavor_bind_info_t));
 894  883          srq->srq_mrhdl->mr_mttrsrcp = mtt;
 895  884          srq->srq_desc_off  = srq_desc_off;
 896  885          srq->srq_real_sizes.srq_wr_sz = (1 << log_srq_size);
 897  886  
 898  887          /* Update MR mtt pagesize */
 899  888          mr->mr_logmttpgsz = mtt_pgsize_bits;
 900  889          mutex_exit(&mr->mr_lock);
 901  890  
 902      -#ifdef __lock_lint
 903      -        mutex_enter(&srq->srq_wrid_wql->wql_lock);
 904      -#else
 905  891          if (srq->srq_wrid_wql != NULL) {
 906  892                  mutex_enter(&srq->srq_wrid_wql->wql_lock);
 907  893          }
 908      -#endif
 909  894  
 910  895          /*
 911  896           * Initialize new wridlist, if needed.
 912  897           *
  913  898           * If a wridlist is already set up on an SRQ (the QP associated with an
 914  899           * SRQ has moved "from_reset") then we must update this wridlist based
 915  900           * on the new SRQ size.  We allocate the new size of Work Request ID
 916  901           * Entries, copy over the old entries to the new list, and
  917  902           * re-initialize the srq wridlist in the non-umap case.
 918  903           */
... 7 lines elided ...
 926  911                  /* Setup new sizes in wre */
 927  912                  srq->srq_wridlist->wl_wre = wre_new;
 928  913                  srq->srq_wridlist->wl_size = srq->srq_wq_bufsz;
 929  914  
 930  915                  if (!srq->srq_is_umap) {
 931  916                          tavor_wrid_list_srq_init(srq->srq_wridlist, srq,
 932  917                              srq_old_bufsz);
 933  918                  }
 934  919          }
 935  920  
 936      -#ifdef __lock_lint
 937      -        mutex_exit(&srq->srq_wrid_wql->wql_lock);
 938      -#else
 939  921          if (srq->srq_wrid_wql != NULL) {
 940  922                  mutex_exit(&srq->srq_wrid_wql->wql_lock);
 941  923          }
 942      -#endif
 943  924  
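
The resize above swaps in the freshly allocated WRE array (wre_new), carries the existing entries over, and records the new depth. A compact standalone sketch of the same grow-and-copy step, using hypothetical names and libc allocation in place of kmem_zalloc:

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

typedef struct wrid_entry {
        uint64_t        wr_wrid;
} wrid_entry_t;

/*
 * Grow an array of work-request-ID entries from old_sz to new_sz,
 * preserving the existing entries (new_sz >= old_sz assumed).
 */
static wrid_entry_t *
wre_grow(wrid_entry_t *old, uint32_t old_sz, uint32_t new_sz)
{
        wrid_entry_t *wre_new = calloc(new_sz, sizeof (wrid_entry_t));

        if (wre_new == NULL)
                return (NULL);
        (void) memcpy(wre_new, old, old_sz * sizeof (wrid_entry_t));
        free(old);
        return (wre_new);
}
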
 944  925          /*
 945  926           * If "old" SRQ was a user-mappable SRQ that is currently mmap()'d out
 946  927           * to a user process, then we need to call devmap_devmem_remap() to
 947  928           * invalidate the mapping to the SRQ memory.  We also need to
 948  929           * invalidate the SRQ tracking information for the user mapping.
 949  930           *
  950  931           * Note: The remap really shouldn't ever fail.  So, if it does, it is
  951  932           * an indication that something has gone seriously wrong.
 952  933           * So we print a warning message and return error (knowing, of course,
... 174 lines elided ...