5981 Deadlock in dmu_objset_find_dp

          --- old/usr/src/uts/common/fs/zfs/rrwlock.c
          +++ new/usr/src/uts/common/fs/zfs/rrwlock.c
[ 151 lines elided ]
 152  152  void
 153  153  rrw_destroy(rrwlock_t *rrl)
 154  154  {
 155  155          mutex_destroy(&rrl->rr_lock);
 156  156          cv_destroy(&rrl->rr_cv);
 157  157          ASSERT(rrl->rr_writer == NULL);
 158  158          refcount_destroy(&rrl->rr_anon_rcount);
 159  159          refcount_destroy(&rrl->rr_linked_rcount);
 160  160  }
 161  161  
 162      -void
 163      -rrw_enter_read(rrwlock_t *rrl, void *tag)
      162 +static void
      163 +rrw_enter_read_impl(rrwlock_t *rrl, boolean_t prio, void *tag)
 164  164  {
 165  165          mutex_enter(&rrl->rr_lock);
 166  166  #if !defined(DEBUG) && defined(_KERNEL)
 167  167          if (rrl->rr_writer == NULL && !rrl->rr_writer_wanted &&
 168  168              !rrl->rr_track_all) {
 169  169                  rrl->rr_anon_rcount.rc_count++;
 170  170                  mutex_exit(&rrl->rr_lock);
 171  171                  return;
 172  172          }
 173  173          DTRACE_PROBE(zfs__rrwfastpath__rdmiss);
 174  174  #endif
 175  175          ASSERT(rrl->rr_writer != curthread);
 176  176          ASSERT(refcount_count(&rrl->rr_anon_rcount) >= 0);
 177  177  
 178  178          while (rrl->rr_writer != NULL || (rrl->rr_writer_wanted &&
 179      -            refcount_is_zero(&rrl->rr_anon_rcount) &&
      179 +            refcount_is_zero(&rrl->rr_anon_rcount) && !prio &&
 180  180              rrn_find(rrl) == NULL))
 181  181                  cv_wait(&rrl->rr_cv, &rrl->rr_lock);
 182  182  
 183  183          if (rrl->rr_writer_wanted || rrl->rr_track_all) {
 184  184                  /* may or may not be a re-entrant enter */
 185  185                  rrn_add(rrl, tag);
 186  186                  (void) refcount_add(&rrl->rr_linked_rcount, tag);
 187  187          } else {
 188  188                  (void) refcount_add(&rrl->rr_anon_rcount, tag);
 189  189          }
 190  190          ASSERT(rrl->rr_writer == NULL);
 191  191          mutex_exit(&rrl->rr_lock);
 192  192  }
 193  193  
 194  194  void
      195 +rrw_enter_read(rrwlock_t *rrl, void *tag)
      196 +{
      197 +        rrw_enter_read_impl(rrl, B_FALSE, tag);
      198 +}
      199 +
      200 +/*
       201 + * Take a read lock even if there are pending write lock requests.  If the
       202 + * lock is taken reentrantly but from different (though related) threads,
       203 + * the normal per-thread reentrancy detection cannot overrule the pending
       204 + * writer, so the caller must give an explicit hint by using this interface.
      205 + */
      206 +void
      207 +rrw_enter_read_prio(rrwlock_t *rrl, void *tag)
      208 +{
      209 +        rrw_enter_read_impl(rrl, B_TRUE, tag);
      210 +}
      211 +
      212 +
      213 +void
 195  214  rrw_enter_write(rrwlock_t *rrl)
 196  215  {
 197  216          mutex_enter(&rrl->rr_lock);
 198  217          ASSERT(rrl->rr_writer != curthread);
 199  218  
 200  219          while (refcount_count(&rrl->rr_anon_rcount) > 0 ||
 201  220              refcount_count(&rrl->rr_linked_rcount) > 0 ||
 202  221              rrl->rr_writer != NULL) {
 203  222                  rrl->rr_writer_wanted = B_TRUE;
 204  223                  cv_wait(&rrl->rr_cv, &rrl->rr_lock);
[ 172 lines elided ]
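
The new comment above rrw_enter_read_prio() describes the scenario this change targets: a thread that already holds the lock as a reader hands work to helper threads, and those helpers must also take the lock as readers even though a writer is queued. Because rrn_find() only tracks the calling thread, a helper is not recognized as a reentrant reader, so a plain rrw_enter_read() would block behind the pending writer while the original reader waits on the helper. That appears to be the shape of the deadlock in dmu_objset_find_dp named in the issue title. The sketch below is illustrative only and is not part of this webrev; parent_work(), child_work(), and the taskq passed in are made-up names, and the includes are only indicative of a kernel build. The callers that actually pass the new hint are not shown in this file's diff.

#include <sys/spa.h>		/* FTAG tag macro */
#include <sys/rrwlock.h>
#include <sys/taskq.h>

/* Illustrative only; not part of this change. */
static void
child_work(void *arg)
{
	rrwlock_t *rrl = arg;

	/*
	 * This thread is not on the lock's reentrancy list (rrn_find()
	 * only sees the calling thread), so a plain rrw_enter_read()
	 * would block behind a pending writer.  The prio variant takes
	 * the read lock even while rr_writer_wanted is set.
	 */
	rrw_enter_read_prio(rrl, FTAG);
	/* ... per-child work under the lock ... */
	rrw_exit(rrl, FTAG);
}

static void
parent_work(rrwlock_t *rrl, taskq_t *tq)
{
	rrw_enter_read(rrl, FTAG);

	/*
	 * If a writer queues up after this point, the child could not
	 * get the read lock without the prio hint, and this thread
	 * would wait on the child forever.
	 */
	(void) taskq_dispatch(tq, child_work, rrl, TQ_SLEEP);
	taskq_wait(tq);

	rrw_exit(rrl, FTAG);
}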