Print this page
    
3006 VERIFY[S,U,P] and ASSERT[S,U,P] frequently check if first argument is zero
    
      
        | Split | 
	Close | 
      
      | Expand all | 
      | Collapse all | 
    
    
          --- old/usr/src/uts/common/fs/zfs/space_map.c
          +++ new/usr/src/uts/common/fs/zfs/space_map.c
   1    1  /*
   2    2   * CDDL HEADER START
   3    3   *
   4    4   * The contents of this file are subject to the terms of the
   5    5   * Common Development and Distribution License (the "License").
   6    6   * You may not use this file except in compliance with the License.
   7    7   *
   8    8   * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
   9    9   * or http://www.opensolaris.org/os/licensing.
  10   10   * See the License for the specific language governing permissions
  11   11   * and limitations under the License.
  12   12   *
  13   13   * When distributing Covered Code, include this CDDL HEADER in each
  
    | 
      ↓ open down ↓ | 
    13 lines elided | 
    
      ↑ open up ↑ | 
  
  14   14   * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
  15   15   * If applicable, add the following below this CDDL HEADER, with the
  16   16   * fields enclosed by brackets "[]" replaced with your own identifying
  17   17   * information: Portions Copyright [yyyy] [name of copyright owner]
  18   18   *
  19   19   * CDDL HEADER END
  20   20   */
  21   21  /*
  22   22   * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
  23   23   * Use is subject to license terms.
       24 + *
  24   25   */
  25   26  
       27 +/*
       28 + * Copyright (c) 2012 by Delphix. All rights reserved.
       29 + */
       30 +
  26   31  #include <sys/zfs_context.h>
  27   32  #include <sys/spa.h>
  28   33  #include <sys/dmu.h>
  29   34  #include <sys/zio.h>
  30   35  #include <sys/space_map.h>
  31   36  
  32   37  /*
  33   38   * Space map routines.
  34   39   * NOTE: caller is responsible for all locking.
  35   40   */
  36   41  static int
  37   42  space_map_seg_compare(const void *x1, const void *x2)
  38   43  {
  39   44          const space_seg_t *s1 = x1;
  40   45          const space_seg_t *s2 = x2;
  41   46  
  42   47          if (s1->ss_start < s2->ss_start) {
  43   48                  if (s1->ss_end > s2->ss_start)
  44   49                          return (0);
  45   50                  return (-1);
  46   51          }
  47   52          if (s1->ss_start > s2->ss_start) {
  48   53                  if (s1->ss_start < s2->ss_end)
  49   54                          return (0);
  50   55                  return (1);
  51   56          }
  52   57          return (0);
  53   58  }
  54   59  
  55   60  void
  56   61  space_map_create(space_map_t *sm, uint64_t start, uint64_t size, uint8_t shift,
  57   62          kmutex_t *lp)
  58   63  {
  59   64          bzero(sm, sizeof (*sm));
  60   65  
  61   66          cv_init(&sm->sm_load_cv, NULL, CV_DEFAULT, NULL);
  62   67  
  63   68          avl_create(&sm->sm_root, space_map_seg_compare,
  64   69              sizeof (space_seg_t), offsetof(struct space_seg, ss_node));
  65   70  
  
    | 
      ↓ open down ↓ | 
    30 lines elided | 
    
      ↑ open up ↑ | 
  
  66   71          sm->sm_start = start;
  67   72          sm->sm_size = size;
  68   73          sm->sm_shift = shift;
  69   74          sm->sm_lock = lp;
  70   75  }
  71   76  
  72   77  void
  73   78  space_map_destroy(space_map_t *sm)
  74   79  {
  75   80          ASSERT(!sm->sm_loaded && !sm->sm_loading);
  76      -        VERIFY3U(sm->sm_space, ==, 0);
       81 +        VERIFY0(sm->sm_space);
  77   82          avl_destroy(&sm->sm_root);
  78   83          cv_destroy(&sm->sm_load_cv);
  79   84  }
  80   85  
  81   86  void
  82   87  space_map_add(space_map_t *sm, uint64_t start, uint64_t size)
  83   88  {
  84   89          avl_index_t where;
  85   90          space_seg_t ssearch, *ss_before, *ss_after, *ss;
  86   91          uint64_t end = start + size;
  87   92          int merge_before, merge_after;
  88   93  
  89   94          ASSERT(MUTEX_HELD(sm->sm_lock));
  90   95          VERIFY(size != 0);
  91   96          VERIFY3U(start, >=, sm->sm_start);
  92   97          VERIFY3U(end, <=, sm->sm_start + sm->sm_size);
  93   98          VERIFY(sm->sm_space + size <= sm->sm_size);
  94   99          VERIFY(P2PHASE(start, 1ULL << sm->sm_shift) == 0);
  95  100          VERIFY(P2PHASE(size, 1ULL << sm->sm_shift) == 0);
  96  101  
  97  102          ssearch.ss_start = start;
  98  103          ssearch.ss_end = end;
  99  104          ss = avl_find(&sm->sm_root, &ssearch, &where);
 100  105  
 101  106          if (ss != NULL && ss->ss_start <= start && ss->ss_end >= end) {
 102  107                  zfs_panic_recover("zfs: allocating allocated segment"
 103  108                      "(offset=%llu size=%llu)\n",
 104  109                      (longlong_t)start, (longlong_t)size);
 105  110                  return;
 106  111          }
 107  112  
 108  113          /* Make sure we don't overlap with either of our neighbors */
 109  114          VERIFY(ss == NULL);
 110  115  
 111  116          ss_before = avl_nearest(&sm->sm_root, where, AVL_BEFORE);
 112  117          ss_after = avl_nearest(&sm->sm_root, where, AVL_AFTER);
 113  118  
 114  119          merge_before = (ss_before != NULL && ss_before->ss_end == start);
 115  120          merge_after = (ss_after != NULL && ss_after->ss_start == end);
 116  121  
 117  122          if (merge_before && merge_after) {
 118  123                  avl_remove(&sm->sm_root, ss_before);
 119  124                  if (sm->sm_pp_root) {
 120  125                          avl_remove(sm->sm_pp_root, ss_before);
 121  126                          avl_remove(sm->sm_pp_root, ss_after);
 122  127                  }
 123  128                  ss_after->ss_start = ss_before->ss_start;
 124  129                  kmem_free(ss_before, sizeof (*ss_before));
 125  130                  ss = ss_after;
 126  131          } else if (merge_before) {
 127  132                  ss_before->ss_end = end;
 128  133                  if (sm->sm_pp_root)
 129  134                          avl_remove(sm->sm_pp_root, ss_before);
 130  135                  ss = ss_before;
 131  136          } else if (merge_after) {
 132  137                  ss_after->ss_start = start;
 133  138                  if (sm->sm_pp_root)
 134  139                          avl_remove(sm->sm_pp_root, ss_after);
 135  140                  ss = ss_after;
 136  141          } else {
 137  142                  ss = kmem_alloc(sizeof (*ss), KM_SLEEP);
 138  143                  ss->ss_start = start;
 139  144                  ss->ss_end = end;
 140  145                  avl_insert(&sm->sm_root, ss, where);
 141  146          }
 142  147  
 143  148          if (sm->sm_pp_root)
 144  149                  avl_add(sm->sm_pp_root, ss);
 145  150  
 146  151          sm->sm_space += size;
 147  152  }
 148  153  
/*
 * Remove the range [start, start + size) from the space map.  The range
 * must be entirely contained within one existing segment; removing from
 * the middle of a segment splits it in two.  Removing a range that is
 * not present triggers zfs_panic_recover() and leaves the map unchanged
 * (best-effort recovery from a double free).
 */
void
space_map_remove(space_map_t *sm, uint64_t start, uint64_t size)
{
	avl_index_t where;
	space_seg_t ssearch, *ss, *newseg;
	uint64_t end = start + size;
	int left_over, right_over;

	ASSERT(MUTEX_HELD(sm->sm_lock));
	VERIFY(size != 0);
	/* Offset and length must be aligned to the map's 1 << sm_shift unit. */
	VERIFY(P2PHASE(start, 1ULL << sm->sm_shift) == 0);
	VERIFY(P2PHASE(size, 1ULL << sm->sm_shift) == 0);

	ssearch.ss_start = start;
	ssearch.ss_end = end;
	ss = avl_find(&sm->sm_root, &ssearch, &where);

	/* Make sure we completely overlap with someone */
	if (ss == NULL) {
		/*
		 * NOTE(review): unlike space_map_add(), this message has no
		 * trailing newline -- presumably cosmetic; confirm upstream.
		 */
		zfs_panic_recover("zfs: freeing free segment "
		    "(offset=%llu size=%llu)",
		    (longlong_t)start, (longlong_t)size);
		return;
	}
	VERIFY3U(ss->ss_start, <=, start);
	VERIFY3U(ss->ss_end, >=, end);
	VERIFY(sm->sm_space - size <= sm->sm_size);

	/* Does the removal leave residue on either side of the segment? */
	left_over = (ss->ss_start != start);
	right_over = (ss->ss_end != end);

	/* Take ss out of the secondary tree before mutating its bounds. */
	if (sm->sm_pp_root)
		avl_remove(sm->sm_pp_root, ss);

	if (left_over && right_over) {
		/* Split: ss keeps the left piece, newseg gets the right. */
		newseg = kmem_alloc(sizeof (*newseg), KM_SLEEP);
		newseg->ss_start = end;
		newseg->ss_end = ss->ss_end;
		ss->ss_end = start;
		avl_insert_here(&sm->sm_root, newseg, ss, AVL_AFTER);
		if (sm->sm_pp_root)
			avl_add(sm->sm_pp_root, newseg);
	} else if (left_over) {
		ss->ss_end = start;
	} else if (right_over) {
		ss->ss_start = end;
	} else {
		/* Exact match: the whole segment goes away. */
		avl_remove(&sm->sm_root, ss);
		kmem_free(ss, sizeof (*ss));
		ss = NULL;
	}

	/* Put the surviving (resized) segment back in the secondary tree. */
	if (sm->sm_pp_root && ss != NULL)
		avl_add(sm->sm_pp_root, ss);

	sm->sm_space -= size;
}
 206  211  
 207  212  boolean_t
 208  213  space_map_contains(space_map_t *sm, uint64_t start, uint64_t size)
 209  214  {
 210  215          avl_index_t where;
 211  216          space_seg_t ssearch, *ss;
 212  217          uint64_t end = start + size;
 213  218  
 214  219          ASSERT(MUTEX_HELD(sm->sm_lock));
 215  220          VERIFY(size != 0);
 216  221          VERIFY(P2PHASE(start, 1ULL << sm->sm_shift) == 0);
 217  222          VERIFY(P2PHASE(size, 1ULL << sm->sm_shift) == 0);
 218  223  
 219  224          ssearch.ss_start = start;
 220  225          ssearch.ss_end = end;
 221  226          ss = avl_find(&sm->sm_root, &ssearch, &where);
 222  227  
 223  228          return (ss != NULL && ss->ss_start <= start && ss->ss_end >= end);
 224  229  }
 225  230  
 226  231  void
 227  232  space_map_vacate(space_map_t *sm, space_map_func_t *func, space_map_t *mdest)
 228  233  {
 229  234          space_seg_t *ss;
 230  235          void *cookie = NULL;
 231  236  
 232  237          ASSERT(MUTEX_HELD(sm->sm_lock));
 233  238  
 234  239          while ((ss = avl_destroy_nodes(&sm->sm_root, &cookie)) != NULL) {
 235  240                  if (func != NULL)
 236  241                          func(mdest, ss->ss_start, ss->ss_end - ss->ss_start);
 237  242                  kmem_free(ss, sizeof (*ss));
 238  243          }
 239  244          sm->sm_space = 0;
 240  245  }
 241  246  
 242  247  void
 243  248  space_map_walk(space_map_t *sm, space_map_func_t *func, space_map_t *mdest)
 244  249  {
 245  250          space_seg_t *ss;
 246  251  
 247  252          ASSERT(MUTEX_HELD(sm->sm_lock));
 248  253  
 249  254          for (ss = avl_first(&sm->sm_root); ss; ss = AVL_NEXT(&sm->sm_root, ss))
 250  255                  func(mdest, ss->ss_start, ss->ss_end - ss->ss_start);
 251  256  }
 252  257  
 253  258  /*
 254  259   * Wait for any in-progress space_map_load() to complete.
 255  260   */
 256  261  void
 257  262  space_map_load_wait(space_map_t *sm)
 258  263  {
 259  264          ASSERT(MUTEX_HELD(sm->sm_lock));
 260  265  
 261  266          while (sm->sm_loading) {
 262  267                  ASSERT(!sm->sm_loaded);
 263  268                  cv_wait(&sm->sm_load_cv, sm->sm_lock);
 264  269          }
 265  270  }
 266  271  
/*
 * Note: space_map_load() will drop sm_lock across dmu_read() calls.
 * The caller must be OK with this.
 *
 * Populate the in-core map from on-disk object smo in objset os.
 * maptype selects the sense of the load: entries matching maptype are
 * added, entries of the opposite type are removed.  For SM_FREE the map
 * is primed with the full range first, so alloc records carve space out.
 * On success the map is marked loaded, ops is attached (and its
 * smop_load hook invoked, if any); on error the map is vacated.
 * Returns 0 or the dmu_read() error.
 */
int
space_map_load(space_map_t *sm, space_map_ops_t *ops, uint8_t maptype,
	space_map_obj_t *smo, objset_t *os)
{
	uint64_t *entry, *entry_map, *entry_map_end;
	uint64_t bufsize, size, offset, end, space;
	uint64_t mapstart = sm->sm_start;
	int error = 0;

	ASSERT(MUTEX_HELD(sm->sm_lock));
	ASSERT(!sm->sm_loaded);
	ASSERT(!sm->sm_loading);

	/* Publish the load-in-progress state for space_map_load_wait(). */
	sm->sm_loading = B_TRUE;
	end = smo->smo_objsize;
	space = smo->smo_alloc;

	ASSERT(sm->sm_ops == NULL);
	VERIFY0(sm->sm_space);

	if (maptype == SM_FREE) {
		/* Start from "everything free"; expect the complement. */
		space_map_add(sm, sm->sm_start, sm->sm_size);
		space = sm->sm_size - space;
	}

	bufsize = 1ULL << SPACE_MAP_BLOCKSHIFT;
	entry_map = zio_buf_alloc(bufsize);

	/* Prefetch everything past the first buffer while unlocked. */
	mutex_exit(sm->sm_lock);
	if (end > bufsize)
		dmu_prefetch(os, smo->smo_object, bufsize, end - bufsize);
	mutex_enter(sm->sm_lock);

	for (offset = 0; offset < end; offset += bufsize) {
		size = MIN(end - offset, bufsize);
		VERIFY(P2PHASE(size, sizeof (uint64_t)) == 0);
		VERIFY(size != 0);

		dprintf("object=%llu  offset=%llx  size=%llx\n",
		    smo->smo_object, offset, size);

		/* Drop the lock across the read (see note above). */
		mutex_exit(sm->sm_lock);
		error = dmu_read(os, smo->smo_object, offset, size, entry_map,
		    DMU_READ_PREFETCH);
		mutex_enter(sm->sm_lock);
		if (error != 0)
			break;

		/* Replay each 64-bit entry into the in-core map. */
		entry_map_end = entry_map + (size / sizeof (uint64_t));
		for (entry = entry_map; entry < entry_map_end; entry++) {
			uint64_t e = *entry;

			if (SM_DEBUG_DECODE(e))		/* Skip debug entries */
				continue;

			(SM_TYPE_DECODE(e) == maptype ?
			    space_map_add : space_map_remove)(sm,
			    (SM_OFFSET_DECODE(e) << sm->sm_shift) + mapstart,
			    SM_RUN_DECODE(e) << sm->sm_shift);
		}
	}

	if (error == 0) {
		/* The replayed map must account for exactly `space` bytes. */
		VERIFY3U(sm->sm_space, ==, space);

		sm->sm_loaded = B_TRUE;
		sm->sm_ops = ops;
		if (ops != NULL)
			ops->smop_load(sm);
	} else {
		/* Partial load: discard everything accumulated so far. */
		space_map_vacate(sm, NULL, NULL);
	}

	zio_buf_free(entry_map, bufsize);

	sm->sm_loading = B_FALSE;

	/* Wake anyone blocked in space_map_load_wait(). */
	cv_broadcast(&sm->sm_load_cv);

	return (error);
}
 352  357  
 353  358  void
 354  359  space_map_unload(space_map_t *sm)
 355  360  {
 356  361          ASSERT(MUTEX_HELD(sm->sm_lock));
 357  362  
 358  363          if (sm->sm_loaded && sm->sm_ops != NULL)
 359  364                  sm->sm_ops->smop_unload(sm);
 360  365  
 361  366          sm->sm_loaded = B_FALSE;
 362  367          sm->sm_ops = NULL;
 363  368  
 364  369          space_map_vacate(sm, NULL, NULL);
 365  370  }
 366  371  
 367  372  uint64_t
 368  373  space_map_maxsize(space_map_t *sm)
 369  374  {
 370  375          ASSERT(sm->sm_ops != NULL);
 371  376          return (sm->sm_ops->smop_max(sm));
 372  377  }
 373  378  
 374  379  uint64_t
 375  380  space_map_alloc(space_map_t *sm, uint64_t size)
 376  381  {
 377  382          uint64_t start;
 378  383  
 379  384          start = sm->sm_ops->smop_alloc(sm, size);
 380  385          if (start != -1ULL)
 381  386                  space_map_remove(sm, start, size);
 382  387          return (start);
 383  388  }
 384  389  
 385  390  void
 386  391  space_map_claim(space_map_t *sm, uint64_t start, uint64_t size)
 387  392  {
 388  393          sm->sm_ops->smop_claim(sm, start, size);
 389  394          space_map_remove(sm, start, size);
 390  395  }
 391  396  
 392  397  void
 393  398  space_map_free(space_map_t *sm, uint64_t start, uint64_t size)
 394  399  {
 395  400          space_map_add(sm, start, size);
 396  401          sm->sm_ops->smop_free(sm, start, size);
 397  402  }
 398  403  
/*
 * Note: space_map_sync() will drop sm_lock across dmu_write() calls.
 *
 * Flush the in-core map to on-disk object smo as a stream of encoded
 * 64-bit entries (one debug header, then offset/type/run records),
 * emptying the in-core map in the process.  maptype (SM_ALLOC/SM_FREE)
 * tags the entries and adjusts smo_alloc accordingly.
 */
void
space_map_sync(space_map_t *sm, uint8_t maptype,
	space_map_obj_t *smo, objset_t *os, dmu_tx_t *tx)
{
	spa_t *spa = dmu_objset_spa(os);
	void *cookie = NULL;
	space_seg_t *ss;
	uint64_t bufsize, start, size, run_len;
	uint64_t *entry, *entry_map, *entry_map_end;

	ASSERT(MUTEX_HELD(sm->sm_lock));

	/* Nothing to write for an empty map. */
	if (sm->sm_space == 0)
		return;

	dprintf("object %4llu, txg %llu, pass %d, %c, count %lu, space %llx\n",
	    smo->smo_object, dmu_tx_get_txg(tx), spa_sync_pass(spa),
	    maptype == SM_ALLOC ? 'A' : 'F', avl_numnodes(&sm->sm_root),
	    sm->sm_space);

	/* Account the space being synced against the on-disk object. */
	if (maptype == SM_ALLOC)
		smo->smo_alloc += sm->sm_space;
	else
		smo->smo_alloc -= sm->sm_space;

	/* One word per segment plus slack, capped at one space-map block. */
	bufsize = (8 + avl_numnodes(&sm->sm_root)) * sizeof (uint64_t);
	bufsize = MIN(bufsize, 1ULL << SPACE_MAP_BLOCKSHIFT);
	entry_map = zio_buf_alloc(bufsize);
	entry_map_end = entry_map + (bufsize / sizeof (uint64_t));
	entry = entry_map;

	/* First entry is a debug record tagging action, sync pass, and txg. */
	*entry++ = SM_DEBUG_ENCODE(1) |
	    SM_DEBUG_ACTION_ENCODE(maptype) |
	    SM_DEBUG_SYNCPASS_ENCODE(spa_sync_pass(spa)) |
	    SM_DEBUG_TXG_ENCODE(dmu_tx_get_txg(tx));

	while ((ss = avl_destroy_nodes(&sm->sm_root, &cookie)) != NULL) {
		size = ss->ss_end - ss->ss_start;
		start = (ss->ss_start - sm->sm_start) >> sm->sm_shift;

		sm->sm_space -= size;
		size >>= sm->sm_shift;

		/* Emit the segment as one or more runs of <= SM_RUN_MAX. */
		while (size) {
			run_len = MIN(size, SM_RUN_MAX);

			if (entry == entry_map_end) {
				/* Buffer full: flush it (dropping the lock). */
				mutex_exit(sm->sm_lock);
				dmu_write(os, smo->smo_object, smo->smo_objsize,
				    bufsize, entry_map, tx);
				mutex_enter(sm->sm_lock);
				smo->smo_objsize += bufsize;
				entry = entry_map;
			}

			*entry++ = SM_OFFSET_ENCODE(start) |
			    SM_TYPE_ENCODE(maptype) |
			    SM_RUN_ENCODE(run_len);

			start += run_len;
			size -= run_len;
		}
		kmem_free(ss, sizeof (*ss));
	}

	/* Flush any partially filled final buffer. */
	if (entry != entry_map) {
		size = (entry - entry_map) * sizeof (uint64_t);
		mutex_exit(sm->sm_lock);
		dmu_write(os, smo->smo_object, smo->smo_objsize,
		    size, entry_map, tx);
		mutex_enter(sm->sm_lock);
		smo->smo_objsize += size;
	}

	zio_buf_free(entry_map, bufsize);

	/* Every segment was consumed above, so the map must now be empty. */
	VERIFY0(sm->sm_space);
}
 480  485  
 481  486  void
 482  487  space_map_truncate(space_map_obj_t *smo, objset_t *os, dmu_tx_t *tx)
 483  488  {
 484  489          VERIFY(dmu_free_range(os, smo->smo_object, 0, -1ULL, tx) == 0);
 485  490  
 486  491          smo->smo_objsize = 0;
 487  492          smo->smo_alloc = 0;
 488  493  }
 489  494  
 490  495  /*
 491  496   * Space map reference trees.
 492  497   *
 493  498   * A space map is a collection of integers.  Every integer is either
 494  499   * in the map, or it's not.  A space map reference tree generalizes
 495  500   * the idea: it allows its members to have arbitrary reference counts,
 496  501   * as opposed to the implicit reference count of 0 or 1 in a space map.
 497  502   * This representation comes in handy when computing the union or
 498  503   * intersection of multiple space maps.  For example, the union of
 499  504   * N space maps is the subset of the reference tree with refcnt >= 1.
 500  505   * The intersection of N space maps is the subset with refcnt >= N.
 501  506   *
 502  507   * [It's very much like a Fourier transform.  Unions and intersections
 503  508   * are hard to perform in the 'space map domain', so we convert the maps
 504  509   * into the 'reference count domain', where it's trivial, then invert.]
 505  510   *
 506  511   * vdev_dtl_reassess() uses computations of this form to determine
 507  512   * DTL_MISSING and DTL_OUTAGE for interior vdevs -- e.g. a RAID-Z vdev
 508  513   * has an outage wherever refcnt >= vdev_nparity + 1, and a mirror vdev
 509  514   * has an outage wherever refcnt >= vdev_children.
 510  515   */
 511  516  static int
 512  517  space_map_ref_compare(const void *x1, const void *x2)
 513  518  {
 514  519          const space_ref_t *sr1 = x1;
 515  520          const space_ref_t *sr2 = x2;
 516  521  
 517  522          if (sr1->sr_offset < sr2->sr_offset)
 518  523                  return (-1);
 519  524          if (sr1->sr_offset > sr2->sr_offset)
 520  525                  return (1);
 521  526  
 522  527          if (sr1 < sr2)
 523  528                  return (-1);
 524  529          if (sr1 > sr2)
 525  530                  return (1);
 526  531  
 527  532          return (0);
 528  533  }
 529  534  
 530  535  void
 531  536  space_map_ref_create(avl_tree_t *t)
 532  537  {
 533  538          avl_create(t, space_map_ref_compare,
 534  539              sizeof (space_ref_t), offsetof(space_ref_t, sr_node));
 535  540  }
 536  541  
 537  542  void
 538  543  space_map_ref_destroy(avl_tree_t *t)
 539  544  {
 540  545          space_ref_t *sr;
 541  546          void *cookie = NULL;
 542  547  
 543  548          while ((sr = avl_destroy_nodes(t, &cookie)) != NULL)
 544  549                  kmem_free(sr, sizeof (*sr));
 545  550  
 546  551          avl_destroy(t);
 547  552  }
 548  553  
 549  554  static void
 550  555  space_map_ref_add_node(avl_tree_t *t, uint64_t offset, int64_t refcnt)
 551  556  {
 552  557          space_ref_t *sr;
 553  558  
 554  559          sr = kmem_alloc(sizeof (*sr), KM_SLEEP);
 555  560          sr->sr_offset = offset;
 556  561          sr->sr_refcnt = refcnt;
 557  562  
 558  563          avl_add(t, sr);
 559  564  }
 560  565  
 561  566  void
 562  567  space_map_ref_add_seg(avl_tree_t *t, uint64_t start, uint64_t end,
 563  568          int64_t refcnt)
 564  569  {
 565  570          space_map_ref_add_node(t, start, refcnt);
 566  571          space_map_ref_add_node(t, end, -refcnt);
 567  572  }
 568  573  
 569  574  /*
 570  575   * Convert (or add) a space map into a reference tree.
 571  576   */
 572  577  void
 573  578  space_map_ref_add_map(avl_tree_t *t, space_map_t *sm, int64_t refcnt)
 574  579  {
 575  580          space_seg_t *ss;
 576  581  
 577  582          ASSERT(MUTEX_HELD(sm->sm_lock));
 578  583  
 579  584          for (ss = avl_first(&sm->sm_root); ss; ss = AVL_NEXT(&sm->sm_root, ss))
 580  585                  space_map_ref_add_seg(t, ss->ss_start, ss->ss_end, refcnt);
 581  586  }
 582  587  
 583  588  /*
 584  589   * Convert a reference tree into a space map.  The space map will contain
 585  590   * all members of the reference tree for which refcnt >= minref.
 586  591   */
 587  592  void
 588  593  space_map_ref_generate_map(avl_tree_t *t, space_map_t *sm, int64_t minref)
 589  594  {
 590  595          uint64_t start = -1ULL;
 591  596          int64_t refcnt = 0;
 592  597          space_ref_t *sr;
 593  598  
 594  599          ASSERT(MUTEX_HELD(sm->sm_lock));
 595  600  
 596  601          space_map_vacate(sm, NULL, NULL);
 597  602  
 598  603          for (sr = avl_first(t); sr != NULL; sr = AVL_NEXT(t, sr)) {
 599  604                  refcnt += sr->sr_refcnt;
 600  605                  if (refcnt >= minref) {
 601  606                          if (start == -1ULL) {
 602  607                                  start = sr->sr_offset;
 603  608                          }
 604  609                  } else {
 605  610                          if (start != -1ULL) {
 606  611                                  uint64_t end = sr->sr_offset;
 607  612                                  ASSERT(start <= end);
 608  613                                  if (end > start)
 609  614                                          space_map_add(sm, start, end - start);
 610  615                                  start = -1ULL;
 611  616                          }
 612  617                  }
 613  618          }
 614  619          ASSERT(refcnt == 0);
 615  620          ASSERT(start == -1ULL);
 616  621  }
  
    | 
      ↓ open down ↓ | 
    128 lines elided | 
    
      ↑ open up ↑ | 
  
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX