3006 VERIFY[S,U,P] and ASSERT[S,U,P] frequently check if first argument is zero
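
This webrev replaces open-coded assertions of the form ASSERT3U(x, ==, 0) with the ASSERT0()/VERIFY0() macros introduced for this bug. As a rough, stand-alone sketch of the interface only (the committed definitions live in sys/debug.h and its userland counterparts, layered on the existing ASSERT3/VERIFY3 machinery rather than on assert()):

    #include <assert.h>

    /*
     * Hypothetical, simplified illustration of the ASSERT0()/VERIFY0()
     * idea: a single macro checks that its argument evaluates to zero,
     * replacing the three-argument ASSERT3U(x, ==, 0) spelling at every
     * call site.  The real illumos macros also report the offending
     * value on failure; this sketch only shows the shape of the API.
     */
    #define VERIFY0(x)      assert((x) == 0)

    #ifdef DEBUG
    #define ASSERT0(x)      VERIFY0(x)
    #else
    #define ASSERT0(x)      ((void)0)
    #endif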
    
      
          --- old/usr/src/uts/common/fs/zfs/dmu_traverse.c
          +++ new/usr/src/uts/common/fs/zfs/dmu_traverse.c
   1    1  /*
   2    2   * CDDL HEADER START
   3    3   *
   4    4   * The contents of this file are subject to the terms of the
   5    5   * Common Development and Distribution License (the "License").
   6    6   * You may not use this file except in compliance with the License.
   7    7   *
   8    8   * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
   9    9   * or http://www.opensolaris.org/os/licensing.
  10   10   * See the License for the specific language governing permissions
  11   11   * and limitations under the License.
  12   12   *
  13   13   * When distributing Covered Code, include this CDDL HEADER in each
  14   14   * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
  15   15   * If applicable, add the following below this CDDL HEADER, with the
  16   16   * fields enclosed by brackets "[]" replaced with your own identifying
  17   17   * information: Portions Copyright [yyyy] [name of copyright owner]
  18   18   *
  19   19   * CDDL HEADER END
  20   20   */
  21   21  /*
  22   22   * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
  23   23   * Copyright (c) 2012 by Delphix. All rights reserved.
  24   24   */
  25   25  
  26   26  #include <sys/zfs_context.h>
  27   27  #include <sys/dmu_objset.h>
  28   28  #include <sys/dmu_traverse.h>
  29   29  #include <sys/dsl_dataset.h>
  30   30  #include <sys/dsl_dir.h>
  31   31  #include <sys/dsl_pool.h>
  32   32  #include <sys/dnode.h>
  33   33  #include <sys/spa.h>
  34   34  #include <sys/zio.h>
  35   35  #include <sys/dmu_impl.h>
  36   36  #include <sys/sa.h>
  37   37  #include <sys/sa_impl.h>
  38   38  #include <sys/callb.h>
  39   39  
  40   40  int zfs_pd_blks_max = 100;
  41   41  
  42   42  typedef struct prefetch_data {
  43   43          kmutex_t pd_mtx;
  44   44          kcondvar_t pd_cv;
  45   45          int pd_blks_max;
  46   46          int pd_blks_fetched;
  47   47          int pd_flags;
  48   48          boolean_t pd_cancel;
  49   49          boolean_t pd_exited;
  50   50  } prefetch_data_t;
  51   51  
  52   52  typedef struct traverse_data {
  53   53          spa_t *td_spa;
  54   54          uint64_t td_objset;
  55   55          blkptr_t *td_rootbp;
  56   56          uint64_t td_min_txg;
  57   57          zbookmark_t *td_resume;
  58   58          int td_flags;
  59   59          prefetch_data_t *td_pfd;
  60   60          blkptr_cb_t *td_func;
  61   61          void *td_arg;
  62   62  } traverse_data_t;
  63   63  
  64   64  static int traverse_dnode(traverse_data_t *td, const dnode_phys_t *dnp,
  65   65      arc_buf_t *buf, uint64_t objset, uint64_t object);
  66   66  
  67   67  static int
  68   68  traverse_zil_block(zilog_t *zilog, blkptr_t *bp, void *arg, uint64_t claim_txg)
  69   69  {
  70   70          traverse_data_t *td = arg;
  71   71          zbookmark_t zb;
  72   72  
  73   73          if (bp->blk_birth == 0)
  74   74                  return (0);
  75   75  
  76   76          if (claim_txg == 0 && bp->blk_birth >= spa_first_txg(td->td_spa))
  77   77                  return (0);
  78   78  
  79   79          SET_BOOKMARK(&zb, td->td_objset, ZB_ZIL_OBJECT, ZB_ZIL_LEVEL,
  80   80              bp->blk_cksum.zc_word[ZIL_ZC_SEQ]);
  81   81  
  82   82          (void) td->td_func(td->td_spa, zilog, bp, NULL, &zb, NULL, td->td_arg);
  83   83  
  84   84          return (0);
  85   85  }
  86   86  
  87   87  static int
  88   88  traverse_zil_record(zilog_t *zilog, lr_t *lrc, void *arg, uint64_t claim_txg)
  89   89  {
  90   90          traverse_data_t *td = arg;
  91   91  
  92   92          if (lrc->lrc_txtype == TX_WRITE) {
  93   93                  lr_write_t *lr = (lr_write_t *)lrc;
  94   94                  blkptr_t *bp = &lr->lr_blkptr;
  95   95                  zbookmark_t zb;
  96   96  
  97   97                  if (bp->blk_birth == 0)
  98   98                          return (0);
  99   99  
 100  100                  if (claim_txg == 0 || bp->blk_birth < claim_txg)
 101  101                          return (0);
 102  102  
 103  103                  SET_BOOKMARK(&zb, td->td_objset, lr->lr_foid,
 104  104                      ZB_ZIL_LEVEL, lr->lr_offset / BP_GET_LSIZE(bp));
 105  105  
 106  106                  (void) td->td_func(td->td_spa, zilog, bp, NULL, &zb, NULL,
 107  107                      td->td_arg);
 108  108          }
 109  109          return (0);
 110  110  }
 111  111  
 112  112  static void
 113  113  traverse_zil(traverse_data_t *td, zil_header_t *zh)
 114  114  {
 115  115          uint64_t claim_txg = zh->zh_claim_txg;
 116  116          zilog_t *zilog;
 117  117  
 118  118          /*
 119  119           * We only want to visit blocks that have been claimed but not yet
 120  120           * replayed; plus, in read-only mode, blocks that are already stable.
 121  121           */
 122  122          if (claim_txg == 0 && spa_writeable(td->td_spa))
 123  123                  return;
 124  124  
 125  125          zilog = zil_alloc(spa_get_dsl(td->td_spa)->dp_meta_objset, zh);
 126  126  
 127  127          (void) zil_parse(zilog, traverse_zil_block, traverse_zil_record, td,
 128  128              claim_txg);
 129  129  
 130  130          zil_free(zilog);
 131  131  }
 132  132  
 133  133  typedef enum resume_skip {
 134  134          RESUME_SKIP_ALL,
 135  135          RESUME_SKIP_NONE,
 136  136          RESUME_SKIP_CHILDREN
 137  137  } resume_skip_t;
 138  138  
 139  139  /*
 140  140   * Returns RESUME_SKIP_ALL if td indicates that we are resuming a traversal and
 141  141   * the block indicated by zb does not need to be visited at all. Returns
 142  142   * RESUME_SKIP_CHILDREN if we are resuming a post traversal and we reach the
 143  143   * resume point. This indicates that this block should be visited but not its
 144  144   * children (since they must have been visited in a previous traversal).
 145  145   * Otherwise returns RESUME_SKIP_NONE.
 146  146   */
 147  147  static resume_skip_t
 148  148  resume_skip_check(traverse_data_t *td, const dnode_phys_t *dnp,
 149  149      const zbookmark_t *zb)
 150  150  {
 151  151          if (td->td_resume != NULL && !ZB_IS_ZERO(td->td_resume)) {
 152  152                  /*
 153  153                   * If we already visited this bp & everything below,
 154  154                   * don't bother doing it again.
 155  155                   */
 156  156                  if (zbookmark_is_before(dnp, zb, td->td_resume))
 157  157                          return (RESUME_SKIP_ALL);
 158  158  
 159  159                  /*
 160  160                   * If we found the block we're trying to resume from, zero
 161  161                   * the bookmark out to indicate that we have resumed.
 162  162                   */
 163  163                  ASSERT3U(zb->zb_object, <=, td->td_resume->zb_object);
 164  164                  if (bcmp(zb, td->td_resume, sizeof (*zb)) == 0) {
 165  165                          bzero(td->td_resume, sizeof (*zb));
 166  166                          if (td->td_flags & TRAVERSE_POST)
 167  167                                  return (RESUME_SKIP_CHILDREN);
 168  168                  }
 169  169          }
 170  170          return (RESUME_SKIP_NONE);
 171  171  }
 172  172  
 173  173  static void
 174  174  traverse_pause(traverse_data_t *td, const zbookmark_t *zb)
 175  175  {
 176  176          ASSERT(td->td_resume != NULL);
 177      -        ASSERT3U(zb->zb_level, ==, 0);
      177 +        ASSERT0(zb->zb_level);
 178  178          bcopy(zb, td->td_resume, sizeof (*td->td_resume));
 179  179  }
 180  180  
 181  181  static int
 182  182  traverse_visitbp(traverse_data_t *td, const dnode_phys_t *dnp,
 183  183      arc_buf_t *pbuf, blkptr_t *bp, const zbookmark_t *zb)
 184  184  {
 185  185          zbookmark_t czb;
 186  186          int err = 0, lasterr = 0;
 187  187          arc_buf_t *buf = NULL;
 188  188          prefetch_data_t *pd = td->td_pfd;
 189  189          boolean_t hard = td->td_flags & TRAVERSE_HARD;
 190  190          boolean_t pause = B_FALSE;
 191  191  
 192  192          switch (resume_skip_check(td, dnp, zb)) {
 193  193          case RESUME_SKIP_ALL:
 194  194                  return (0);
 195  195          case RESUME_SKIP_CHILDREN:
 196  196                  goto post;
 197  197          case RESUME_SKIP_NONE:
 198  198                  break;
 199  199          default:
 200  200                  ASSERT(0);
 201  201          }
 202  202  
 203  203          if (BP_IS_HOLE(bp)) {
 204  204                  err = td->td_func(td->td_spa, NULL, NULL, pbuf, zb, dnp,
 205  205                      td->td_arg);
 206  206                  return (err);
 207  207          }
 208  208  
 209  209          if (bp->blk_birth <= td->td_min_txg)
 210  210                  return (0);
 211  211  
 212  212          if (pd && !pd->pd_exited &&
 213  213              ((pd->pd_flags & TRAVERSE_PREFETCH_DATA) ||
 214  214              BP_GET_TYPE(bp) == DMU_OT_DNODE || BP_GET_LEVEL(bp) > 0)) {
 215  215                  mutex_enter(&pd->pd_mtx);
 216  216                  ASSERT(pd->pd_blks_fetched >= 0);
 217  217                  while (pd->pd_blks_fetched == 0 && !pd->pd_exited)
 218  218                          cv_wait(&pd->pd_cv, &pd->pd_mtx);
 219  219                  pd->pd_blks_fetched--;
 220  220                  cv_broadcast(&pd->pd_cv);
 221  221                  mutex_exit(&pd->pd_mtx);
 222  222          }
 223  223  
 224  224          if (td->td_flags & TRAVERSE_PRE) {
 225  225                  err = td->td_func(td->td_spa, NULL, bp, pbuf, zb, dnp,
 226  226                      td->td_arg);
 227  227                  if (err == TRAVERSE_VISIT_NO_CHILDREN)
 228  228                          return (0);
 229  229                  if (err == ERESTART)
 230  230                          pause = B_TRUE; /* handle pausing at a common point */
 231  231                  if (err != 0)
 232  232                          goto post;
 233  233          }
 234  234  
 235  235          if (BP_GET_LEVEL(bp) > 0) {
 236  236                  uint32_t flags = ARC_WAIT;
 237  237                  int i;
 238  238                  blkptr_t *cbp;
 239  239                  int epb = BP_GET_LSIZE(bp) >> SPA_BLKPTRSHIFT;
 240  240  
 241  241                  err = dsl_read(NULL, td->td_spa, bp, pbuf,
 242  242                      arc_getbuf_func, &buf,
 243  243                      ZIO_PRIORITY_ASYNC_READ, ZIO_FLAG_CANFAIL, &flags, zb);
 244  244                  if (err)
 245  245                          return (err);
 246  246  
 247  247                  /* recursively visitbp() blocks below this */
 248  248                  cbp = buf->b_data;
 249  249                  for (i = 0; i < epb; i++, cbp++) {
 250  250                          SET_BOOKMARK(&czb, zb->zb_objset, zb->zb_object,
 251  251                              zb->zb_level - 1,
 252  252                              zb->zb_blkid * epb + i);
 253  253                          err = traverse_visitbp(td, dnp, buf, cbp, &czb);
 254  254                          if (err) {
 255  255                                  if (!hard)
 256  256                                          break;
 257  257                                  lasterr = err;
 258  258                          }
 259  259                  }
 260  260          } else if (BP_GET_TYPE(bp) == DMU_OT_DNODE) {
 261  261                  uint32_t flags = ARC_WAIT;
 262  262                  int i;
 263  263                  int epb = BP_GET_LSIZE(bp) >> DNODE_SHIFT;
 264  264  
 265  265                  err = dsl_read(NULL, td->td_spa, bp, pbuf,
 266  266                      arc_getbuf_func, &buf,
 267  267                      ZIO_PRIORITY_ASYNC_READ, ZIO_FLAG_CANFAIL, &flags, zb);
 268  268                  if (err)
 269  269                          return (err);
 270  270  
 271  271                  /* recursively visitbp() blocks below this */
 272  272                  dnp = buf->b_data;
 273  273                  for (i = 0; i < epb; i++, dnp++) {
 274  274                          err = traverse_dnode(td, dnp, buf, zb->zb_objset,
 275  275                              zb->zb_blkid * epb + i);
 276  276                          if (err) {
 277  277                                  if (!hard)
 278  278                                          break;
 279  279                                  lasterr = err;
 280  280                          }
 281  281                  }
 282  282          } else if (BP_GET_TYPE(bp) == DMU_OT_OBJSET) {
 283  283                  uint32_t flags = ARC_WAIT;
 284  284                  objset_phys_t *osp;
 285  285                  dnode_phys_t *dnp;
 286  286  
 287  287                  err = dsl_read_nolock(NULL, td->td_spa, bp,
 288  288                      arc_getbuf_func, &buf,
 289  289                      ZIO_PRIORITY_ASYNC_READ, ZIO_FLAG_CANFAIL, &flags, zb);
 290  290                  if (err)
 291  291                          return (err);
 292  292  
 293  293                  osp = buf->b_data;
 294  294                  dnp = &osp->os_meta_dnode;
 295  295                  err = traverse_dnode(td, dnp, buf, zb->zb_objset,
 296  296                      DMU_META_DNODE_OBJECT);
 297  297                  if (err && hard) {
 298  298                          lasterr = err;
 299  299                          err = 0;
 300  300                  }
 301  301                  if (err == 0 && arc_buf_size(buf) >= sizeof (objset_phys_t)) {
 302  302                          dnp = &osp->os_userused_dnode;
 303  303                          err = traverse_dnode(td, dnp, buf, zb->zb_objset,
 304  304                              DMU_USERUSED_OBJECT);
 305  305                  }
 306  306                  if (err && hard) {
 307  307                          lasterr = err;
 308  308                          err = 0;
 309  309                  }
 310  310                  if (err == 0 && arc_buf_size(buf) >= sizeof (objset_phys_t)) {
 311  311                          dnp = &osp->os_groupused_dnode;
 312  312                          err = traverse_dnode(td, dnp, buf, zb->zb_objset,
 313  313                              DMU_GROUPUSED_OBJECT);
 314  314                  }
 315  315          }
 316  316  
 317  317          if (buf)
 318  318                  (void) arc_buf_remove_ref(buf, &buf);
 319  319  
 320  320  post:
 321  321          if (err == 0 && lasterr == 0 && (td->td_flags & TRAVERSE_POST)) {
 322  322                  err = td->td_func(td->td_spa, NULL, bp, pbuf, zb, dnp,
 323  323                      td->td_arg);
 324  324                  if (err == ERESTART)
 325  325                          pause = B_TRUE;
 326  326          }
 327  327  
 328  328          if (pause && td->td_resume != NULL) {
 329  329                  ASSERT3U(err, ==, ERESTART);
 330  330                  ASSERT(!hard);
 331  331                  traverse_pause(td, zb);
 332  332          }
 333  333  
 334  334          return (err != 0 ? err : lasterr);
 335  335  }
 336  336  
 337  337  static int
 338  338  traverse_dnode(traverse_data_t *td, const dnode_phys_t *dnp,
 339  339      arc_buf_t *buf, uint64_t objset, uint64_t object)
 340  340  {
 341  341          int j, err = 0, lasterr = 0;
 342  342          zbookmark_t czb;
 343  343          boolean_t hard = (td->td_flags & TRAVERSE_HARD);
 344  344  
 345  345          for (j = 0; j < dnp->dn_nblkptr; j++) {
 346  346                  SET_BOOKMARK(&czb, objset, object, dnp->dn_nlevels - 1, j);
 347  347                  err = traverse_visitbp(td, dnp, buf,
 348  348                      (blkptr_t *)&dnp->dn_blkptr[j], &czb);
 349  349                  if (err) {
 350  350                          if (!hard)
 351  351                                  break;
 352  352                          lasterr = err;
 353  353                  }
 354  354          }
 355  355  
 356  356          if (dnp->dn_flags & DNODE_FLAG_SPILL_BLKPTR) {
 357  357                  SET_BOOKMARK(&czb, objset,
 358  358                      object, 0, DMU_SPILL_BLKID);
 359  359                  err = traverse_visitbp(td, dnp, buf,
 360  360                      (blkptr_t *)&dnp->dn_spill, &czb);
 361  361                  if (err) {
 362  362                          if (!hard)
 363  363                                  return (err);
 364  364                          lasterr = err;
 365  365                  }
 366  366          }
 367  367          return (err != 0 ? err : lasterr);
 368  368  }
 369  369  
 370  370  /* ARGSUSED */
 371  371  static int
 372  372  traverse_prefetcher(spa_t *spa, zilog_t *zilog, const blkptr_t *bp,
 373  373      arc_buf_t *pbuf, const zbookmark_t *zb, const dnode_phys_t *dnp,
 374  374      void *arg)
 375  375  {
 376  376          prefetch_data_t *pfd = arg;
 377  377          uint32_t aflags = ARC_NOWAIT | ARC_PREFETCH;
 378  378  
 379  379          ASSERT(pfd->pd_blks_fetched >= 0);
 380  380          if (pfd->pd_cancel)
 381  381                  return (EINTR);
 382  382  
 383  383          if (bp == NULL || !((pfd->pd_flags & TRAVERSE_PREFETCH_DATA) ||
 384  384              BP_GET_TYPE(bp) == DMU_OT_DNODE || BP_GET_LEVEL(bp) > 0) ||
 385  385              BP_GET_TYPE(bp) == DMU_OT_INTENT_LOG)
 386  386                  return (0);
 387  387  
 388  388          mutex_enter(&pfd->pd_mtx);
 389  389          while (!pfd->pd_cancel && pfd->pd_blks_fetched >= pfd->pd_blks_max)
 390  390                  cv_wait(&pfd->pd_cv, &pfd->pd_mtx);
 391  391          pfd->pd_blks_fetched++;
 392  392          cv_broadcast(&pfd->pd_cv);
 393  393          mutex_exit(&pfd->pd_mtx);
 394  394  
 395  395          (void) dsl_read(NULL, spa, bp, pbuf, NULL, NULL,
 396  396              ZIO_PRIORITY_ASYNC_READ,
 397  397              ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE,
 398  398              &aflags, zb);
 399  399  
 400  400          return (0);
 401  401  }
 402  402  
 403  403  static void
 404  404  traverse_prefetch_thread(void *arg)
 405  405  {
 406  406          traverse_data_t *td_main = arg;
 407  407          traverse_data_t td = *td_main;
 408  408          zbookmark_t czb;
 409  409  
 410  410          td.td_func = traverse_prefetcher;
 411  411          td.td_arg = td_main->td_pfd;
 412  412          td.td_pfd = NULL;
 413  413  
 414  414          SET_BOOKMARK(&czb, td.td_objset,
 415  415              ZB_ROOT_OBJECT, ZB_ROOT_LEVEL, ZB_ROOT_BLKID);
 416  416          (void) traverse_visitbp(&td, NULL, NULL, td.td_rootbp, &czb);
 417  417  
 418  418          mutex_enter(&td_main->td_pfd->pd_mtx);
 419  419          td_main->td_pfd->pd_exited = B_TRUE;
 420  420          cv_broadcast(&td_main->td_pfd->pd_cv);
 421  421          mutex_exit(&td_main->td_pfd->pd_mtx);
 422  422  }
 423  423  
 424  424  /*
 425  425   * NB: dataset must not be changing on-disk (eg, is a snapshot or we are
 426  426   * in syncing context).
 427  427   */
 428  428  static int
 429  429  traverse_impl(spa_t *spa, dsl_dataset_t *ds, uint64_t objset, blkptr_t *rootbp,
 430  430      uint64_t txg_start, zbookmark_t *resume, int flags,
 431  431      blkptr_cb_t func, void *arg)
 432  432  {
 433  433          traverse_data_t td;
 434  434          prefetch_data_t pd = { 0 };
 435  435          zbookmark_t czb;
 436  436          int err;
 437  437  
 438  438          ASSERT(ds == NULL || objset == ds->ds_object);
 439  439          ASSERT(!(flags & TRAVERSE_PRE) || !(flags & TRAVERSE_POST));
 440  440  
 441  441          td.td_spa = spa;
 442  442          td.td_objset = objset;
 443  443          td.td_rootbp = rootbp;
 444  444          td.td_min_txg = txg_start;
 445  445          td.td_resume = resume;
 446  446          td.td_func = func;
 447  447          td.td_arg = arg;
 448  448          td.td_pfd = &pd;
 449  449          td.td_flags = flags;
 450  450  
 451  451          pd.pd_blks_max = zfs_pd_blks_max;
 452  452          pd.pd_flags = flags;
 453  453          mutex_init(&pd.pd_mtx, NULL, MUTEX_DEFAULT, NULL);
 454  454          cv_init(&pd.pd_cv, NULL, CV_DEFAULT, NULL);
 455  455  
 456  456          /* See comment on ZIL traversal in dsl_scan_visitds. */
 457  457          if (ds != NULL && !dsl_dataset_is_snapshot(ds)) {
 458  458                  objset_t *os;
 459  459  
 460  460                  err = dmu_objset_from_ds(ds, &os);
 461  461                  if (err)
 462  462                          return (err);
 463  463  
 464  464                  traverse_zil(&td, &os->os_zil_header);
 465  465          }
 466  466  
 467  467          if (!(flags & TRAVERSE_PREFETCH) ||
 468  468              0 == taskq_dispatch(system_taskq, traverse_prefetch_thread,
 469  469              &td, TQ_NOQUEUE))
 470  470                  pd.pd_exited = B_TRUE;
 471  471  
 472  472          SET_BOOKMARK(&czb, td.td_objset,
 473  473              ZB_ROOT_OBJECT, ZB_ROOT_LEVEL, ZB_ROOT_BLKID);
 474  474          err = traverse_visitbp(&td, NULL, NULL, rootbp, &czb);
 475  475  
 476  476          mutex_enter(&pd.pd_mtx);
 477  477          pd.pd_cancel = B_TRUE;
 478  478          cv_broadcast(&pd.pd_cv);
 479  479          while (!pd.pd_exited)
 480  480                  cv_wait(&pd.pd_cv, &pd.pd_mtx);
 481  481          mutex_exit(&pd.pd_mtx);
 482  482  
 483  483          mutex_destroy(&pd.pd_mtx);
 484  484          cv_destroy(&pd.pd_cv);
 485  485  
 486  486          return (err);
 487  487  }
 488  488  
 489  489  /*
 490  490   * NB: dataset must not be changing on-disk (eg, is a snapshot or we are
 491  491   * in syncing context).
 492  492   */
 493  493  int
 494  494  traverse_dataset(dsl_dataset_t *ds, uint64_t txg_start, int flags,
 495  495      blkptr_cb_t func, void *arg)
 496  496  {
 497  497          return (traverse_impl(ds->ds_dir->dd_pool->dp_spa, ds, ds->ds_object,
 498  498              &ds->ds_phys->ds_bp, txg_start, NULL, flags, func, arg));
 499  499  }
 500  500  
 501  501  int
 502  502  traverse_dataset_destroyed(spa_t *spa, blkptr_t *blkptr,
 503  503      uint64_t txg_start, zbookmark_t *resume, int flags,
 504  504      blkptr_cb_t func, void *arg)
 505  505  {
 506  506          return (traverse_impl(spa, NULL, ZB_DESTROYED_OBJSET,
 507  507              blkptr, txg_start, resume, flags, func, arg));
 508  508  }
 509  509  
 510  510  /*
 511  511   * NB: pool must not be changing on-disk (eg, from zdb or sync context).
 512  512   */
 513  513  int
 514  514  traverse_pool(spa_t *spa, uint64_t txg_start, int flags,
 515  515      blkptr_cb_t func, void *arg)
 516  516  {
 517  517          int err, lasterr = 0;
 518  518          uint64_t obj;
 519  519          dsl_pool_t *dp = spa_get_dsl(spa);
 520  520          objset_t *mos = dp->dp_meta_objset;
 521  521          boolean_t hard = (flags & TRAVERSE_HARD);
 522  522  
 523  523          /* visit the MOS */
 524  524          err = traverse_impl(spa, NULL, 0, spa_get_rootblkptr(spa),
 525  525              txg_start, NULL, flags, func, arg);
 526  526          if (err)
 527  527                  return (err);
 528  528  
 529  529          /* visit each dataset */
 530  530          for (obj = 1; err == 0 || (err != ESRCH && hard);
 531  531              err = dmu_object_next(mos, &obj, FALSE, txg_start)) {
 532  532                  dmu_object_info_t doi;
 533  533  
 534  534                  err = dmu_object_info(mos, obj, &doi);
 535  535                  if (err) {
 536  536                          if (!hard)
 537  537                                  return (err);
 538  538                          lasterr = err;
 539  539                          continue;
 540  540                  }
 541  541  
 542  542                  if (doi.doi_type == DMU_OT_DSL_DATASET) {
 543  543                          dsl_dataset_t *ds;
 544  544                          uint64_t txg = txg_start;
 545  545  
 546  546                          rw_enter(&dp->dp_config_rwlock, RW_READER);
 547  547                          err = dsl_dataset_hold_obj(dp, obj, FTAG, &ds);
 548  548                          rw_exit(&dp->dp_config_rwlock);
 549  549                          if (err) {
 550  550                                  if (!hard)
 551  551                                          return (err);
 552  552                                  lasterr = err;
 553  553                                  continue;
 554  554                          }
 555  555                          if (ds->ds_phys->ds_prev_snap_txg > txg)
 556  556                                  txg = ds->ds_phys->ds_prev_snap_txg;
 557  557                          err = traverse_dataset(ds, txg, flags, func, arg);
 558  558                          dsl_dataset_rele(ds, FTAG);
 559  559                          if (err) {
 560  560                                  if (!hard)
 561  561                                          return (err);
 562  562                                  lasterr = err;
 563  563                          }
 564  564                  }
 565  565          }
 566  566          if (err == ESRCH)
 567  567                  err = 0;
 568  568          return (err != 0 ? err : lasterr);
 569  569  }