Print this page
5269 zfs: zpool import slow
PORTING: this code relies on the property of taskq_wait to wait
until no more tasks are queued and no more tasks are active. As
we always queue new tasks from within other tasks, taskq_wait
reliably waits for the full recursion to finish, even though we
enqueue new tasks after taskq_wait has been called.
On platforms other than illumos, taskq_wait may not have this
property.
Reviewed by: Matthew Ahrens <mahrens@delphix.com>
Reviewed by: Dan McDonald <danmcd@omniti.com>
Reviewed by: George Wilson <george.wilson@delphix.com>

Split Close
Expand all
Collapse all
          --- old/usr/src/uts/common/fs/zfs/spa.c
          +++ new/usr/src/uts/common/fs/zfs/spa.c
↓ open down ↓ 1730 lines elided ↑ open up ↑
1731 1731          return (rvd->vdev_guid_sum == spa->spa_uberblock.ub_guid_sum);
1732 1732  }
1733 1733  
1734 1734  /*
1735 1735   * Check for missing log devices
1736 1736   */
1737 1737  static boolean_t
1738 1738  spa_check_logs(spa_t *spa)
1739 1739  {
1740 1740          boolean_t rv = B_FALSE;
     1741 +        dsl_pool_t *dp = spa_get_dsl(spa);
1741 1742  
1742 1743          switch (spa->spa_log_state) {
1743 1744          case SPA_LOG_MISSING:
1744 1745                  /* need to recheck in case slog has been restored */
1745 1746          case SPA_LOG_UNKNOWN:
1746      -                rv = (dmu_objset_find(spa->spa_name, zil_check_log_chain,
1747      -                    NULL, DS_FIND_CHILDREN) != 0);
     1747 +                rv = (dmu_objset_find_dp(dp, dp->dp_root_dir_obj,
     1748 +                    zil_check_log_chain, NULL, DS_FIND_CHILDREN) != 0);
1748 1749                  if (rv)
1749 1750                          spa_set_log_state(spa, SPA_LOG_MISSING);
1750 1751                  break;
1751 1752          }
1752 1753          return (rv);
1753 1754  }
1754 1755  
1755 1756  static boolean_t
1756 1757  spa_passivate_log(spa_t *spa)
1757 1758  {
↓ open down ↓ 950 lines elided ↑ open up ↑
2708 2709          if (state != SPA_LOAD_TRYIMPORT) {
2709 2710                  if (error = spa_load_verify(spa))
2710 2711                          return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA,
2711 2712                              error));
2712 2713          }
2713 2714  
2714 2715          if (spa_writeable(spa) && (state == SPA_LOAD_RECOVER ||
2715 2716              spa->spa_load_max_txg == UINT64_MAX)) {
2716 2717                  dmu_tx_t *tx;
2717 2718                  int need_update = B_FALSE;
     2719 +                dsl_pool_t *dp = spa_get_dsl(spa);
2718 2720  
2719 2721                  ASSERT(state != SPA_LOAD_TRYIMPORT);
2720 2722  
2721 2723                  /*
2722 2724                   * Claim log blocks that haven't been committed yet.
2723 2725                   * This must all happen in a single txg.
2724 2726                   * Note: spa_claim_max_txg is updated by spa_claim_notify(),
2725 2727                   * invoked from zil_claim_log_block()'s i/o done callback.
2726 2728                   * Price of rollback is that we abandon the log.
2727 2729                   */
2728 2730                  spa->spa_claiming = B_TRUE;
2729 2731  
2730      -                tx = dmu_tx_create_assigned(spa_get_dsl(spa),
2731      -                    spa_first_txg(spa));
2732      -                (void) dmu_objset_find(spa_name(spa),
     2732 +                tx = dmu_tx_create_assigned(dp, spa_first_txg(spa));
     2733 +                (void) dmu_objset_find_dp(dp, dp->dp_root_dir_obj,
2733 2734                      zil_claim, tx, DS_FIND_CHILDREN);
2734 2735                  dmu_tx_commit(tx);
2735 2736  
2736 2737                  spa->spa_claiming = B_FALSE;
2737 2738  
2738 2739                  spa_set_log_state(spa, SPA_LOG_GOOD);
2739 2740                  spa->spa_sync_on = B_TRUE;
2740 2741                  txg_sync_start(spa->spa_dsl_pool);
2741 2742  
2742 2743                  /*
↓ open down ↓ 3923 lines elided ↑ open up ↑
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX