Print this page
zpool import speedup

Split | Close
Expand all
Collapse all
          --- old/usr/src/uts/common/fs/zfs/spa.c
          +++ new/usr/src/uts/common/fs/zfs/spa.c
↓ open down ↓ 1709 lines elided ↑ open up ↑
1710 1710   */
1711 1711  static boolean_t
1712 1712  spa_check_logs(spa_t *spa)
1713 1713  {
1714 1714          boolean_t rv = B_FALSE;
1715 1715  
1716 1716          switch (spa->spa_log_state) {
1717 1717          case SPA_LOG_MISSING:
1718 1718                  /* need to recheck in case slog has been restored */
1719 1719          case SPA_LOG_UNKNOWN:
1720      -                rv = (dmu_objset_find(spa->spa_name, zil_check_log_chain,
1721      -                    NULL, DS_FIND_CHILDREN) != 0);
     1720 +                rv = (dmu_objset_find_parallel(spa->spa_name,
     1721 +                    zil_check_log_chain, NULL, DS_FIND_CHILDREN) != 0);
1722 1722                  if (rv)
1723 1723                          spa_set_log_state(spa, SPA_LOG_MISSING);
1724 1724                  break;
1725 1725          }
1726 1726          return (rv);
1727 1727  }
1728 1728  
1729 1729  static boolean_t
1730 1730  spa_passivate_log(spa_t *spa)
1731 1731  {
↓ open down ↓ 921 lines elided ↑ open up ↑
2653 2653                   * Claim log blocks that haven't been committed yet.
2654 2654                   * This must all happen in a single txg.
2655 2655                   * Note: spa_claim_max_txg is updated by spa_claim_notify(),
2656 2656                   * invoked from zil_claim_log_block()'s i/o done callback.
2657 2657                   * Price of rollback is that we abandon the log.
2658 2658                   */
2659 2659                  spa->spa_claiming = B_TRUE;
2660 2660  
2661 2661                  tx = dmu_tx_create_assigned(spa_get_dsl(spa),
2662 2662                      spa_first_txg(spa));
2663      -                (void) dmu_objset_find(spa_name(spa),
     2663 +                (void) dmu_objset_find_parallel(spa_name(spa),
2664 2664                      zil_claim, tx, DS_FIND_CHILDREN);
2665 2665                  dmu_tx_commit(tx);
2666 2666  
2667 2667                  spa->spa_claiming = B_FALSE;
2668 2668  
2669 2669                  spa_set_log_state(spa, SPA_LOG_GOOD);
2670 2670                  spa->spa_sync_on = B_TRUE;
2671 2671                  txg_sync_start(spa->spa_dsl_pool);
2672 2672  
2673 2673                  /*
↓ open down ↓ 171 lines elided ↑ open up ↑
2845 2845   * down from userland, instead of grabbed from the configuration cache.  For the
2846 2846   * case of an open, the pool configuration will exist in the
2847 2847   * POOL_STATE_UNINITIALIZED state.
2848 2848   *
2849 2849   * The stats information (gen/count/ustats) is used to gather vdev statistics at
2850 2850   * the same time open the pool, without having to keep around the spa_t in some
2851 2851   * ambiguous state.
2852 2852   */
2853 2853  static int
2854 2854  spa_open_common(const char *pool, spa_t **spapp, void *tag, nvlist_t *nvpolicy,
2855      -    nvlist_t **config)
     2855 +    nvlist_t **config, int lock)
2856 2856  {
2857 2857          spa_t *spa;
2858 2858          spa_load_state_t state = SPA_LOAD_OPEN;
2859 2859          int error;
2860 2860          int locked = B_FALSE;
2861 2861  
2862 2862          *spapp = NULL;
2863 2863  
2864 2864          /*
2865 2865           * As disgusting as this is, we need to support recursive calls to this
2866 2866           * function because dsl_dir_open() is called during spa_load(), and ends
2867 2867           * up calling spa_open() again.  The real fix is to figure out how to
2868 2868           * avoid dsl_dir_open() calling this in the first place.
2869 2869           */
2870      -        if (mutex_owner(&spa_namespace_lock) != curthread) {
     2870 +        if (lock && (mutex_owner(&spa_namespace_lock) != curthread)) {
2871 2871                  mutex_enter(&spa_namespace_lock);
2872 2872                  locked = B_TRUE;
2873 2873          }
2874 2874  
2875 2875          if ((spa = spa_lookup(pool)) == NULL) {
2876 2876                  if (locked)
2877 2877                          mutex_exit(&spa_namespace_lock);
2878 2878                  return (SET_ERROR(ENOENT));
2879 2879          }
2880 2880  
↓ open down ↓ 76 lines elided ↑ open up ↑
2957 2957  
2958 2958          *spapp = spa;
2959 2959  
2960 2960          return (0);
2961 2961  }
2962 2962  
2963 2963  int
2964 2964  spa_open_rewind(const char *name, spa_t **spapp, void *tag, nvlist_t *policy,
2965 2965      nvlist_t **config)
2966 2966  {
2967      -        return (spa_open_common(name, spapp, tag, policy, config));
     2967 +        return (spa_open_common(name, spapp, tag, policy, config, 1));
2968 2968  }
2969 2969  
2970 2970  int
2971 2971  spa_open(const char *name, spa_t **spapp, void *tag)
2972 2972  {
2973      -        return (spa_open_common(name, spapp, tag, NULL, NULL));
     2973 +        return (spa_open_common(name, spapp, tag, NULL, NULL, 1));
     2974 +}
     2975 +
     2976 +int
     2977 +spa_open_lock(const char *name, spa_t **spapp, void *tag, int lock)
     2978 +{
     2979 +        return (spa_open_common(name, spapp, tag, NULL, NULL, lock));
2974 2980  }
2975 2981  
2976 2982  /*
2977 2983   * Lookup the given spa_t, incrementing the inject count in the process,
2978 2984   * preventing it from being exported or destroyed.
2979 2985   */
2980 2986  spa_t *
2981 2987  spa_inject_addref(char *name)
2982 2988  {
2983 2989          spa_t *spa;
↓ open down ↓ 163 lines elided ↑ open up ↑
3147 3153  }
3148 3154  
3149 3155  int
3150 3156  spa_get_stats(const char *name, nvlist_t **config,
3151 3157      char *altroot, size_t buflen)
3152 3158  {
3153 3159          int error;
3154 3160          spa_t *spa;
3155 3161  
3156 3162          *config = NULL;
3157      -        error = spa_open_common(name, &spa, FTAG, NULL, config);
     3163 +        error = spa_open_common(name, &spa, FTAG, NULL, config, 1);
3158 3164  
3159 3165          if (spa != NULL) {
3160 3166                  /*
3161 3167                   * This still leaves a window of inconsistency where the spares
3162 3168                   * or l2cache devices could change and the config would be
3163 3169                   * self-inconsistent.
3164 3170                   */
3165 3171                  spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
3166 3172  
3167 3173                  if (*config != NULL) {
↓ open down ↓ 3381 lines elided ↑ open up ↑
-------------------------------------------------------------------------------