Print this page
5269 zfs: zpool import slow
While importing a pool, all objsets are enumerated twice: once to check
the ZIL log chains and once to claim them. On pools with many datasets
this process might take a substantial amount of time.
Speed up the process by parallelizing it using a taskq. The number
of parallel tasks is limited to four times the number of leaf vdevs.

Split Close
Expand all
Collapse all
          --- old/usr/src/uts/common/fs/zfs/vdev.c
          +++ new/usr/src/uts/common/fs/zfs/vdev.c
↓ open down ↓ 164 lines elided ↑ open up ↑
 165  165                  return (vd);
 166  166  
 167  167          for (int c = 0; c < vd->vdev_children; c++)
 168  168                  if ((mvd = vdev_lookup_by_guid(vd->vdev_child[c], guid)) !=
 169  169                      NULL)
 170  170                          return (mvd);
 171  171  
 172  172          return (NULL);
 173  173  }
 174  174  
      175 +static int
      176 +vdev_count_leaves_impl(vdev_t *vd)
      177 +{
      178 +        vdev_t *mvd;
      179 +        int n = 0;
      180 +
      181 +        if (vd->vdev_children == 0)
      182 +                return (1);
      183 +
      184 +        for (int c = 0; c < vd->vdev_children; c++)
      185 +                n += vdev_count_leaves_impl(vd->vdev_child[c]);
      186 +
      187 +        return (n);
      188 +}
      189 +
      190 +int
      191 +vdev_count_leaves(spa_t *spa)
      192 +{
      193 +        return (vdev_count_leaves_impl(spa->spa_root_vdev));
      194 +}
      195 +
 175  196  void
 176  197  vdev_add_child(vdev_t *pvd, vdev_t *cvd)
 177  198  {
 178  199          size_t oldsize, newsize;
 179  200          uint64_t id = cvd->vdev_id;
 180  201          vdev_t **newchild;
 181  202  
 182  203          ASSERT(spa_config_held(cvd->vdev_spa, SCL_ALL, RW_WRITER) == SCL_ALL);
 183  204          ASSERT(cvd->vdev_parent == NULL);
 184  205  
↓ open down ↓ 3118 lines elided ↑ open up ↑
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX