5269 zfs: zpool import slow
PORTING: this code relies on the property of taskq_wait() that it waits
until no more tasks are queued and no more tasks are active. Because new
tasks are always queued from within other tasks, taskq_wait() reliably
waits for the full recursion to finish, even though tasks keep being
enqueued after taskq_wait() has been called.
On platforms other than illumos, taskq_wait() may not have this
property.
Reviewed by: Matthew Ahrens <mahrens@delphix.com>
Reviewed by: Dan McDonald <danmcd@omniti.com>
Reviewed by: George Wilson <george.wilson@delphix.com>
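
To make the PORTING note concrete, the sketch below shows the dispatch
pattern it describes. The walk_node_t type and the walk_* functions are
invented purely for illustration; only the taskq_* calls are the illumos
interface. Each task dispatches one task per child onto the same taskq,
so a single taskq_wait() at the top level returns only after the entire
recursion has drained.

#include <sys/taskq.h>
#include <sys/disp.h>

/* Hypothetical tree node used only for this illustration. */
typedef struct walk_node {
        struct walk_node **wn_child;
        int             wn_children;
        taskq_t         *wn_tq;         /* taskq shared by the whole walk */
} walk_node_t;

/* Task function: visit one node, then dispatch one task per child. */
static void
walk_node_task(void *arg)
{
        walk_node_t *wn = arg;

        /* ... per-node work would go here ... */

        for (int c = 0; c < wn->wn_children; c++) {
                wn->wn_child[c]->wn_tq = wn->wn_tq;
                (void) taskq_dispatch(wn->wn_tq, walk_node_task,
                    wn->wn_child[c], TQ_SLEEP);
        }
}

/* Top-level walk: one dispatch, then one wait for the full recursion. */
static void
walk_tree(walk_node_t *root)
{
        taskq_t *tq = taskq_create("walk", 8, minclsyspri, 8, 32,
            TASKQ_PREPOPULATE);

        root->wn_tq = tq;
        (void) taskq_dispatch(tq, walk_node_task, root, TQ_SLEEP);

        /*
         * On illumos this returns only when no tasks are queued and
         * none are running; since new tasks are dispatched only from
         * within other tasks, it waits out the entire recursion.
         */
        taskq_wait(tq);
        taskq_destroy(tq);
}

No per-level synchronization is needed: until the walk completes there is
always at least one task either queued or active, so taskq_wait() cannot
return early.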


        return (NULL);
}

/*
 * Recursively search the subtree rooted at vd for the vdev with the
 * given guid; returns NULL if it is not found.
 */
vdev_t *
vdev_lookup_by_guid(vdev_t *vd, uint64_t guid)
{
        vdev_t *mvd;

        if (vd->vdev_guid == guid)
                return (vd);

        for (int c = 0; c < vd->vdev_children; c++)
                if ((mvd = vdev_lookup_by_guid(vd->vdev_child[c], guid)) !=
                    NULL)
                        return (mvd);

        return (NULL);
}

/*
 * Added by this change: count the leaf vdevs in the subtree rooted at vd.
 */
static int
vdev_count_leaves_impl(vdev_t *vd)
{
        int n = 0;

        if (vd->vdev_ops->vdev_op_leaf)
                return (1);

        for (int c = 0; c < vd->vdev_children; c++)
                n += vdev_count_leaves_impl(vd->vdev_child[c]);

        return (n);
}

/*
 * Added by this change: number of leaf vdevs under the pool's root vdev.
 */
int
vdev_count_leaves(spa_t *spa)
{
        return (vdev_count_leaves_impl(spa->spa_root_vdev));
}

/*
 * Link cvd into pvd's child array, growing the array if needed.
 */
void
vdev_add_child(vdev_t *pvd, vdev_t *cvd)
{
        size_t oldsize, newsize;
        uint64_t id = cvd->vdev_id;
        vdev_t **newchild;

        ASSERT(spa_config_held(cvd->vdev_spa, SCL_ALL, RW_WRITER) == SCL_ALL);
        ASSERT(cvd->vdev_parent == NULL);

        cvd->vdev_parent = pvd;

        if (pvd == NULL)
                return;

        ASSERT(id >= pvd->vdev_children || pvd->vdev_child[id] == NULL);

        oldsize = pvd->vdev_children * sizeof (vdev_t *);
        pvd->vdev_children = MAX(pvd->vdev_children, id + 1);
        newsize = pvd->vdev_children * sizeof (vdev_t *);