5269 zfs: zpool import slow
PORTING: this code relies on the property of taskq_wait to wait
until no more tasks are queued and no more tasks are active. As
we always queue new tasks from within other tasks, taskq_wait
reliably waits for the full recursion to finish, even though we
enqueue new tasks after taskq_wait has been called.
On platforms other than illumos, taskq_wait may not have this
property.
Reviewed by: Matthew Ahrens <mahrens@delphix.com>
Reviewed by: Dan McDonald <danmcd@omniti.com>
Reviewed by: George Wilson <george.wilson@delphix.com>
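
The PORTING note above describes the pattern the parallelized dataset traversal depends on: worker tasks dispatch further tasks onto the same taskq, and a single taskq_wait() issued by the top-level caller does not return until the queue is empty and no task is running, so it covers the entire recursion. The sketch below illustrates that pattern against the illumos taskq API; the node_t type, visit_task(), and walk_tree() are made-up names for illustration, not the actual dmu_objset_find_dp() code.

#include <sys/taskq.h>
#include <sys/disp.h>           /* minclsyspri */

/* Hypothetical tree node; the real code walks dsl_dir children. */
typedef struct node {
        struct node     **n_children;
        int             n_nchildren;
        taskq_t         *n_tq;          /* taskq shared by the whole walk */
} node_t;

/*
 * Worker task: process one node, then dispatch one task per child onto
 * the same taskq. New tasks are enqueued from within a task, possibly
 * after the top-level taskq_wait() has already been called.
 */
static void
visit_task(void *arg)
{
        node_t *n = arg;
        int i;

        /* ... per-node work goes here ... */

        for (i = 0; i < n->n_nchildren; i++) {
                n->n_children[i]->n_tq = n->n_tq;
                (void) taskq_dispatch(n->n_tq, visit_task,
                    n->n_children[i], TQ_SLEEP);
        }
}

/*
 * Top-level driver: dispatch the root and wait once. On illumos,
 * taskq_wait() returns only when no tasks are queued and none are
 * active, so it waits out the full recursion; as the note says, other
 * platforms may not give taskq_wait() that guarantee.
 */
static void
walk_tree(node_t *root)
{
        taskq_t *tq = taskq_create("tree_walk", 4, minclsyspri,
            4, INT_MAX, TASKQ_PREPOPULATE);

        root->n_tq = tq;
        (void) taskq_dispatch(tq, visit_task, root, TQ_SLEEP);

        taskq_wait(tq);
        taskq_destroy(tq);
}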


 741                 dsl_dataset_phys(prev)->ds_next_clones_obj =
 742                     zap_create(dp->dp_meta_objset,
 743                     DMU_OT_NEXT_CLONES, DMU_OT_NONE, 0, tx);
 744         }
 745         VERIFY0(zap_add_int(dp->dp_meta_objset,
 746             dsl_dataset_phys(prev)->ds_next_clones_obj, ds->ds_object, tx));
 747 
 748         dsl_dataset_rele(ds, FTAG);
 749         if (prev != dp->dp_origin_snap)
 750                 dsl_dataset_rele(prev, FTAG);
 751         return (0);
 752 }
 753 
 754 void
 755 dsl_pool_upgrade_clones(dsl_pool_t *dp, dmu_tx_t *tx)
 756 {
 757         ASSERT(dmu_tx_is_syncing(tx));
 758         ASSERT(dp->dp_origin_snap != NULL);
 759 
 760         VERIFY0(dmu_objset_find_dp(dp, dp->dp_root_dir_obj, upgrade_clones_cb,
 761             tx, DS_FIND_CHILDREN));
 762 }
 763 
 764 /* ARGSUSED */
 765 static int
 766 upgrade_dir_clones_cb(dsl_pool_t *dp, dsl_dataset_t *ds, void *arg)
 767 {
 768         dmu_tx_t *tx = arg;
 769         objset_t *mos = dp->dp_meta_objset;
 770 
 771         if (dsl_dir_phys(ds->ds_dir)->dd_origin_obj != 0) {
 772                 dsl_dataset_t *origin;
 773 
 774                 VERIFY0(dsl_dataset_hold_obj(dp,
 775                     dsl_dir_phys(ds->ds_dir)->dd_origin_obj, FTAG, &origin));
 776 
 777                 if (dsl_dir_phys(origin->ds_dir)->dd_clones == 0) {
 778                         dmu_buf_will_dirty(origin->ds_dir->dd_dbuf, tx);
 779                         dsl_dir_phys(origin->ds_dir)->dd_clones =
 780                             zap_create(mos, DMU_OT_DSL_CLONES, DMU_OT_NONE,
 781                             0, tx);


 795 {
 796         ASSERT(dmu_tx_is_syncing(tx));
 797         uint64_t obj;
 798 
 799         (void) dsl_dir_create_sync(dp, dp->dp_root_dir, FREE_DIR_NAME, tx);
 800         VERIFY0(dsl_pool_open_special_dir(dp,
 801             FREE_DIR_NAME, &dp->dp_free_dir));
 802 
 803         /*
 804          * We can't use bpobj_alloc(), because spa_version() still
 805          * returns the old version, and we need a new-version bpobj with
 806          * subobj support.  So call dmu_object_alloc() directly.
 807          */
 808         obj = dmu_object_alloc(dp->dp_meta_objset, DMU_OT_BPOBJ,
 809             SPA_OLD_MAXBLOCKSIZE, DMU_OT_BPOBJ_HDR, sizeof (bpobj_phys_t), tx);
 810         VERIFY0(zap_add(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
 811             DMU_POOL_FREE_BPOBJ, sizeof (uint64_t), 1, &obj, tx));
 812         VERIFY0(bpobj_open(&dp->dp_free_bpobj, dp->dp_meta_objset, obj));
 813 
 814         VERIFY0(dmu_objset_find_dp(dp, dp->dp_root_dir_obj,
 815             upgrade_dir_clones_cb, tx, DS_FIND_CHILDREN));
 816 }
 817 
 818 void
 819 dsl_pool_create_origin(dsl_pool_t *dp, dmu_tx_t *tx)
 820 {
 821         uint64_t dsobj;
 822         dsl_dataset_t *ds;
 823 
 824         ASSERT(dmu_tx_is_syncing(tx));
 825         ASSERT(dp->dp_origin_snap == NULL);
 826         ASSERT(rrw_held(&dp->dp_config_rwlock, RW_WRITER));
 827 
 828         /* create the origin dir, ds, & snap-ds */
 829         dsobj = dsl_dataset_create_sync(dp->dp_root_dir, ORIGIN_DIR_NAME,
 830             NULL, 0, kcred, tx);
 831         VERIFY0(dsl_dataset_hold_obj(dp, dsobj, FTAG, &ds));
 832         dsl_dataset_snapshot_sync_impl(ds, ORIGIN_DIR_NAME, tx);
 833         VERIFY0(dsl_dataset_hold_obj(dp, dsl_dataset_phys(ds)->ds_prev_snap_obj,
 834             dp, &dp->dp_origin_snap));
 835         dsl_dataset_rele(ds, FTAG);


1039          *
1040          * (Unlike a rwlock, which knows that N threads hold it for
1041          * read, but not *which* threads, so rw_held(RW_READER) returns TRUE
1042          * if any thread holds it for read, even if this thread doesn't).
1043          */
1044         ASSERT(!rrw_held(&dp->dp_config_rwlock, RW_READER));
1045         rrw_enter(&dp->dp_config_rwlock, RW_READER, tag);
1046 }
1047 
1048 void
1049 dsl_pool_config_exit(dsl_pool_t *dp, void *tag)
1050 {
1051         rrw_exit(&dp->dp_config_rwlock, tag);
1052 }
1053 
1054 boolean_t
1055 dsl_pool_config_held(dsl_pool_t *dp)
1056 {
1057         return (RRW_LOCK_HELD(&dp->dp_config_rwlock));
1058 }
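
As the comment above notes, the re-entrant dp_config_rwlock keeps per-thread hold state, so rrw_held() (and with it dsl_pool_config_held()) effectively answers "does *this* thread hold the lock", which a plain rwlock cannot. Below is a minimal sketch of how a caller might use these wrappers; the function is hypothetical and shown only to illustrate that semantic.

#include <sys/dsl_pool.h>
#include <sys/spa.h>            /* FTAG */

/* Hypothetical read-only consumer of pool state. */
static void
example_inspect_pool(dsl_pool_t *dp)
{
        /*
         * Another thread's read hold does not trip this assertion;
         * the rrwlock accounts for holds per thread, unlike
         * rw_held(RW_READER) on a plain rwlock.
         */
        ASSERT(!dsl_pool_config_held(dp));

        dsl_pool_config_enter(dp, FTAG);
        ASSERT(dsl_pool_config_held(dp));

        /* ... examine datasets and dirs under the config lock ... */

        dsl_pool_config_exit(dp, FTAG);
}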


 741                 dsl_dataset_phys(prev)->ds_next_clones_obj =
 742                     zap_create(dp->dp_meta_objset,
 743                     DMU_OT_NEXT_CLONES, DMU_OT_NONE, 0, tx);
 744         }
 745         VERIFY0(zap_add_int(dp->dp_meta_objset,
 746             dsl_dataset_phys(prev)->ds_next_clones_obj, ds->ds_object, tx));
 747 
 748         dsl_dataset_rele(ds, FTAG);
 749         if (prev != dp->dp_origin_snap)
 750                 dsl_dataset_rele(prev, FTAG);
 751         return (0);
 752 }
 753 
 754 void
 755 dsl_pool_upgrade_clones(dsl_pool_t *dp, dmu_tx_t *tx)
 756 {
 757         ASSERT(dmu_tx_is_syncing(tx));
 758         ASSERT(dp->dp_origin_snap != NULL);
 759 
 760         VERIFY0(dmu_objset_find_dp(dp, dp->dp_root_dir_obj, upgrade_clones_cb,
 761             tx, DS_FIND_CHILDREN | DS_FIND_SERIALIZE));
 762 }
 763 
 764 /* ARGSUSED */
 765 static int
 766 upgrade_dir_clones_cb(dsl_pool_t *dp, dsl_dataset_t *ds, void *arg)
 767 {
 768         dmu_tx_t *tx = arg;
 769         objset_t *mos = dp->dp_meta_objset;
 770 
 771         if (dsl_dir_phys(ds->ds_dir)->dd_origin_obj != 0) {
 772                 dsl_dataset_t *origin;
 773 
 774                 VERIFY0(dsl_dataset_hold_obj(dp,
 775                     dsl_dir_phys(ds->ds_dir)->dd_origin_obj, FTAG, &origin));
 776 
 777                 if (dsl_dir_phys(origin->ds_dir)->dd_clones == 0) {
 778                         dmu_buf_will_dirty(origin->ds_dir->dd_dbuf, tx);
 779                         dsl_dir_phys(origin->ds_dir)->dd_clones =
 780                             zap_create(mos, DMU_OT_DSL_CLONES, DMU_OT_NONE,
 781                             0, tx);


 795 {
 796         ASSERT(dmu_tx_is_syncing(tx));
 797         uint64_t obj;
 798 
 799         (void) dsl_dir_create_sync(dp, dp->dp_root_dir, FREE_DIR_NAME, tx);
 800         VERIFY0(dsl_pool_open_special_dir(dp,
 801             FREE_DIR_NAME, &dp->dp_free_dir));
 802 
 803         /*
 804          * We can't use bpobj_alloc(), because spa_version() still
 805          * returns the old version, and we need a new-version bpobj with
 806          * subobj support.  So call dmu_object_alloc() directly.
 807          */
 808         obj = dmu_object_alloc(dp->dp_meta_objset, DMU_OT_BPOBJ,
 809             SPA_OLD_MAXBLOCKSIZE, DMU_OT_BPOBJ_HDR, sizeof (bpobj_phys_t), tx);
 810         VERIFY0(zap_add(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
 811             DMU_POOL_FREE_BPOBJ, sizeof (uint64_t), 1, &obj, tx));
 812         VERIFY0(bpobj_open(&dp->dp_free_bpobj, dp->dp_meta_objset, obj));
 813 
 814         VERIFY0(dmu_objset_find_dp(dp, dp->dp_root_dir_obj,
 815             upgrade_dir_clones_cb, tx, DS_FIND_CHILDREN | DS_FIND_SERIALIZE));
 816 }
 817 
 818 void
 819 dsl_pool_create_origin(dsl_pool_t *dp, dmu_tx_t *tx)
 820 {
 821         uint64_t dsobj;
 822         dsl_dataset_t *ds;
 823 
 824         ASSERT(dmu_tx_is_syncing(tx));
 825         ASSERT(dp->dp_origin_snap == NULL);
 826         ASSERT(rrw_held(&dp->dp_config_rwlock, RW_WRITER));
 827 
 828         /* create the origin dir, ds, & snap-ds */
 829         dsobj = dsl_dataset_create_sync(dp->dp_root_dir, ORIGIN_DIR_NAME,
 830             NULL, 0, kcred, tx);
 831         VERIFY0(dsl_dataset_hold_obj(dp, dsobj, FTAG, &ds));
 832         dsl_dataset_snapshot_sync_impl(ds, ORIGIN_DIR_NAME, tx);
 833         VERIFY0(dsl_dataset_hold_obj(dp, dsl_dataset_phys(ds)->ds_prev_snap_obj,
 834             dp, &dp->dp_origin_snap));
 835         dsl_dataset_rele(ds, FTAG);


1039          *
1040          * (Unlike a rwlock, which knows that N threads hold it for
1041          * read, but not *which* threads, so rw_held(RW_READER) returns TRUE
1042          * if any thread holds it for read, even if this thread doesn't).
1043          */
1044         ASSERT(!rrw_held(&dp->dp_config_rwlock, RW_READER));
1045         rrw_enter(&dp->dp_config_rwlock, RW_READER, tag);
1046 }
1047 
1048 void
1049 dsl_pool_config_exit(dsl_pool_t *dp, void *tag)
1050 {
1051         rrw_exit(&dp->dp_config_rwlock, tag);
1052 }
1053 
1054 boolean_t
1055 dsl_pool_config_held(dsl_pool_t *dp)
1056 {
1057         return (RRW_LOCK_HELD(&dp->dp_config_rwlock));
1058 }
1059 
1060 boolean_t
1061 dsl_pool_config_held_writer(dsl_pool_t *dp)
1062 {
1063         return (RRW_WRITE_HELD(&dp->dp_config_rwlock));
1064 }
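
The DS_FIND_SERIALIZE flag used above exists because dmu_objset_find_dp() can now fan the traversal out across a taskq: the upgrade callers pass it to keep the walk on the calling thread, while callers that omit it should expect their callback to be invoked concurrently from several taskq threads. Below is a hedged sketch of such a caller; the counting callback and wrapper are hypothetical and not part of this change, and the traversal's worker threads are relied on to take the pool config lock themselves.

#include <sys/atomic.h>
#include <sys/dmu_objset.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_pool.h>

/*
 * Hypothetical callback: count the datasets below the root dsl_dir.
 * Without DS_FIND_SERIALIZE it may run on several taskq threads at
 * once, so the shared counter is updated atomically.
 */
/* ARGSUSED */
static int
count_datasets_cb(dsl_pool_t *dp, dsl_dataset_t *ds, void *arg)
{
        atomic_inc_64((uint64_t *)arg);
        return (0);
}

static uint64_t
count_datasets(dsl_pool_t *dp)
{
        uint64_t count = 0;

        /*
         * No DS_FIND_SERIALIZE: the traversal may run in parallel,
         * which is why count_datasets_cb() is written thread-safe.
         */
        VERIFY0(dmu_objset_find_dp(dp, dp->dp_root_dir_obj,
            count_datasets_cb, &count, DS_FIND_CHILDREN));

        return (count);
}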