691 }
692
693 return (0);
694 }
695
696 static void
697 dmu_objset_create_sync(void *arg1, void *arg2, dmu_tx_t *tx)
698 {
699 dsl_dir_t *dd = arg1;
700 spa_t *spa = dd->dd_pool->dp_spa;
701 struct oscarg *oa = arg2;
702 uint64_t obj;
703 dsl_dataset_t *ds;
704 blkptr_t *bp;
705
706 ASSERT(dmu_tx_is_syncing(tx));
707
708 obj = dsl_dataset_create_sync(dd, oa->lastname,
709 oa->clone_origin, oa->flags, oa->cr, tx);
710
711 VERIFY3U(0, ==, dsl_dataset_hold_obj(dd->dd_pool, obj, FTAG, &ds));
712 bp = dsl_dataset_get_blkptr(ds);
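/*
 * A freshly created dataset has a hole for its root block pointer, so
 * the new objset must be built here and the caller's userfunc run
 * against it.  A clone inherits the origin's root bp (not a hole), so
 * its on-disk objset already exists and neither step is needed.
 */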
713 if (BP_IS_HOLE(bp)) {
714 objset_t *os =
715 dmu_objset_create_impl(spa, ds, bp, oa->type, tx);
716
717 if (oa->userfunc)
718 oa->userfunc(os, oa->userarg, oa->cr, tx);
719 }
720
721 if (oa->clone_origin == NULL) {
722 spa_history_log_internal_ds(ds, "create", tx, "");
723 } else {
724 char namebuf[MAXNAMELEN];
725 dsl_dataset_name(oa->clone_origin, namebuf);
726 spa_history_log_internal_ds(ds, "clone", tx,
727 "origin=%s (%llu)", namebuf, oa->clone_origin->ds_object);
728 }
729 dsl_dataset_rele(ds, FTAG);
730 }
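/*
 * Hedged sketch: struct oscarg is declared outside this excerpt, so the
 * layout below is reconstructed only from the fields that
 * dmu_objset_create_sync() dereferences above (lastname, clone_origin,
 * flags, cr, type, userfunc, userarg).  Field order and exact types in
 * the real source may differ.
 */
#if 0	/* illustrative reconstruction, not part of the numbered source */
struct oscarg {
	void (*userfunc)(objset_t *os, void *arg, cred_t *cr, dmu_tx_t *tx);
	void *userarg;			/* opaque argument for userfunc */
	dsl_dataset_t *clone_origin;	/* NULL for a plain create */
	const char *lastname;		/* final component of the new name */
	dmu_objset_type_t type;		/* e.g. DMU_OST_ZFS or DMU_OST_ZVOL */
	uint64_t flags;			/* dataset creation flags */
	cred_t *cr;			/* caller's credentials */
};
#endif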
731
1156
1157 ASSERT(dmu_tx_is_syncing(tx));
1158 /* XXX the write_done callback should really give us the tx... */
1159 os->os_synctx = tx;
1160
1161 if (os->os_dsl_dataset == NULL) {
1162 /*
1163 * This is the MOS. If we have upgraded,
1164 * spa_max_replication() could change, so reset
1165 * os_copies here.
1166 */
1167 os->os_copies = spa_max_replication(os->os_spa);
1168 }
1169
1170 /*
1171 * Create the root block IO
1172 */
1173 SET_BOOKMARK(&zb, os->os_dsl_dataset ?
1174 os->os_dsl_dataset->ds_object : DMU_META_OBJSET,
1175 ZB_ROOT_OBJECT, ZB_ROOT_LEVEL, ZB_ROOT_BLKID);
1176 VERIFY3U(0, ==, arc_release_bp(os->os_phys_buf, &os->os_phys_buf,
1177 os->os_rootbp, os->os_spa, &zb));
1178
1179 dmu_write_policy(os, NULL, 0, 0, &zp);
1180
1181 zio = arc_write(pio, os->os_spa, tx->tx_txg,
1182 os->os_rootbp, os->os_phys_buf, DMU_OS_IS_L2CACHEABLE(os), &zp,
1183 dmu_objset_write_ready, dmu_objset_write_done, os,
1184 ZIO_PRIORITY_ASYNC_WRITE, ZIO_FLAG_MUSTSUCCEED, &zb);
1185
1186 /*
1187 * Sync special dnodes - the parent IO for the sync is the root block
1188 */
1189 DMU_META_DNODE(os)->dn_zio = zio;
1190 dnode_sync(DMU_META_DNODE(os), tx);
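/*
 * Setting the meta dnode's dn_zio to the root block zio parents every
 * block write issued by dnode_sync() under the objset's root block
 * write, so the root block I/O does not complete until all of the
 * objset's dnode block writes beneath it have completed.
 */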
1191
1192 os->os_phys->os_flags = os->os_flags;
1193
1194 if (DMU_USERUSED_DNODE(os) &&
1195 DMU_USERUSED_DNODE(os)->dn_type != DMU_OT_NONE) {
1196 DMU_USERUSED_DNODE(os)->dn_zio = zio;
1252 {
1253 used_cbs[ost] = cb;
1254 }
1255
1256 boolean_t
1257 dmu_objset_userused_enabled(objset_t *os)
1258 {
1259 return (spa_version(os->os_spa) >= SPA_VERSION_USERSPACE &&
1260 used_cbs[os->os_phys->os_type] != NULL &&
1261 DMU_USERUSED_DNODE(os) != NULL);
1262 }
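/*
 * Hedged usage sketch: used_cbs[] is filled in by the registration
 * function whose one-line body appears above (its declaration falls
 * outside this excerpt; in illumos the entry point is
 * dmu_objset_register_type()).  dmu_objset_userused_enabled() then
 * gates per-user/group space accounting on three things: a callback
 * registered for this objset type, a pool version new enough for
 * userspace accounting, and the presence of the userused dnode.  A
 * filesystem layer registers once at module load, roughly as below;
 * the callback name is a placeholder, not a real symbol.
 */
#if 0	/* illustrative only */
	dmu_objset_register_type(DMU_OST_ZFS, example_space_delta_cb);
#endif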
1263
1264 static void
1265 do_userquota_update(objset_t *os, uint64_t used, uint64_t flags,
1266 uint64_t user, uint64_t group, boolean_t subtract, dmu_tx_t *tx)
1267 {
1268 if ((flags & DNODE_FLAG_USERUSED_ACCOUNTED)) {
1269 int64_t delta = DNODE_SIZE + used;
1270 if (subtract)
1271 delta = -delta;
1272 VERIFY3U(0, ==, zap_increment_int(os, DMU_USERUSED_OBJECT,
1273 user, delta, tx));
1274 VERIFY3U(0, ==, zap_increment_int(os, DMU_GROUPUSED_OBJECT,
1275 group, delta, tx));
1276 }
1277 }
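/*
 * Worked example (illustrative, with made-up numbers): syncing a dnode
 * whose blocks account for 3072 bytes and whose bonus names uid 1000 /
 * gid 10 charges delta = DNODE_SIZE + 3072 = 3584 bytes (assuming the
 * usual 512-byte on-disk dnode) to both the per-user and per-group ZAP
 * entries; freeing that object later passes subtract == B_TRUE and
 * removes the same 3584 bytes.
 *
 *	do_userquota_update(os, 3072, DNODE_FLAG_USERUSED_ACCOUNTED,
 *	    1000, 10, B_FALSE, tx);
 */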
1278
1279 void
1280 dmu_objset_do_userquota_updates(objset_t *os, dmu_tx_t *tx)
1281 {
1282 dnode_t *dn;
1283 list_t *list = &os->os_synced_dnodes;
1284
1285 ASSERT(list_head(list) == NULL || dmu_objset_userused_enabled(os));
1286
1287 while ((dn = list_head(list)) != NULL) {
1288 int flags;
1289 ASSERT(!DMU_OBJECT_IS_SPECIAL(dn->dn_object));
1290 ASSERT(dn->dn_phys->dn_type == DMU_OT_NONE ||
1291 dn->dn_phys->dn_flags &
1292 DNODE_FLAG_USERUSED_ACCOUNTED);
1293
1294 /* Allocate the user/groupused objects if necessary. */