/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012 by Delphix. All rights reserved.
 * Copyright (c) 2012 Joyent, Inc. All rights reserved.
 */

#include <sys/dmu.h>
#include <sys/dmu_objset.h>
#include <sys/dmu_tx.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_synctask.h>
#include <sys/dsl_deleg.h>
#include <sys/spa.h>
#include <sys/metaslab.h>
#include <sys/zap.h>
#include <sys/zio.h>
#include <sys/arc.h>
#include <sys/sunddi.h>
#include <sys/zfs_zone.h>
#include <sys/zfeature.h>
#include <sys/policy.h>
#include <sys/zfs_znode.h>
#include "zfs_namecheck.h"
#include "zfs_prop.h"

/*
 * Filesystem and Snapshot Limits
 * ------------------------------
 *
 * These limits are used to restrict the number of filesystems and/or snapshots
 * that can be created at a given level in the tree or below. A typical
 * use-case is with a delegated dataset where the administrator wants to ensure
 * that a user within the zone is not creating too many additional filesystems
 * or snapshots, even though they're not exceeding their space quota.
 *
 * The count of filesystems and snapshots is stored in the dsl_dir_phys_t,
 * which impacts the on-disk format. As such, this capability is controlled by
 * a feature flag and must be enabled to be used. Once enabled, the feature is
 * not active until the first limit is set. At that point, future operations to
 * create/destroy filesystems or snapshots will validate and update the counts.
 *
 * Because the on-disk counts will be uninitialized (0) before the feature is
 * active, the counts are updated when a limit is first set on an uninitialized
 * node. (The filesystem/snapshot counts on a node include all of the nested
 * filesystems/snapshots, plus the node itself. Thus, a new leaf node has a
 * filesystem count of 1 and a snapshot count of 0. A filesystem count of 0 on
 * a node indicates uninitialized counts on that node.) When setting a limit on
 * an uninitialized node, the code starts at the filesystem with the new limit
 * and descends into all sub-filesystems, updating the counts to be accurate.
 * In practice this is lightweight since a limit is typically set when the
 * filesystem is created and thus has no children. Once valid, changing the
 * limit value won't require a re-traversal since the counts are already valid.
 * When recursively fixing the counts, if a node with a limit is encountered
 * during the descent, the counts are known to be valid and there is no need to
 * descend into that filesystem's children. The counts on filesystems above the
 * one with the new limit will still be uninitialized (0), unless a limit is
 * eventually set on one of those filesystems. The counts are always
 * recursively updated when a limit is first set on a dataset; a dataset that
 * already has a limit is skipped, since its counts are known to be valid.
 * When a new limit value is set on a filesystem with an existing limit, it is
 * possible for the new limit to be less than the current count at that level
 * since a user who can change the limit is also allowed to exceed the limit.
 *
 * Once the feature is active, whenever a filesystem or snapshot is
 * created, the code recurses up the tree, validating the new count against the
 * limit at each initialized level. In practice, most levels will not have a
 * limit set. If there is a limit at any initialized level up the tree, the
 * check must pass or the creation will fail. Likewise, when a filesystem or
 * snapshot is destroyed, the counts are recursively adjusted on all of the
 * initialized nodes up the tree. Renaming a filesystem into a different point
 * in the tree will first validate, then update the counts on each branch up to
 * the common ancestor. A receive will also validate the counts and then update
 * them.
 *
 * An exception to the above behavior is that the limit is not enforced if the
 * user has permission to modify the limit. This is primarily so that
 * recursive snapshots in the global zone always work. We want to prevent a
 * denial-of-service in which a lower level delegated dataset could max out its
 * limit and thus block recursive snapshots from being taken in the global
 * zone. Because of this, it is possible for the snapshot count to be over the
 * limit, and snapshots taken in the global zone could cause a lower level
 * dataset to hit or exceed its limit. The administrator taking the global
 * zone recursive snapshot should be aware of this side-effect and behave
 * accordingly. For consistency, the filesystem limit is also not enforced if
 * the user can modify the limit.
 *
 * The filesystem limit is validated by dsl_dir_fscount_check() and updated by
 * dsl_dir_fscount_adjust(). The snapshot limit is validated by
 * dsl_snapcount_check() and updated by dsl_snapcount_adjust().
 * A new limit value is validated in dsl_dir_validate_fs_ss_limit() and the
 * filesystem counts are adjusted, if necessary, by dsl_dir_set_fs_ss_count().
 *
 * There is a special case when we receive a filesystem that already exists. In
 * this case a temporary clone name of %X is created (see dmu_recv_begin). We
 * never update the filesystem counts for temporary clones.
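 *
 * For example, assume the feature is active and a limit has been set on
 * tank/a in the following (hypothetical) pool layout:
 *
 *	tank		filesystem_count=0 (uninitialized; no limit set here)
 *	tank/a		filesystem_count=3, snapshot_count=1 (limit set here)
 *	tank/a/b	filesystem_count=2, snapshot_count=1
 *	tank/a/b/c	filesystem_count=1, snapshot_count=0
 *
 * tank/a/b's snapshot_count of 1 reflects a single snapshot of tank/a/b;
 * each initialized node counts itself plus everything nested beneath it.
 * Creating tank/a/b/d would validate the new count against tank/a's limit,
 * then increment the filesystem counts on its ancestors tank/a/b and
 * tank/a, leaving the uninitialized count on tank untouched.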
 */

static uint64_t dsl_dir_space_towrite(dsl_dir_t *dd);
static void dsl_dir_set_reservation_sync_impl(dsl_dir_t *dd,
    uint64_t value, dmu_tx_t *tx);

extern dsl_syncfunc_t dsl_prop_set_sync;

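/*
 * Eviction callback for the dmu buffer user.  Invoked when this dsl_dir's
 * bonus buffer is evicted; tears down the in-core dsl_dir_t.
 */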
/* ARGSUSED */
static void
dsl_dir_evict(dmu_buf_t *db, void *arg)
{
	dsl_dir_t *dd = arg;
	dsl_pool_t *dp = dd->dd_pool;
	int t;

	for (t = 0; t < TXG_SIZE; t++) {
		ASSERT(!txg_list_member(&dp->dp_dirty_dirs, dd, t));
		ASSERT(dd->dd_tempreserved[t] == 0);
		ASSERT(dd->dd_space_towrite[t] == 0);
	}

	if (dd->dd_parent)
		dsl_dir_close(dd->dd_parent, dd);

	spa_close(dd->dd_pool->dp_spa, dd);

	/*
	 * The props callback list should have been cleaned up by
	 * objset_evict().
	 */
	list_destroy(&dd->dd_prop_cbs);
	mutex_destroy(&dd->dd_lock);
	kmem_free(dd, sizeof (dsl_dir_t));
}

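/*
 * Hold the dsl_dir_t for the directory with the given object number,
 * instantiating the in-core structure (and its parent chain) on first use.
 * "tail" optionally supplies this dir's name within its parent; otherwise
 * the name is found by searching the parent's child-dir ZAP.
 */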
int
dsl_dir_open_obj(dsl_pool_t *dp, uint64_t ddobj,
    const char *tail, void *tag, dsl_dir_t **ddp)
{
	dmu_buf_t *dbuf;
	dsl_dir_t *dd;
	int err;

	ASSERT(RW_LOCK_HELD(&dp->dp_config_rwlock) ||
	    dsl_pool_sync_context(dp));

	err = dmu_bonus_hold(dp->dp_meta_objset, ddobj, tag, &dbuf);
	if (err)
		return (err);
	dd = dmu_buf_get_user(dbuf);
#ifdef ZFS_DEBUG
	{
		dmu_object_info_t doi;
		dmu_object_info_from_db(dbuf, &doi);
		ASSERT3U(doi.doi_type, ==, DMU_OT_DSL_DIR);
		ASSERT3U(doi.doi_bonus_size, >=, sizeof (dsl_dir_phys_t));
	}
#endif
	if (dd == NULL) {
		dsl_dir_t *winner;

		dd = kmem_zalloc(sizeof (dsl_dir_t), KM_SLEEP);
		dd->dd_object = ddobj;
		dd->dd_dbuf = dbuf;
		dd->dd_pool = dp;
		dd->dd_phys = dbuf->db_data;
		mutex_init(&dd->dd_lock, NULL, MUTEX_DEFAULT, NULL);

		list_create(&dd->dd_prop_cbs, sizeof (dsl_prop_cb_record_t),
		    offsetof(dsl_prop_cb_record_t, cbr_node));

		dsl_dir_snap_cmtime_update(dd);

		if (dd->dd_phys->dd_parent_obj) {
			err = dsl_dir_open_obj(dp, dd->dd_phys->dd_parent_obj,
			    NULL, dd, &dd->dd_parent);
			if (err)
				goto errout;
			if (tail) {
#ifdef ZFS_DEBUG
				uint64_t foundobj;

				err = zap_lookup(dp->dp_meta_objset,
				    dd->dd_parent->dd_phys->dd_child_dir_zapobj,
				    tail, sizeof (foundobj), 1, &foundobj);
				ASSERT(err || foundobj == ddobj);
#endif
				(void) strcpy(dd->dd_myname, tail);
			} else {
				err = zap_value_search(dp->dp_meta_objset,
				    dd->dd_parent->dd_phys->dd_child_dir_zapobj,
				    ddobj, 0, dd->dd_myname);
			}
			if (err)
				goto errout;
		} else {
			(void) strcpy(dd->dd_myname, spa_name(dp->dp_spa));
		}

		if (dsl_dir_is_clone(dd)) {
			dmu_buf_t *origin_bonus;
			dsl_dataset_phys_t *origin_phys;

			/*
			 * We can't open the origin dataset, because
			 * that would require opening this dsl_dir.
			 * Just look at its phys directly instead.
			 */
			err = dmu_bonus_hold(dp->dp_meta_objset,
			    dd->dd_phys->dd_origin_obj, FTAG, &origin_bonus);
			if (err)
				goto errout;
			origin_phys = origin_bonus->db_data;
			dd->dd_origin_txg =
			    origin_phys->ds_creation_txg;
			dmu_buf_rele(origin_bonus, FTAG);
		}

		winner = dmu_buf_set_user_ie(dbuf, dd, &dd->dd_phys,
		    dsl_dir_evict);
		if (winner) {
			if (dd->dd_parent)
				dsl_dir_close(dd->dd_parent, dd);
			mutex_destroy(&dd->dd_lock);
			kmem_free(dd, sizeof (dsl_dir_t));
			dd = winner;
		} else {
			spa_open_ref(dp->dp_spa, dd);
		}
	}

	/*
	 * The dsl_dir_t has both open-to-close and instantiate-to-evict
	 * holds on the spa. We need the open-to-close holds because
	 * otherwise the spa_refcnt wouldn't change when we open a
	 * dir which the spa also has open, so we could incorrectly
	 * think it was OK to unload/export/destroy the pool. We need
	 * the instantiate-to-evict hold because the dsl_dir_t has a
	 * pointer to the dd_pool, which has a pointer to the spa_t.
	 */
	spa_open_ref(dp->dp_spa, tag);
	ASSERT3P(dd->dd_pool, ==, dp);
	ASSERT3U(dd->dd_object, ==, ddobj);
	ASSERT3P(dd->dd_dbuf, ==, dbuf);
	*ddp = dd;
	return (0);

errout:
	if (dd->dd_parent)
		dsl_dir_close(dd->dd_parent, dd);
	mutex_destroy(&dd->dd_lock);
	kmem_free(dd, sizeof (dsl_dir_t));
	dmu_buf_rele(dbuf, tag);
	return (err);
}

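/* Release a hold obtained from one of the dsl_dir open routines. */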
void
dsl_dir_close(dsl_dir_t *dd, void *tag)
{
	dprintf_dd(dd, "%s\n", "");
	spa_close(dd->dd_pool->dp_spa, tag);
	dmu_buf_rele(dd->dd_dbuf, tag);
}

/* buf must be long enough (MAXNAMELEN + strlen(MOS_DIR_NAME) + 1 should do) */
void
dsl_dir_name(dsl_dir_t *dd, char *buf)
{
	if (dd->dd_parent) {
		dsl_dir_name(dd->dd_parent, buf);
		(void) strcat(buf, "/");
	} else {
		buf[0] = '\0';
	}
	if (!MUTEX_HELD(&dd->dd_lock)) {
		/*
		 * recursive mutex so that we can use
		 * dprintf_dd() with dd_lock held
		 */
		mutex_enter(&dd->dd_lock);
		(void) strcat(buf, dd->dd_myname);
		mutex_exit(&dd->dd_lock);
	} else {
		(void) strcat(buf, dd->dd_myname);
	}
}

/* Calculate name length, avoiding all the strcat calls of dsl_dir_name */
int
dsl_dir_namelen(dsl_dir_t *dd)
{
	int result = 0;

	if (dd->dd_parent) {
		/* parent's name + 1 for the "/" */
		result = dsl_dir_namelen(dd->dd_parent) + 1;
	}

	if (!MUTEX_HELD(&dd->dd_lock)) {
		/* see dsl_dir_name */
		mutex_enter(&dd->dd_lock);
		result += strlen(dd->dd_myname);
		mutex_exit(&dd->dd_lock);
	} else {
		result += strlen(dd->dd_myname);
	}

	return (result);
}

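/*
 * Copy the first component of "path" (everything up to a '/' or '@'
 * separator) into "component" and set *nextp to the remainder, or to NULL
 * if this was the last component.  Returns 0 on success, ENOENT for an
 * empty path, EINVAL for a malformed name, or ENAMETOOLONG for an
 * overlong component.
 */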
static int
getcomponent(const char *path, char *component, const char **nextp)
{
	char *p;
	if ((path == NULL) || (path[0] == '\0'))
		return (ENOENT);
	/* This would be a good place to reserve some namespace... */
	p = strpbrk(path, "/@");
	if (p && (p[1] == '/' || p[1] == '@')) {
		/* two separators in a row */
		return (EINVAL);
	}
	if (p == NULL || p == path) {
		/*
		 * if the first thing is an @ or /, it had better be an
		 * @ and it had better not have any more ats or slashes,
		 * and it had better have something after the @.
		 */
		if (p != NULL &&
		    (p[0] != '@' || strpbrk(path+1, "/@") || p[1] == '\0'))
			return (EINVAL);
		if (strlen(path) >= MAXNAMELEN)
			return (ENAMETOOLONG);
		(void) strcpy(component, path);
		p = NULL;
	} else if (p[0] == '/') {
		if (p-path >= MAXNAMELEN)
			return (ENAMETOOLONG);
		(void) strncpy(component, path, p - path);
		component[p-path] = '\0';
		p++;
	} else if (p[0] == '@') {
		/*
		 * if the next separator is an @, there better not be
		 * any more slashes.
		 */
		if (strchr(path, '/'))
			return (EINVAL);
		if (p-path >= MAXNAMELEN)
			return (ENAMETOOLONG);
		(void) strncpy(component, path, p - path);
		component[p-path] = '\0';
	} else {
		ASSERT(!"invalid p");
	}
	*nextp = p;
	return (0);
}

/*
 * same as dsl_dir_open, but ignore the first component of name and use the
 * spa instead
 */
int
dsl_dir_open_spa(spa_t *spa, const char *name, void *tag,
    dsl_dir_t **ddp, const char **tailp)
{
	char buf[MAXNAMELEN];
	const char *next, *nextnext = NULL;
	int err;
	dsl_dir_t *dd;
	dsl_pool_t *dp;
	uint64_t ddobj;
	int openedspa = FALSE;

	dprintf("%s\n", name);

	err = getcomponent(name, buf, &next);
	if (err)
		return (err);
	if (spa == NULL) {
		err = spa_open(buf, &spa, FTAG);
		if (err) {
			dprintf("spa_open(%s) failed\n", buf);
			return (err);
		}
		openedspa = TRUE;

		/* XXX this assertion belongs in spa_open */
		ASSERT(!dsl_pool_sync_context(spa_get_dsl(spa)));
	}

	dp = spa_get_dsl(spa);

	rw_enter(&dp->dp_config_rwlock, RW_READER);
	err = dsl_dir_open_obj(dp, dp->dp_root_dir_obj, NULL, tag, &dd);
	if (err) {
		rw_exit(&dp->dp_config_rwlock);
		if (openedspa)
			spa_close(spa, FTAG);
		return (err);
	}

	while (next != NULL) {
		dsl_dir_t *child_ds;
		err = getcomponent(next, buf, &nextnext);
		if (err)
			break;
		ASSERT(next[0] != '\0');
		if (next[0] == '@')
			break;
		dprintf("looking up %s in obj%lld\n",
		    buf, dd->dd_phys->dd_child_dir_zapobj);

		err = zap_lookup(dp->dp_meta_objset,
		    dd->dd_phys->dd_child_dir_zapobj,
		    buf, sizeof (ddobj), 1, &ddobj);
		if (err) {
			if (err == ENOENT)
				err = 0;
			break;
		}

		err = dsl_dir_open_obj(dp, ddobj, buf, tag, &child_ds);
		if (err)
			break;
		dsl_dir_close(dd, tag);
		dd = child_ds;
		next = nextnext;
	}
	rw_exit(&dp->dp_config_rwlock);

	if (err) {
		dsl_dir_close(dd, tag);
		if (openedspa)
			spa_close(spa, FTAG);
		return (err);
	}

	/*
	 * It's an error if there's more than one component left, or
	 * tailp==NULL and there's any component left.
	 */
	if (next != NULL &&
	    (tailp == NULL || (nextnext && nextnext[0] != '\0'))) {
		/* bad path name */
		dsl_dir_close(dd, tag);
		dprintf("next=%p (%s) tail=%p\n", next, next?next:"", tailp);
		err = ENOENT;
	}
	if (tailp)
		*tailp = next;
	if (openedspa)
		spa_close(spa, FTAG);
	*ddp = dd;
	return (err);
}

/*
 * Return the dsl_dir_t, and possibly the last component which couldn't
 * be found in *tail. An error is returned if the path is bogus, or if
 * tail==NULL and we couldn't parse the whole name. (*tail)[0] == '@'
 * means that the last component is a snapshot.
 */
int
dsl_dir_open(const char *name, void *tag, dsl_dir_t **ddp, const char **tailp)
{
	return (dsl_dir_open_spa(NULL, name, tag, ddp, tailp));
}

/*
 * Check if the counts are already valid for this filesystem and its
 * descendants. The counts on this filesystem, and those below, may be
 * uninitialized due to either the use of a pre-existing pool which did not
 * support the filesystem/snapshot limit feature, or one in which the feature
 * had not yet been enabled.
 *
 * Recursively descend the filesystem tree and update the filesystem/snapshot
 * counts on each filesystem below, then update the cumulative count on the
 * current filesystem. If the filesystem already has a limit set on it,
 * then we know that its counts, and the counts on the filesystems below it,
 * have been updated to be correct, so we can skip this filesystem.
 */
static int
dsl_dir_set_fs_ss_count(dsl_dir_t *dd, dmu_tx_t *tx, uint64_t *fscnt,
    uint64_t *sscnt)
{
	uint64_t my_fs_cnt = 0;
	uint64_t my_ss_cnt = 0;
	uint64_t curr_ss_cnt;
	objset_t *os = dd->dd_pool->dp_meta_objset;
	zap_cursor_t *zc;
	zap_attribute_t *za;
	int err;
	int ret = 0;
	boolean_t limit_set = B_FALSE;
	uint64_t fslimit, sslimit;
	dsl_dataset_t *ds;

	ASSERT(RW_LOCK_HELD(&dd->dd_pool->dp_config_rwlock));

	err = dsl_prop_get_dd(dd, zfs_prop_to_name(ZFS_PROP_FILESYSTEM_LIMIT),
	    8, 1, &fslimit, NULL, B_FALSE);
	if (err == 0 && fslimit != UINT64_MAX)
		limit_set = B_TRUE;

	if (!limit_set) {
		err = dsl_prop_get_dd(dd,
		    zfs_prop_to_name(ZFS_PROP_SNAPSHOT_LIMIT), 8, 1, &sslimit,
		    NULL, B_FALSE);
		if (err == 0 && sslimit != UINT64_MAX)
			limit_set = B_TRUE;
	}

	/*
	 * If the dd has a limit, we know its count is already good and we
	 * don't need to recurse down any further.
	 */
	if (limit_set) {
		*fscnt = dd->dd_phys->dd_filesystem_count;
		*sscnt = dd->dd_phys->dd_snapshot_count;
		return (ret);
	}

	zc = kmem_alloc(sizeof (zap_cursor_t), KM_SLEEP);
	za = kmem_alloc(sizeof (zap_attribute_t), KM_SLEEP);

	mutex_enter(&dd->dd_lock);

	/* Iterate datasets */
	for (zap_cursor_init(zc, os, dd->dd_phys->dd_child_dir_zapobj);
	    zap_cursor_retrieve(zc, za) == 0; zap_cursor_advance(zc)) {
		dsl_dir_t *chld_dd;
		uint64_t chld_fs_cnt = 0;
		uint64_t chld_ss_cnt = 0;

		if (dsl_dir_open_obj(dd->dd_pool,
		    ZFS_DIRENT_OBJ(za->za_first_integer), NULL, FTAG,
		    &chld_dd)) {
			ret = 1;
			break;
		}

		if (dsl_dir_set_fs_ss_count(chld_dd, tx, &chld_fs_cnt,
		    &chld_ss_cnt)) {
			ret = 1;
			break;
		}

		dsl_dir_close(chld_dd, FTAG);

		my_fs_cnt += chld_fs_cnt;
		my_ss_cnt += chld_ss_cnt;
	}
	zap_cursor_fini(zc);
	kmem_free(zc, sizeof (zap_cursor_t));
	kmem_free(za, sizeof (zap_attribute_t));

	/* Count snapshots */
	if (dsl_dataset_hold_obj(dd->dd_pool, dd->dd_phys->dd_head_dataset_obj,
	    FTAG, &ds) == 0) {
		if (zap_count(os, ds->ds_phys->ds_snapnames_zapobj,
		    &curr_ss_cnt) == 0)
			my_ss_cnt += curr_ss_cnt;
		else
			ret = 1;
		dsl_dataset_rele(ds, FTAG);
	} else {
		ret = 1;
	}

	/* Add 1 for self */
	my_fs_cnt++;

	/* save updated counts */
	dmu_buf_will_dirty(dd->dd_dbuf, tx);
	dd->dd_phys->dd_filesystem_count = my_fs_cnt;
	dd->dd_phys->dd_snapshot_count = my_ss_cnt;

	mutex_exit(&dd->dd_lock);

	/* Return child dataset count plus self */
	*fscnt = my_fs_cnt;
	*sscnt = my_ss_cnt;
	return (ret);
}

/* ARGSUSED */
static int
fs_ss_limit_feat_check(void *arg1, void *arg2, dmu_tx_t *tx)
{
	return (0);
}

/* ARGSUSED */
static void
fs_ss_limit_feat_sync(void *arg1, void *arg2, dmu_tx_t *tx)
{
	spa_t *spa = arg1;
	zfeature_info_t *limit_feat =
	    &spa_feature_table[SPA_FEATURE_FS_SS_LIMIT];

	spa_feature_incr(spa, limit_feat, tx);
}

/*
 * Make sure the feature is enabled and activate it if necessary.
 * If setting a limit, ensure the on-disk counts are valid.
 *
 * We do not validate the new limit, since users who can change the limit are
 * also allowed to exceed the limit.
 *
 * Return -1 to force the zfs_set_prop_nvlist code down the default path to set
 * the value in the nvlist.
 */
int
dsl_dir_validate_fs_ss_limit(const char *ddname, uint64_t limit,
    zfs_prop_t ptype)
{
	dsl_dir_t *dd;
	dsl_dataset_t *ds;
	int err;
	dmu_tx_t *tx;
	uint64_t my_fs_cnt = 0;
	uint64_t my_ss_cnt = 0;
	uint64_t curr_limit;
	spa_t *spa;
	zfeature_info_t *limit_feat =
	    &spa_feature_table[SPA_FEATURE_FS_SS_LIMIT];

	if ((err = dsl_dataset_hold(ddname, FTAG, &ds)) != 0)
		return (err);

	spa = dsl_dataset_get_spa(ds);
	if (!spa_feature_is_enabled(spa,
	    &spa_feature_table[SPA_FEATURE_FS_SS_LIMIT])) {
		dsl_dataset_rele(ds, FTAG);
		return (ENOTSUP);
	}

	dd = ds->ds_dir;

	if ((err = dsl_prop_get_dd(dd, zfs_prop_to_name(ptype), 8, 1,
	    &curr_limit, NULL, B_FALSE)) != 0) {
		dsl_dataset_rele(ds, FTAG);
		return (err);
	}

	if (limit == UINT64_MAX) {
		/*
		 * If we had a limit, since we're now removing that limit, this
		 * is where we could decrement the feature-active counter so
		 * that the feature becomes inactive (only enabled) if we
		 * remove the last limit. However, we do not currently support
		 * deactivating the feature.
		 */
		dsl_dataset_rele(ds, FTAG);
		return (-1);
	}

	if (!spa_feature_is_active(spa, limit_feat)) {
		/*
		 * Since the feature was not active and we're now setting a
		 * limit, increment the feature-active counter so that the
		 * feature becomes active for the first time.
		 *
		 * We can't update the MOS in open context, so create a sync
		 * task.
		 */
		err = dsl_sync_task_do(dd->dd_pool, fs_ss_limit_feat_check,
		    fs_ss_limit_feat_sync, spa, (void *)1, 0);
		if (err != 0) {
			dsl_dataset_rele(ds, FTAG);
			return (err);
		}
	}

	tx = dmu_tx_create_dd(dd);
	if (dmu_tx_assign(tx, TXG_WAIT)) {
		dmu_tx_abort(tx);
		dsl_dataset_rele(ds, FTAG);
		return (ENOSPC);
	}

	/*
	 * Since we are now setting a non-UINT64_MAX limit on the filesystem,
	 * we need to ensure the counts are correct. Descend down the tree from
	 * this point and update all of the counts to be accurate.
	 */
	err = -1;
	rw_enter(&dd->dd_pool->dp_config_rwlock, RW_READER);
	if (dsl_dir_set_fs_ss_count(dd, tx, &my_fs_cnt, &my_ss_cnt))
		err = ENOSPC;
	rw_exit(&dd->dd_pool->dp_config_rwlock);

	dmu_tx_commit(tx);
	dsl_dataset_rele(ds, FTAG);

	return (err);
}

/*
 * Used to determine if the filesystem_limit or snapshot_limit should be
 * enforced. We allow the limit to be exceeded if the user has permission to
 * write the property value. We pass in the creds that we got in the open
 * context since we will always be the GZ root in syncing context.
 *
 * We can never modify these two properties within a non-global zone. In
 * addition, the other checks are modeled on zfs_secpolicy_write_perms. We
 * can't use that function since we are already holding the dp_config_rwlock.
 * In addition, we already have the dd and dealing with snapshots is simplified.
 */
int
dsl_secpolicy_write_prop(dsl_dir_t *dd, zfs_prop_t prop, cred_t *cr)
{
	int err = 0;
	uint64_t obj;
	dsl_dataset_t *ds;
	uint64_t zoned;

#ifdef _KERNEL
	if (crgetzoneid(cr) != GLOBAL_ZONEID)
		return (EPERM);

	if (secpolicy_zfs(cr) == 0)
		return (0);
#endif

	if ((obj = dd->dd_phys->dd_head_dataset_obj) == 0)
		return (ENOENT);

	ASSERT(RW_LOCK_HELD(&dd->dd_pool->dp_config_rwlock));

	if ((err = dsl_dataset_hold_obj(dd->dd_pool, obj, FTAG, &ds)) != 0)
		return (err);

	if (dsl_prop_get_ds(ds, "zoned", 8, 1, &zoned, NULL) || zoned) {
		/* Only root can access zoned fs's from the GZ */
		err = EPERM;
	} else {
		err = dsl_deleg_access_impl(ds, zfs_prop_to_name(prop), cr,
		    B_FALSE);
	}

	dsl_dataset_rele(ds, FTAG);
	return (err);
}

/*
 * Check if adding additional child filesystem(s) would exceed any filesystem
 * limits. Note that all filesystem limits up to the root (or to the given
 * ancestor, or to the highest initialized node) must be satisfied.
 */
int
dsl_dir_fscount_check(dsl_dir_t *dd, uint64_t cnt, dsl_dir_t *ancestor,
    cred_t *cr)
{
	uint64_t limit;
	int err = 0;

	VERIFY(RW_LOCK_HELD(&dd->dd_pool->dp_config_rwlock));

	/* If we're allowed to change the limit, don't enforce the limit. */
	if (dsl_secpolicy_write_prop(dd, ZFS_PROP_FILESYSTEM_LIMIT, cr) == 0)
		return (0);

	/*
	 * If an ancestor has been provided, stop checking the limit once we
	 * hit that dir. We need this during rename so that we don't overcount
	 * the check once we recurse up to the common ancestor.
	 */
	if (ancestor == dd)
		return (0);

	/*
	 * If we hit an uninitialized node while recursing up the tree, we can
	 * stop since we know the counts are not valid on this node and we
	 * know we won't touch this node's counts.
	 */
	if (dd->dd_phys->dd_filesystem_count == 0)
		return (0);

	err = dsl_prop_get_dd(dd, zfs_prop_to_name(ZFS_PROP_FILESYSTEM_LIMIT),
	    8, 1, &limit, NULL, B_FALSE);
	if (err != 0)
		return (err);

	/* Is there a fs limit which we've hit? */
	if ((dd->dd_phys->dd_filesystem_count + cnt) > limit)
		return (EDQUOT);

	if (dd->dd_parent != NULL)
		err = dsl_dir_fscount_check(dd->dd_parent, cnt, ancestor, cr);

	return (err);
}

/*
 * Adjust the filesystem count for the specified dsl_dir_t and all parent
 * filesystems. When a new filesystem is created, increment the count on all
 * parents, and when a filesystem is destroyed, decrement the count.
 */
void
dsl_dir_fscount_adjust(dsl_dir_t *dd, dmu_tx_t *tx, int64_t delta,
    boolean_t first)
{
	if (first) {
		VERIFY(RW_LOCK_HELD(&dd->dd_pool->dp_config_rwlock));
		VERIFY(dmu_tx_is_syncing(tx));
	}

	/*
	 * When we receive an incremental stream into a filesystem that already
	 * exists, a temporary clone is created. We don't count this temporary
	 * clone, whose name begins with a '%'.
	 */
	if (dd->dd_myname[0] == '%')
		return;

	/*
	 * If we hit an uninitialized node while recursing up the tree, we can
	 * stop since we know the counts are not valid on this node and we
	 * know we shouldn't touch this node's counts. An uninitialized count
	 * on the node indicates that either the feature has not yet been
	 * activated or there are no limits on this part of the tree.
	 */
	if (dd->dd_phys->dd_filesystem_count == 0)
		return;

	/*
	 * On initial entry we need to check if this feature is active, but
	 * we don't want to re-check this on each recursive call. Note: the
	 * feature cannot be active if it's not enabled. If the feature is not
	 * active, don't touch the on-disk count fields.
	 */
	if (first) {
		zfeature_info_t *quota_feat =
		    &spa_feature_table[SPA_FEATURE_FS_SS_LIMIT];

		if (!spa_feature_is_active(dd->dd_pool->dp_spa, quota_feat))
			return;
	}

	dmu_buf_will_dirty(dd->dd_dbuf, tx);

	mutex_enter(&dd->dd_lock);

	dd->dd_phys->dd_filesystem_count += delta;
	VERIFY(dd->dd_phys->dd_filesystem_count >= 1); /* ourself is 1 */

	/* Roll up this additional count into our ancestors */
	if (dd->dd_parent != NULL)
		dsl_dir_fscount_adjust(dd->dd_parent, tx, delta, B_FALSE);

	mutex_exit(&dd->dd_lock);
}

uint64_t
dsl_dir_create_sync(dsl_pool_t *dp, dsl_dir_t *pds, const char *name,
    dmu_tx_t *tx)
{
	objset_t *mos = dp->dp_meta_objset;
	uint64_t ddobj;
	dsl_dir_phys_t *ddphys;
	dmu_buf_t *dbuf;
	zfeature_info_t *limit_feat =
	    &spa_feature_table[SPA_FEATURE_FS_SS_LIMIT];

	ddobj = dmu_object_alloc(mos, DMU_OT_DSL_DIR, 0,
	    DMU_OT_DSL_DIR, sizeof (dsl_dir_phys_t), tx);
	if (pds) {
		VERIFY(0 == zap_add(mos, pds->dd_phys->dd_child_dir_zapobj,
		    name, sizeof (uint64_t), 1, &ddobj, tx));
	} else {
		/* it's the root dir */
		VERIFY(0 == zap_add(mos, DMU_POOL_DIRECTORY_OBJECT,
		    DMU_POOL_ROOT_DATASET, sizeof (uint64_t), 1, &ddobj, tx));
	}
	VERIFY(0 == dmu_bonus_hold(mos, ddobj, FTAG, &dbuf));
	dmu_buf_will_dirty(dbuf, tx);
	ddphys = dbuf->db_data;

	ddphys->dd_creation_time = gethrestime_sec();
	/* Only initialize the count if the limit feature is active */
	if (spa_feature_is_active(dp->dp_spa, limit_feat))
		ddphys->dd_filesystem_count = 1;
	if (pds)
		ddphys->dd_parent_obj = pds->dd_object;
	ddphys->dd_props_zapobj = zap_create(mos,
	    DMU_OT_DSL_PROPS, DMU_OT_NONE, 0, tx);
	ddphys->dd_child_dir_zapobj = zap_create(mos,
	    DMU_OT_DSL_DIR_CHILD_MAP, DMU_OT_NONE, 0, tx);
	if (spa_version(dp->dp_spa) >= SPA_VERSION_USED_BREAKDOWN)
		ddphys->dd_flags |= DD_FLAG_USED_BREAKDOWN;
	dmu_buf_rele(dbuf, FTAG);

	return (ddobj);
}

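/*
 * Verify the dsl_dir can be destroyed: it must have no child dirs and no
 * holds beyond the two taken by dsl_dataset_destroy.
 */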
/* ARGSUSED */
int
dsl_dir_destroy_check(void *arg1, void *arg2, dmu_tx_t *tx)
{
	dsl_dir_t *dd = arg1;
	dsl_pool_t *dp = dd->dd_pool;
	objset_t *mos = dp->dp_meta_objset;
	int err;
	uint64_t count;

	/*
	 * There should be exactly two holds, both from
	 * dsl_dataset_destroy: one on the dd directory, and one on its
	 * head ds. If there are more holds, then a concurrent thread is
	 * performing a lookup inside this dir while we're trying to destroy
	 * it. To minimize this possibility, we perform this check only
	 * in syncing context and fail the operation if we encounter
	 * additional holds. The dp_config_rwlock ensures that nobody else
	 * opens it after we check.
	 */
	if (dmu_tx_is_syncing(tx) && dmu_buf_refcount(dd->dd_dbuf) > 2)
		return (EBUSY);

	err = zap_count(mos, dd->dd_phys->dd_child_dir_zapobj, &count);
	if (err)
		return (err);
	if (count != 0)
		return (EEXIST);

	return (0);
}

void
dsl_dir_destroy_sync(void *arg1, void *tag, dmu_tx_t *tx)
{
	dsl_dir_t *dd = arg1;
	objset_t *mos = dd->dd_pool->dp_meta_objset;
	uint64_t obj;
	dd_used_t t;

	ASSERT(RW_WRITE_HELD(&dd->dd_pool->dp_config_rwlock));
	ASSERT(dd->dd_phys->dd_head_dataset_obj == 0);

	/*
	 * Decrement the filesystem count for all parent filesystems.
	 *
	 * When we receive an incremental stream into a filesystem that already
	 * exists, a temporary clone is created. We never count this temporary
	 * clone, whose name begins with a '%'.
	 */
	if (dd->dd_myname[0] != '%' && dd->dd_parent != NULL)
		dsl_dir_fscount_adjust(dd->dd_parent, tx, -1, B_TRUE);

	/*
	 * Remove our reservation. The impl() routine avoids setting the
	 * actual property, which would require the (already destroyed) ds.
	 */
	dsl_dir_set_reservation_sync_impl(dd, 0, tx);

	ASSERT0(dd->dd_phys->dd_used_bytes);
	ASSERT0(dd->dd_phys->dd_reserved);
	for (t = 0; t < DD_USED_NUM; t++)
		ASSERT0(dd->dd_phys->dd_used_breakdown[t]);

	VERIFY(0 == zap_destroy(mos, dd->dd_phys->dd_child_dir_zapobj, tx));
	VERIFY(0 == zap_destroy(mos, dd->dd_phys->dd_props_zapobj, tx));
	VERIFY(0 == dsl_deleg_destroy(mos, dd->dd_phys->dd_deleg_zapobj, tx));
	VERIFY(0 == zap_remove(mos,
	    dd->dd_parent->dd_phys->dd_child_dir_zapobj, dd->dd_myname, tx));

	obj = dd->dd_object;
	dsl_dir_close(dd, tag);
	VERIFY(0 == dmu_object_free(mos, obj, tx));
}

boolean_t
dsl_dir_is_clone(dsl_dir_t *dd)
{
	return (dd->dd_phys->dd_origin_obj &&
	    (dd->dd_pool->dp_origin_snap == NULL ||
	    dd->dd_phys->dd_origin_obj !=
	    dd->dd_pool->dp_origin_snap->ds_object));
}

void
dsl_dir_stats(dsl_dir_t *dd, nvlist_t *nv)
{
	mutex_enter(&dd->dd_lock);
	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_USED,
	    dd->dd_phys->dd_used_bytes);
	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_QUOTA, dd->dd_phys->dd_quota);
	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_RESERVATION,
	    dd->dd_phys->dd_reserved);
	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_COMPRESSRATIO,
	    dd->dd_phys->dd_compressed_bytes == 0 ? 100 :
	    (dd->dd_phys->dd_uncompressed_bytes * 100 /
	    dd->dd_phys->dd_compressed_bytes));
	if (dd->dd_phys->dd_flags & DD_FLAG_USED_BREAKDOWN) {
		dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_USEDSNAP,
		    dd->dd_phys->dd_used_breakdown[DD_USED_SNAP]);
		dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_USEDDS,
		    dd->dd_phys->dd_used_breakdown[DD_USED_HEAD]);
		dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_USEDREFRESERV,
		    dd->dd_phys->dd_used_breakdown[DD_USED_REFRSRV]);
		dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_USEDCHILD,
		    dd->dd_phys->dd_used_breakdown[DD_USED_CHILD] +
		    dd->dd_phys->dd_used_breakdown[DD_USED_CHILD_RSRV]);
	}
	mutex_exit(&dd->dd_lock);

	rw_enter(&dd->dd_pool->dp_config_rwlock, RW_READER);
	if (dsl_dir_is_clone(dd)) {
		dsl_dataset_t *ds;
		char buf[MAXNAMELEN];

		VERIFY(0 == dsl_dataset_hold_obj(dd->dd_pool,
		    dd->dd_phys->dd_origin_obj, FTAG, &ds));
		dsl_dataset_name(ds, buf);
		dsl_dataset_rele(ds, FTAG);
		dsl_prop_nvlist_add_string(nv, ZFS_PROP_ORIGIN, buf);
	}
	rw_exit(&dd->dd_pool->dp_config_rwlock);
}

void
dsl_dir_dirty(dsl_dir_t *dd, dmu_tx_t *tx)
{
	dsl_pool_t *dp = dd->dd_pool;

	ASSERT(dd->dd_phys);

	if (txg_list_add(&dp->dp_dirty_dirs, dd, tx->tx_txg) == 0) {
		/* up the hold count until we can be written out */
		dmu_buf_add_ref(dd->dd_dbuf, dd);
	}
}

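/*
 * Compute how much of "delta" the parent should be charged with, given
 * that space below dd_reserved is already accounted for by the
 * reservation.  For example, with used=10, dd_reserved=15, and delta=+10,
 * only +5 is passed on to the parent.
 */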
static int64_t
parent_delta(dsl_dir_t *dd, uint64_t used, int64_t delta)
{
	uint64_t old_accounted = MAX(used, dd->dd_phys->dd_reserved);
	uint64_t new_accounted = MAX(used + delta, dd->dd_phys->dd_reserved);
	return (new_accounted - old_accounted);
}

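/*
 * Called from syncing context once per dirty txg to clear this txg's
 * pending-write estimate and drop the hold taken in dsl_dir_dirty().
 */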
void
dsl_dir_sync(dsl_dir_t *dd, dmu_tx_t *tx)
{
	ASSERT(dmu_tx_is_syncing(tx));

	mutex_enter(&dd->dd_lock);
	ASSERT0(dd->dd_tempreserved[tx->tx_txg&TXG_MASK]);
	dprintf_dd(dd, "txg=%llu towrite=%lluK\n", tx->tx_txg,
	    dd->dd_space_towrite[tx->tx_txg&TXG_MASK] / 1024);
	dd->dd_space_towrite[tx->tx_txg&TXG_MASK] = 0;
	mutex_exit(&dd->dd_lock);

	/* release the hold from dsl_dir_dirty */
	dmu_buf_rele(dd->dd_dbuf, dd);
}

static uint64_t
dsl_dir_space_towrite(dsl_dir_t *dd)
{
	uint64_t space = 0;
	int i;

	ASSERT(MUTEX_HELD(&dd->dd_lock));

	for (i = 0; i < TXG_SIZE; i++) {
		space += dd->dd_space_towrite[i&TXG_MASK];
		ASSERT3U(dd->dd_space_towrite[i&TXG_MASK], >=, 0);
	}
	return (space);
}

/*
 * How much space would dd have available if ancestor had delta applied
 * to it? If ondiskonly is set, we're only interested in what's
 * on-disk, not estimated pending changes.
 */
uint64_t
dsl_dir_space_available(dsl_dir_t *dd,
    dsl_dir_t *ancestor, int64_t delta, int ondiskonly)
{
	uint64_t parentspace, myspace, quota, used;

	/*
	 * If there are no restrictions otherwise, assume we have
	 * unlimited space available.
	 */
	quota = UINT64_MAX;
	parentspace = UINT64_MAX;

	if (dd->dd_parent != NULL) {
		parentspace = dsl_dir_space_available(dd->dd_parent,
		    ancestor, delta, ondiskonly);
	}

	mutex_enter(&dd->dd_lock);
	if (dd->dd_phys->dd_quota != 0)
		quota = dd->dd_phys->dd_quota;
	used = dd->dd_phys->dd_used_bytes;
	if (!ondiskonly)
		used += dsl_dir_space_towrite(dd);

	if (dd->dd_parent == NULL) {
		uint64_t poolsize = dsl_pool_adjustedsize(dd->dd_pool, FALSE);
		quota = MIN(quota, poolsize);
	}

	if (dd->dd_phys->dd_reserved > used && parentspace != UINT64_MAX) {
		/*
		 * We have some space reserved, in addition to what our
		 * parent gave us.
		 */
		parentspace += dd->dd_phys->dd_reserved - used;
	}

	if (dd == ancestor) {
		ASSERT(delta <= 0);
		ASSERT(used >= -delta);
		used += delta;
		if (parentspace != UINT64_MAX)
			parentspace -= delta;
	}

	if (used > quota) {
		/* over quota */
		myspace = 0;
	} else {
		/*
		 * the lesser of the space provided by our parent and
		 * the space left in our quota
		 */
		myspace = MIN(parentspace, quota - used);
	}

	mutex_exit(&dd->dd_lock);

	return (myspace);
}

struct tempreserve {
	list_node_t tr_node;
	dsl_pool_t *tr_dp;
	dsl_dir_t *tr_ds;
	uint64_t tr_size;
};

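/*
 * Reserve "asize" bytes in this dsl_dir for the given txg, after checking
 * this dir's quota, then recurse upward to reserve the corresponding delta
 * from each ancestor.  Each reservation taken is appended to tr_list so
 * that dsl_dir_tempreserve_clear() can later undo the whole set.
 */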
static int
dsl_dir_tempreserve_impl(dsl_dir_t *dd, uint64_t asize, boolean_t netfree,
    boolean_t ignorequota, boolean_t checkrefquota, list_t *tr_list,
    dmu_tx_t *tx, boolean_t first)
{
	uint64_t txg = tx->tx_txg;
	uint64_t est_inflight, used_on_disk, quota, parent_rsrv;
	uint64_t deferred = 0;
	struct tempreserve *tr;
	int retval = EDQUOT;
	int txgidx = txg & TXG_MASK;
	int i;
	uint64_t ref_rsrv = 0;

	ASSERT3U(txg, !=, 0);
	ASSERT3S(asize, >, 0);

	mutex_enter(&dd->dd_lock);

	/*
	 * Check against the dsl_dir's quota. We don't add in the delta
	 * when checking for over-quota because they get one free hit.
	 */
	est_inflight = dsl_dir_space_towrite(dd);
	for (i = 0; i < TXG_SIZE; i++)
		est_inflight += dd->dd_tempreserved[i];
	used_on_disk = dd->dd_phys->dd_used_bytes;

	/*
	 * On the first iteration, fetch the dataset's used-on-disk and
	 * refreservation values. Also, if checkrefquota is set, test if
	 * allocating this space would exceed the dataset's refquota.
	 */
	if (first && tx->tx_objset) {
		int error;
		dsl_dataset_t *ds = tx->tx_objset->os_dsl_dataset;

		error = dsl_dataset_check_quota(ds, checkrefquota,
		    asize, est_inflight, &used_on_disk, &ref_rsrv);
		if (error) {
			mutex_exit(&dd->dd_lock);
			return (error);
		}
	}

	/*
	 * If this transaction will result in a net free of space,
	 * we want to let it through.
	 */
	if (ignorequota || netfree || dd->dd_phys->dd_quota == 0)
		quota = UINT64_MAX;
	else
		quota = dd->dd_phys->dd_quota;

	/*
	 * Adjust the quota against the actual pool size at the root
	 * minus any outstanding deferred frees.
	 * To ensure that it's possible to remove files from a full
	 * pool without inducing transient overcommits, we throttle
	 * netfree transactions against a quota that is slightly larger,
	 * but still within the pool's allocation slop. In cases where
	 * we're very close to full, this will allow a steady trickle of
	 * removes to get through.
	 */
	if (dd->dd_parent == NULL) {
		spa_t *spa = dd->dd_pool->dp_spa;
		uint64_t poolsize = dsl_pool_adjustedsize(dd->dd_pool, netfree);
		deferred = metaslab_class_get_deferred(spa_normal_class(spa));
		if (poolsize - deferred < quota) {
			quota = poolsize - deferred;
			retval = ENOSPC;
		}
	}

	/*
	 * If they are requesting more space, and our current estimate
	 * is over quota, they get to try again unless the actual
	 * on-disk is over quota and there are no pending changes (which
	 * may free up space for us).
	 */
	if (used_on_disk + est_inflight >= quota) {
		if (est_inflight > 0 || used_on_disk < quota ||
		    (retval == ENOSPC && used_on_disk < quota + deferred))
			retval = ERESTART;
		dprintf_dd(dd, "failing: used=%lluK inflight = %lluK "
		    "quota=%lluK tr=%lluK err=%d\n",
		    used_on_disk>>10, est_inflight>>10,
		    quota>>10, asize>>10, retval);
		mutex_exit(&dd->dd_lock);
		return (retval);
	}

	/* We need to up our estimated delta before dropping dd_lock */
	dd->dd_tempreserved[txgidx] += asize;

	parent_rsrv = parent_delta(dd, used_on_disk + est_inflight,
	    asize - ref_rsrv);
	mutex_exit(&dd->dd_lock);

	tr = kmem_zalloc(sizeof (struct tempreserve), KM_SLEEP);
	tr->tr_ds = dd;
	tr->tr_size = asize;
	list_insert_tail(tr_list, tr);

	/* see if it's OK with our parent */
	if (dd->dd_parent && parent_rsrv) {
		boolean_t ismos = (dd->dd_phys->dd_head_dataset_obj == 0);

		return (dsl_dir_tempreserve_impl(dd->dd_parent,
		    parent_rsrv, netfree, ismos, TRUE, tr_list, tx, FALSE));
	} else {
		return (0);
	}
}

/*
 * Reserve space in this dsl_dir, to be used in this tx's txg.
 * After the space has been dirtied (and dsl_dir_willuse_space()
 * has been called), the reservation should be canceled, using
 * dsl_dir_tempreserve_clear().
 */
int
dsl_dir_tempreserve_space(dsl_dir_t *dd, uint64_t lsize, uint64_t asize,
    uint64_t fsize, uint64_t usize, void **tr_cookiep, dmu_tx_t *tx)
{
	int err;
	list_t *tr_list;

	if (asize == 0) {
		*tr_cookiep = NULL;
		return (0);
	}

	tr_list = kmem_alloc(sizeof (list_t), KM_SLEEP);
	list_create(tr_list, sizeof (struct tempreserve),
	    offsetof(struct tempreserve, tr_node));
	ASSERT3S(asize, >, 0);
	ASSERT3S(fsize, >=, 0);

	err = arc_tempreserve_space(lsize, tx->tx_txg);
	if (err == 0) {
		struct tempreserve *tr;

		tr = kmem_zalloc(sizeof (struct tempreserve), KM_SLEEP);
		tr->tr_size = lsize;
		list_insert_tail(tr_list, tr);

		err = dsl_pool_tempreserve_space(dd->dd_pool, asize, tx);
	} else {
		if (err == EAGAIN) {
			txg_delay(dd->dd_pool, tx->tx_txg,
			    zfs_zone_txg_delay());
			err = ERESTART;
		}
		dsl_pool_memory_pressure(dd->dd_pool);
	}

	if (err == 0) {
		struct tempreserve *tr;

		tr = kmem_zalloc(sizeof (struct tempreserve), KM_SLEEP);
		tr->tr_dp = dd->dd_pool;
		tr->tr_size = asize;
		list_insert_tail(tr_list, tr);

		err = dsl_dir_tempreserve_impl(dd, asize, fsize >= asize,
		    FALSE, asize > usize, tr_list, tx, TRUE);
	}

	if (err)
		dsl_dir_tempreserve_clear(tr_list, tx);
	else
		*tr_cookiep = tr_list;

	return (err);
}

/*
 * Clear a temporary reservation that we previously made with
 * dsl_dir_tempreserve_space().
 */
void
dsl_dir_tempreserve_clear(void *tr_cookie, dmu_tx_t *tx)
{
	int txgidx = tx->tx_txg & TXG_MASK;
	list_t *tr_list = tr_cookie;
	struct tempreserve *tr;

	ASSERT3U(tx->tx_txg, !=, 0);

	if (tr_cookie == NULL)
		return;

	while (tr = list_head(tr_list)) {
		if (tr->tr_dp) {
			dsl_pool_tempreserve_clear(tr->tr_dp, tr->tr_size, tx);
		} else if (tr->tr_ds) {
			mutex_enter(&tr->tr_ds->dd_lock);
			ASSERT3U(tr->tr_ds->dd_tempreserved[txgidx], >=,
			    tr->tr_size);
			tr->tr_ds->dd_tempreserved[txgidx] -= tr->tr_size;
			mutex_exit(&tr->tr_ds->dd_lock);
		} else {
			arc_tempreserve_clear(tr->tr_size);
		}
		list_remove(tr_list, tr);
		kmem_free(tr, sizeof (struct tempreserve));
	}

	kmem_free(tr_list, sizeof (list_t));
}

static void
dsl_dir_willuse_space_impl(dsl_dir_t *dd, int64_t space, dmu_tx_t *tx)
{
	int64_t parent_space;
	uint64_t est_used;

	mutex_enter(&dd->dd_lock);
	if (space > 0)
		dd->dd_space_towrite[tx->tx_txg & TXG_MASK] += space;

	est_used = dsl_dir_space_towrite(dd) + dd->dd_phys->dd_used_bytes;
	parent_space = parent_delta(dd, est_used, space);
	mutex_exit(&dd->dd_lock);

	/* Make sure that we clean up dd_space_to* */
	dsl_dir_dirty(dd, tx);

	/* XXX this is potentially expensive and unnecessary... */
	if (parent_space && dd->dd_parent)
		dsl_dir_willuse_space_impl(dd->dd_parent, parent_space, tx);
}

/*
 * Call in open context when we think we're going to write/free space,
 * eg. when dirtying data. Be conservative (ie. OK to write less than
 * this or free more than this, but don't write more or free less).
 */
void
dsl_dir_willuse_space(dsl_dir_t *dd, int64_t space, dmu_tx_t *tx)
{
	dsl_pool_willuse_space(dd->dd_pool, space, tx);
	dsl_dir_willuse_space_impl(dd, space, tx);
}

/* call from syncing context when we actually write/free space for this dd */
void
dsl_dir_diduse_space(dsl_dir_t *dd, dd_used_t type,
    int64_t used, int64_t compressed, int64_t uncompressed, dmu_tx_t *tx)
{
	int64_t accounted_delta;
	boolean_t needlock = !MUTEX_HELD(&dd->dd_lock);

	ASSERT(dmu_tx_is_syncing(tx));
	ASSERT(type < DD_USED_NUM);

	if (needlock)
		mutex_enter(&dd->dd_lock);
	accounted_delta = parent_delta(dd, dd->dd_phys->dd_used_bytes, used);
	ASSERT(used >= 0 || dd->dd_phys->dd_used_bytes >= -used);
	ASSERT(compressed >= 0 ||
	    dd->dd_phys->dd_compressed_bytes >= -compressed);
	ASSERT(uncompressed >= 0 ||
	    dd->dd_phys->dd_uncompressed_bytes >= -uncompressed);
	dmu_buf_will_dirty(dd->dd_dbuf, tx);
	dd->dd_phys->dd_used_bytes += used;
	dd->dd_phys->dd_uncompressed_bytes += uncompressed;
	dd->dd_phys->dd_compressed_bytes += compressed;

	if (dd->dd_phys->dd_flags & DD_FLAG_USED_BREAKDOWN) {
		ASSERT(used > 0 ||
		    dd->dd_phys->dd_used_breakdown[type] >= -used);
		dd->dd_phys->dd_used_breakdown[type] += used;
#ifdef DEBUG
		dd_used_t t;
		uint64_t u = 0;
		for (t = 0; t < DD_USED_NUM; t++)
			u += dd->dd_phys->dd_used_breakdown[t];
		ASSERT3U(u, ==, dd->dd_phys->dd_used_bytes);
#endif
	}
	if (needlock)
		mutex_exit(&dd->dd_lock);

	if (dd->dd_parent != NULL) {
		dsl_dir_diduse_space(dd->dd_parent, DD_USED_CHILD,
		    accounted_delta, compressed, uncompressed, tx);
		dsl_dir_transfer_space(dd->dd_parent,
		    used - accounted_delta,
		    DD_USED_CHILD_RSRV, DD_USED_CHILD, tx);
	}
}

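/*
 * Move "delta" bytes of this dir's space usage from one used-breakdown
 * bucket to another without changing the total.
 */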
void
dsl_dir_transfer_space(dsl_dir_t *dd, int64_t delta,
    dd_used_t oldtype, dd_used_t newtype, dmu_tx_t *tx)
{
	boolean_t needlock = !MUTEX_HELD(&dd->dd_lock);

	ASSERT(dmu_tx_is_syncing(tx));
	ASSERT(oldtype < DD_USED_NUM);
	ASSERT(newtype < DD_USED_NUM);

	if (delta == 0 || !(dd->dd_phys->dd_flags & DD_FLAG_USED_BREAKDOWN))
		return;

	if (needlock)
		mutex_enter(&dd->dd_lock);
	ASSERT(delta > 0 ?
	    dd->dd_phys->dd_used_breakdown[oldtype] >= delta :
	    dd->dd_phys->dd_used_breakdown[newtype] >= -delta);
	ASSERT(dd->dd_phys->dd_used_bytes >= ABS(delta));
	dmu_buf_will_dirty(dd->dd_dbuf, tx);
	dd->dd_phys->dd_used_breakdown[oldtype] -= delta;
	dd->dd_phys->dd_used_breakdown[newtype] += delta;
	if (needlock)
		mutex_exit(&dd->dd_lock);
}

static int
dsl_dir_set_quota_check(void *arg1, void *arg2, dmu_tx_t *tx)
{
	dsl_dataset_t *ds = arg1;
	dsl_dir_t *dd = ds->ds_dir;
	dsl_prop_setarg_t *psa = arg2;
	int err;
	uint64_t towrite;

	if ((err = dsl_prop_predict_sync(ds->ds_dir, psa)) != 0)
		return (err);

	if (psa->psa_effective_value == 0)
		return (0);

	mutex_enter(&dd->dd_lock);
	/*
	 * If we are doing the preliminary check in open context, and
	 * there are pending changes, then don't fail it, since the
	 * pending changes could under-estimate the amount of space to be
	 * freed up.
	 */
	towrite = dsl_dir_space_towrite(dd);
	if ((dmu_tx_is_syncing(tx) || towrite == 0) &&
	    (psa->psa_effective_value < dd->dd_phys->dd_reserved ||
	    psa->psa_effective_value < dd->dd_phys->dd_used_bytes + towrite)) {
		err = ENOSPC;
	}
	mutex_exit(&dd->dd_lock);
	return (err);
}

static void
dsl_dir_set_quota_sync(void *arg1, void *arg2, dmu_tx_t *tx)
{
	dsl_dataset_t *ds = arg1;
	dsl_dir_t *dd = ds->ds_dir;
	dsl_prop_setarg_t *psa = arg2;
	uint64_t effective_value = psa->psa_effective_value;

	dsl_prop_set_sync(ds, psa, tx);
	DSL_PROP_CHECK_PREDICTION(dd, psa);

	dmu_buf_will_dirty(dd->dd_dbuf, tx);

	mutex_enter(&dd->dd_lock);
	dd->dd_phys->dd_quota = effective_value;
	mutex_exit(&dd->dd_lock);
}

int
dsl_dir_set_quota(const char *ddname, zprop_source_t source, uint64_t quota)
{
	dsl_dir_t *dd;
	dsl_dataset_t *ds;
	dsl_prop_setarg_t psa;
	int err;

1544 dsl_prop_setarg_init_uint64(&psa, "quota", source, "a);

	err = dsl_dataset_hold(ddname, FTAG, &ds);
	if (err)
		return (err);

	err = dsl_dir_open(ddname, FTAG, &dd, NULL);
	if (err) {
		dsl_dataset_rele(ds, FTAG);
		return (err);
	}

	ASSERT(ds->ds_dir == dd);

	/*
	 * If someone removes a file, then tries to set the quota, we want to
	 * make sure the file freeing takes effect.
	 */
	txg_wait_open(dd->dd_pool, 0);

	err = dsl_sync_task_do(dd->dd_pool, dsl_dir_set_quota_check,
	    dsl_dir_set_quota_sync, ds, &psa, 0);

	dsl_dir_close(dd, FTAG);
	dsl_dataset_rele(ds, FTAG);
	return (err);
}

int
dsl_dir_set_reservation_check(void *arg1, void *arg2, dmu_tx_t *tx)
{
	dsl_dataset_t *ds = arg1;
	dsl_dir_t *dd = ds->ds_dir;
	dsl_prop_setarg_t *psa = arg2;
	uint64_t effective_value;
	uint64_t used, avail;
	int err;

	if ((err = dsl_prop_predict_sync(ds->ds_dir, psa)) != 0)
		return (err);

	effective_value = psa->psa_effective_value;

	/*
	 * If we are doing the preliminary check in open context, the
	 * space estimates may be inaccurate.
	 */
	if (!dmu_tx_is_syncing(tx))
		return (0);

	mutex_enter(&dd->dd_lock);
	used = dd->dd_phys->dd_used_bytes;
	mutex_exit(&dd->dd_lock);

	if (dd->dd_parent) {
		avail = dsl_dir_space_available(dd->dd_parent,
		    NULL, 0, FALSE);
	} else {
		avail = dsl_pool_adjustedsize(dd->dd_pool, B_FALSE) - used;
	}

	if (MAX(used, effective_value) > MAX(used, dd->dd_phys->dd_reserved)) {
		uint64_t delta = MAX(used, effective_value) -
		    MAX(used, dd->dd_phys->dd_reserved);

		if (delta > avail)
			return (ENOSPC);
		if (dd->dd_phys->dd_quota > 0 &&
		    effective_value > dd->dd_phys->dd_quota)
			return (ENOSPC);
	}

	return (0);
}

static void
dsl_dir_set_reservation_sync_impl(dsl_dir_t *dd, uint64_t value, dmu_tx_t *tx)
{
	uint64_t used;
	int64_t delta;

	dmu_buf_will_dirty(dd->dd_dbuf, tx);

	mutex_enter(&dd->dd_lock);
	used = dd->dd_phys->dd_used_bytes;
	delta = MAX(used, value) - MAX(used, dd->dd_phys->dd_reserved);
	dd->dd_phys->dd_reserved = value;

	if (dd->dd_parent != NULL) {
		/* Roll up this additional usage into our ancestors */
		dsl_dir_diduse_space(dd->dd_parent, DD_USED_CHILD_RSRV,
		    delta, 0, 0, tx);
	}
	mutex_exit(&dd->dd_lock);
}

static void
dsl_dir_set_reservation_sync(void *arg1, void *arg2, dmu_tx_t *tx)
{
	dsl_dataset_t *ds = arg1;
	dsl_dir_t *dd = ds->ds_dir;
	dsl_prop_setarg_t *psa = arg2;
	uint64_t value = psa->psa_effective_value;

	dsl_prop_set_sync(ds, psa, tx);
	DSL_PROP_CHECK_PREDICTION(dd, psa);

	dsl_dir_set_reservation_sync_impl(dd, value, tx);
}

int
dsl_dir_set_reservation(const char *ddname, zprop_source_t source,
    uint64_t reservation)
{
	dsl_dir_t *dd;
	dsl_dataset_t *ds;
	dsl_prop_setarg_t psa;
	int err;

	dsl_prop_setarg_init_uint64(&psa, "reservation", source, &reservation);

	err = dsl_dataset_hold(ddname, FTAG, &ds);
	if (err)
		return (err);

	err = dsl_dir_open(ddname, FTAG, &dd, NULL);
	if (err) {
		dsl_dataset_rele(ds, FTAG);
		return (err);
	}

	ASSERT(ds->ds_dir == dd);

	err = dsl_sync_task_do(dd->dd_pool, dsl_dir_set_reservation_check,
	    dsl_dir_set_reservation_sync, ds, &psa, 0);

	dsl_dir_close(dd, FTAG);
	dsl_dataset_rele(ds, FTAG);
	return (err);
}

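/* Find the deepest dsl_dir that is an ancestor of both ds1 and ds2. */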
static dsl_dir_t *
closest_common_ancestor(dsl_dir_t *ds1, dsl_dir_t *ds2)
{
	for (; ds1; ds1 = ds1->dd_parent) {
		dsl_dir_t *dd;
		for (dd = ds2; dd; dd = dd->dd_parent) {
			if (ds1 == dd)
				return (dd);
		}
	}
	return (NULL);
}

/*
 * If delta is applied to dd, how much of that delta would be applied to
 * ancestor? Syncing context only.
 */
static int64_t
would_change(dsl_dir_t *dd, int64_t delta, dsl_dir_t *ancestor)
{
	if (dd == ancestor)
		return (delta);

	mutex_enter(&dd->dd_lock);
	delta = parent_delta(dd, dd->dd_phys->dd_used_bytes, delta);
	mutex_exit(&dd->dd_lock);
	return (would_change(dd->dd_parent, delta, ancestor));
}

struct renamearg {
	dsl_dir_t *newparent;
	const char *mynewname;
	cred_t *cr;
};

static int
dsl_dir_rename_check(void *arg1, void *arg2, dmu_tx_t *tx)
{
	dsl_dir_t *dd = arg1;
	struct renamearg *ra = arg2;
	dsl_pool_t *dp = dd->dd_pool;
	objset_t *mos = dp->dp_meta_objset;
	int err;
	uint64_t val;

	/*
	 * There should only be one reference, from dmu_objset_rename().
	 * Fleeting holds are also possible (eg, from "zfs list" getting
	 * stats), but any that are present in open context will likely
	 * be gone by syncing context, so only fail from syncing
	 * context.
	 */
	if (dmu_tx_is_syncing(tx) && dmu_buf_refcount(dd->dd_dbuf) > 1)
		return (EBUSY);

	/* check for existing name */
	err = zap_lookup(mos, ra->newparent->dd_phys->dd_child_dir_zapobj,
	    ra->mynewname, 8, 1, &val);
	if (err == 0)
		return (EEXIST);
	if (err != ENOENT)
		return (err);

	if (ra->newparent != dd->dd_parent) {
		/* is there enough space? */
		uint64_t myspace =
		    MAX(dd->dd_phys->dd_used_bytes, dd->dd_phys->dd_reserved);

		/* no rename into our descendant */
		if (closest_common_ancestor(dd, ra->newparent) == dd)
			return (EINVAL);

		if (err = dsl_dir_transfer_possible(dd->dd_parent,
		    ra->newparent, dd, myspace, ra->cr))
			return (err);

		if (dd->dd_phys->dd_filesystem_count == 0 &&
		    dmu_tx_is_syncing(tx)) {
			uint64_t fs_cnt = 0;
			uint64_t ss_cnt = 0;

			/*
			 * Ensure this portion of the tree's counts have been
			 * initialized in case the new parent has limits set.
			 */
			err = dsl_dir_set_fs_ss_count(dd, tx, &fs_cnt, &ss_cnt);
			if (err)
				return (EIO);
		}
	}

	return (0);
}

static void
dsl_dir_rename_sync(void *arg1, void *arg2, dmu_tx_t *tx)
{
	dsl_dir_t *dd = arg1;
	struct renamearg *ra = arg2;
	dsl_pool_t *dp = dd->dd_pool;
	objset_t *mos = dp->dp_meta_objset;
	int err;
	char namebuf[MAXNAMELEN];

	ASSERT(dmu_buf_refcount(dd->dd_dbuf) <= 2);

	/* Log this before we change the name. */
	dsl_dir_name(ra->newparent, namebuf);
	spa_history_log_internal_dd(dd, "rename", tx,
	    "-> %s/%s", namebuf, ra->mynewname);

	if (ra->newparent != dd->dd_parent) {
		int cnt;

		mutex_enter(&dd->dd_lock);

		cnt = dd->dd_phys->dd_filesystem_count;
		dsl_dir_fscount_adjust(dd->dd_parent, tx, -cnt, B_TRUE);
		dsl_dir_fscount_adjust(ra->newparent, tx, cnt, B_TRUE);

		cnt = dd->dd_phys->dd_snapshot_count;
		dsl_snapcount_adjust(dd->dd_parent, tx, -cnt, B_TRUE);
		dsl_snapcount_adjust(ra->newparent, tx, cnt, B_TRUE);

		mutex_exit(&dd->dd_lock);

		dsl_dir_diduse_space(dd->dd_parent, DD_USED_CHILD,
		    -dd->dd_phys->dd_used_bytes,
		    -dd->dd_phys->dd_compressed_bytes,
		    -dd->dd_phys->dd_uncompressed_bytes, tx);
		dsl_dir_diduse_space(ra->newparent, DD_USED_CHILD,
		    dd->dd_phys->dd_used_bytes,
		    dd->dd_phys->dd_compressed_bytes,
		    dd->dd_phys->dd_uncompressed_bytes, tx);

		if (dd->dd_phys->dd_reserved > dd->dd_phys->dd_used_bytes) {
			uint64_t unused_rsrv = dd->dd_phys->dd_reserved -
			    dd->dd_phys->dd_used_bytes;

			dsl_dir_diduse_space(dd->dd_parent, DD_USED_CHILD_RSRV,
			    -unused_rsrv, 0, 0, tx);
			dsl_dir_diduse_space(ra->newparent, DD_USED_CHILD_RSRV,
			    unused_rsrv, 0, 0, tx);
		}
	}

	dmu_buf_will_dirty(dd->dd_dbuf, tx);

	/* remove from old parent zapobj */
	err = zap_remove(mos, dd->dd_parent->dd_phys->dd_child_dir_zapobj,
	    dd->dd_myname, tx);
	ASSERT0(err);

	(void) strcpy(dd->dd_myname, ra->mynewname);
	dsl_dir_close(dd->dd_parent, dd);
	dd->dd_phys->dd_parent_obj = ra->newparent->dd_object;
	VERIFY(0 == dsl_dir_open_obj(dd->dd_pool,
	    ra->newparent->dd_object, NULL, dd, &dd->dd_parent));

	/* add to new parent zapobj */
	err = zap_add(mos, ra->newparent->dd_phys->dd_child_dir_zapobj,
	    dd->dd_myname, 8, 1, &dd->dd_object, tx);
	ASSERT0(err);
}

int
dsl_dir_rename(dsl_dir_t *dd, const char *newname)
{
	struct renamearg ra;
	int err;

	/* new parent should exist */
	err = dsl_dir_open(newname, FTAG, &ra.newparent, &ra.mynewname);
	if (err)
		return (err);

	/* can't rename to different pool */
	if (dd->dd_pool != ra.newparent->dd_pool) {
		err = ENXIO;
		goto out;
	}

	/* new name should not already exist */
	if (ra.mynewname == NULL) {
		err = EEXIST;
		goto out;
	}

	ra.cr = CRED();

	err = dsl_sync_task_do(dd->dd_pool,
	    dsl_dir_rename_check, dsl_dir_rename_sync, dd, &ra, 3);

out:
	dsl_dir_close(ra.newparent, FTAG);
	return (err);
}

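/*
 * Check whether "space" bytes (and moving_dd's filesystem/snapshot counts)
 * can be transferred from sdd's tree to tdd's tree without exceeding any
 * quota or filesystem/snapshot limit along the way.
 */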
int
dsl_dir_transfer_possible(dsl_dir_t *sdd, dsl_dir_t *tdd, dsl_dir_t *moving_dd,
    uint64_t space, cred_t *cr)
{
	dsl_dir_t *ancestor;
	int64_t adelta;
	uint64_t avail;
	int err;

	ancestor = closest_common_ancestor(sdd, tdd);
	adelta = would_change(sdd, -space, ancestor);
	avail = dsl_dir_space_available(tdd, ancestor, adelta, FALSE);
	if (avail < space)
		return (ENOSPC);

	if (sdd != moving_dd) {
		err = dsl_dir_fscount_check(tdd,
		    moving_dd->dd_phys->dd_filesystem_count, ancestor, cr);
		if (err != 0)
			return (err);
	}
	err = dsl_snapcount_check(tdd, moving_dd->dd_phys->dd_snapshot_count,
	    ancestor, cr);
	if (err != 0)
		return (err);

	return (0);
}

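/*
 * Return the cached snapshot create/modify time for this dir; it is
 * refreshed via dsl_dir_snap_cmtime_update() below.
 */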
timestruc_t
dsl_dir_snap_cmtime(dsl_dir_t *dd)
{
	timestruc_t t;

	mutex_enter(&dd->dd_lock);
	t = dd->dd_snap_cmtime;
	mutex_exit(&dd->dd_lock);

	return (t);
}

void
dsl_dir_snap_cmtime_update(dsl_dir_t *dd)
{
	timestruc_t t;

	gethrestime(&t);
	mutex_enter(&dd->dd_lock);
	dd->dd_snap_cmtime = t;
	mutex_exit(&dd->dd_lock);
}