}

/*
 * Call in open context when we think we're going to write/free space,
 * e.g. when dirtying data.  Be conservative (i.e. OK to write less than
 * this or free more than this, but don't write more or free less).
 */
void
dsl_dir_willuse_space(dsl_dir_t *dd, int64_t space, dmu_tx_t *tx)
{
	dsl_pool_willuse_space(dd->dd_pool, space, tx);
	dsl_dir_willuse_space_impl(dd, space, tx);
}
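
/*
 * Added note: per the comments above and below, the expected pairing is a
 * dsl_dir_willuse_space() call in open context when space is about to be
 * dirtied, followed by dsl_dir_diduse_space() in syncing context once the
 * write/free actually happens.
 */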

/* Call from syncing context when we actually write/free space for this dd */
void
dsl_dir_diduse_space(dsl_dir_t *dd, dd_used_t type,
    int64_t used, int64_t compressed, int64_t uncompressed, dmu_tx_t *tx)
{
	int64_t accounted_delta;
	boolean_t needlock = !MUTEX_HELD(&dd->dd_lock);

	ASSERT(dmu_tx_is_syncing(tx));
	ASSERT(type < DD_USED_NUM);

	if (needlock)
		mutex_enter(&dd->dd_lock);
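	/*
	 * Added note (accounting sketch): accounted_delta is the portion of
	 * "used" that the parent actually sees, roughly the change in
	 * MAX(dd_used_bytes, dd_reserved) as computed by parent_delta(), so
	 * deltas absorbed by this dir's reservation do not grow the parent's
	 * DD_USED_CHILD total.
	 */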
	accounted_delta = parent_delta(dd, dd->dd_phys->dd_used_bytes, used);
	ASSERT(used >= 0 || dd->dd_phys->dd_used_bytes >= -used);
	ASSERT(compressed >= 0 ||
	    dd->dd_phys->dd_compressed_bytes >= -compressed);
	ASSERT(uncompressed >= 0 ||
	    dd->dd_phys->dd_uncompressed_bytes >= -uncompressed);
	dmu_buf_will_dirty(dd->dd_dbuf, tx);
	dd->dd_phys->dd_used_bytes += used;
	dd->dd_phys->dd_uncompressed_bytes += uncompressed;
	dd->dd_phys->dd_compressed_bytes += compressed;

	if (dd->dd_phys->dd_flags & DD_FLAG_USED_BREAKDOWN) {
		ASSERT(used > 0 ||
		    dd->dd_phys->dd_used_breakdown[type] >= -used);
		dd->dd_phys->dd_used_breakdown[type] += used;
#ifdef DEBUG
		dd_used_t t;
		uint64_t u = 0;
		for (t = 0; t < DD_USED_NUM; t++)
			u += dd->dd_phys->dd_used_breakdown[t];
		ASSERT3U(u, ==, dd->dd_phys->dd_used_bytes);
#endif
	}
	if (needlock)
		mutex_exit(&dd->dd_lock);

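	/*
	 * Added note: propagate upward.  The parent is charged only the
	 * accounted delta under DD_USED_CHILD; whatever was absorbed by this
	 * dir's reservation is shifted between the parent's
	 * DD_USED_CHILD_RSRV and DD_USED_CHILD buckets so the parent's
	 * breakdown stays consistent with its dd_used_bytes.
	 */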
	if (dd->dd_parent != NULL) {
		dsl_dir_diduse_space(dd->dd_parent, DD_USED_CHILD,
		    accounted_delta, compressed, uncompressed, tx);
		dsl_dir_transfer_space(dd->dd_parent,
		    used - accounted_delta,
		    DD_USED_CHILD_RSRV, DD_USED_CHILD, tx);
	}
}

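/*
 * Added note: move "delta" bytes from one dd_used_breakdown bucket to
 * another; dd_used_bytes itself is unchanged.  The caller may already hold
 * dd_lock, hence the needlock check below.
 */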
void
dsl_dir_transfer_space(dsl_dir_t *dd, int64_t delta,
    dd_used_t oldtype, dd_used_t newtype, dmu_tx_t *tx)
{
	boolean_t needlock = !MUTEX_HELD(&dd->dd_lock);

	ASSERT(dmu_tx_is_syncing(tx));
	ASSERT(oldtype < DD_USED_NUM);
	ASSERT(newtype < DD_USED_NUM);

	if (delta == 0 || !(dd->dd_phys->dd_flags & DD_FLAG_USED_BREAKDOWN))
		return;

	if (needlock)
		mutex_enter(&dd->dd_lock);
	ASSERT(delta > 0 ?
	    dd->dd_phys->dd_used_breakdown[oldtype] >= delta :
	    dd->dd_phys->dd_used_breakdown[newtype] >= -delta);
	ASSERT(dd->dd_phys->dd_used_bytes >= ABS(delta));
	dmu_buf_will_dirty(dd->dd_dbuf, tx);
	dd->dd_phys->dd_used_breakdown[oldtype] -= delta;
	dd->dd_phys->dd_used_breakdown[newtype] += delta;
	if (needlock)
		mutex_exit(&dd->dd_lock);
}

typedef struct dsl_dir_set_qr_arg {
	const char *ddsqra_name;
	zprop_source_t ddsqra_source;
	uint64_t ddsqra_value;
} dsl_dir_set_qr_arg_t;

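/*
 * Added note: this looks like the "check" half of the usual property-setting
 * pattern, presumably paired with a corresponding _sync function via
 * dsl_sync_task(); it validates the quota request against a held dataset
 * before anything is modified.
 */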
static int
dsl_dir_set_quota_check(void *arg, dmu_tx_t *tx)
{
	dsl_dir_set_qr_arg_t *ddsqra = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	dsl_dataset_t *ds;
	int error;
	uint64_t towrite, newval;

	error = dsl_dataset_hold(dp, ddsqra->ddsqra_name, FTAG, &ds);
	if (error != 0)
}

/*
 * Call in open context when we think we're going to write/free space,
 * e.g. when dirtying data.  Be conservative (i.e. OK to write less than
 * this or free more than this, but don't write more or free less).
 */
void
dsl_dir_willuse_space(dsl_dir_t *dd, int64_t space, dmu_tx_t *tx)
{
	dsl_pool_willuse_space(dd->dd_pool, space, tx);
	dsl_dir_willuse_space_impl(dd, space, tx);
}

/* Call from syncing context when we actually write/free space for this dd */
void
dsl_dir_diduse_space(dsl_dir_t *dd, dd_used_t type,
    int64_t used, int64_t compressed, int64_t uncompressed, dmu_tx_t *tx)
{
	int64_t accounted_delta;

	/*
	 * dsl_dataset_set_refreservation_sync_impl() calls this with
	 * dd_lock held, so that it can atomically update
	 * ds->ds_reserved and the dsl_dir accounting, so that
	 * dsl_dataset_check_quota() can see dataset and dir accounting
	 * consistently.
	 */
	boolean_t needlock = !MUTEX_HELD(&dd->dd_lock);

	ASSERT(dmu_tx_is_syncing(tx));
	ASSERT(type < DD_USED_NUM);

	dmu_buf_will_dirty(dd->dd_dbuf, tx);

	if (needlock)
		mutex_enter(&dd->dd_lock);
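	/*
	 * Added note (accounting sketch): accounted_delta is the portion of
	 * "used" that the parent actually sees, roughly the change in
	 * MAX(dd_used_bytes, dd_reserved) as computed by parent_delta(), so
	 * deltas absorbed by this dir's reservation do not grow the parent's
	 * DD_USED_CHILD total.
	 */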
	accounted_delta = parent_delta(dd, dd->dd_phys->dd_used_bytes, used);
	ASSERT(used >= 0 || dd->dd_phys->dd_used_bytes >= -used);
	ASSERT(compressed >= 0 ||
	    dd->dd_phys->dd_compressed_bytes >= -compressed);
	ASSERT(uncompressed >= 0 ||
	    dd->dd_phys->dd_uncompressed_bytes >= -uncompressed);
	dd->dd_phys->dd_used_bytes += used;
	dd->dd_phys->dd_uncompressed_bytes += uncompressed;
	dd->dd_phys->dd_compressed_bytes += compressed;

	if (dd->dd_phys->dd_flags & DD_FLAG_USED_BREAKDOWN) {
		ASSERT(used > 0 ||
		    dd->dd_phys->dd_used_breakdown[type] >= -used);
		dd->dd_phys->dd_used_breakdown[type] += used;
#ifdef DEBUG
		dd_used_t t;
		uint64_t u = 0;
		for (t = 0; t < DD_USED_NUM; t++)
			u += dd->dd_phys->dd_used_breakdown[t];
		ASSERT3U(u, ==, dd->dd_phys->dd_used_bytes);
#endif
	}
	if (needlock)
		mutex_exit(&dd->dd_lock);

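	/*
	 * Added note: propagate upward.  The parent is charged only the
	 * accounted delta under DD_USED_CHILD; whatever was absorbed by this
	 * dir's reservation is shifted between the parent's
	 * DD_USED_CHILD_RSRV and DD_USED_CHILD buckets so the parent's
	 * breakdown stays consistent with its dd_used_bytes.
	 */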
	if (dd->dd_parent != NULL) {
		dsl_dir_diduse_space(dd->dd_parent, DD_USED_CHILD,
		    accounted_delta, compressed, uncompressed, tx);
		dsl_dir_transfer_space(dd->dd_parent,
		    used - accounted_delta,
		    DD_USED_CHILD_RSRV, DD_USED_CHILD, tx);
	}
}

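/*
 * Added note: move "delta" bytes from one dd_used_breakdown bucket to
 * another; dd_used_bytes itself is unchanged.
 */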
void
dsl_dir_transfer_space(dsl_dir_t *dd, int64_t delta,
    dd_used_t oldtype, dd_used_t newtype, dmu_tx_t *tx)
{
	ASSERT(dmu_tx_is_syncing(tx));
	ASSERT(oldtype < DD_USED_NUM);
	ASSERT(newtype < DD_USED_NUM);

	if (delta == 0 || !(dd->dd_phys->dd_flags & DD_FLAG_USED_BREAKDOWN))
		return;

	dmu_buf_will_dirty(dd->dd_dbuf, tx);
	mutex_enter(&dd->dd_lock);
	ASSERT(delta > 0 ?
	    dd->dd_phys->dd_used_breakdown[oldtype] >= delta :
	    dd->dd_phys->dd_used_breakdown[newtype] >= -delta);
	ASSERT(dd->dd_phys->dd_used_bytes >= ABS(delta));
	dd->dd_phys->dd_used_breakdown[oldtype] -= delta;
	dd->dd_phys->dd_used_breakdown[newtype] += delta;
	mutex_exit(&dd->dd_lock);
}

typedef struct dsl_dir_set_qr_arg {
	const char *ddsqra_name;
	zprop_source_t ddsqra_source;
	uint64_t ddsqra_value;
} dsl_dir_set_qr_arg_t;

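/*
 * Added note: this looks like the "check" half of the usual property-setting
 * pattern, presumably paired with a corresponding _sync function via
 * dsl_sync_task(); it validates the quota request against a held dataset
 * before anything is modified.
 */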
static int
dsl_dir_set_quota_check(void *arg, dmu_tx_t *tx)
{
	dsl_dir_set_qr_arg_t *ddsqra = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	dsl_dataset_t *ds;
	int error;
	uint64_t towrite, newval;

	error = dsl_dataset_hold(dp, ddsqra->ddsqra_name, FTAG, &ds);
	if (error != 0)