125 sa_os_t *os_sa;
126 };
127
/* The meta-objset and its meta-dnode both live at well-known object number 0. */
128 #define DMU_META_OBJSET 0
129 #define DMU_META_DNODE_OBJECT 0
/*
 * "Special" objects are object 0 and the reserved ids whose signed value is
 * negative (presumably DMU_USERUSED_OBJECT and friends -- NOTE(review):
 * confirm against the DMU_*_OBJECT definitions, not visible in this chunk).
 */
130 #define DMU_OBJECT_IS_SPECIAL(obj) ((int64_t)(obj) <= 0)
/* Accessors for the dnode pointers held in the objset's embedded dnode handles. */
131 #define DMU_META_DNODE(os) ((os)->os_meta_dnode.dnh_dnode)
132 #define DMU_USERUSED_DNODE(os) ((os)->os_userused_dnode.dnh_dnode)
133 #define DMU_GROUPUSED_DNODE(os) ((os)->os_groupused_dnode.dnh_dnode)
134
/* True when the objset's secondarycache setting permits any L2ARC caching. */
135 #define DMU_OS_IS_L2CACHEABLE(os) \
136 ((os)->os_secondary_cache == ZFS_CACHE_ALL || \
137 (os)->os_secondary_cache == ZFS_CACHE_METADATA)
138
/*
 * NOTE(review): the (os) argument is unused -- this is a global tunable
 * check (zfs_mdcomp_disable), identical for every objset.  The parameter
 * is kept only for call-site symmetry with DMU_OS_IS_L2CACHEABLE.
 */
139 #define DMU_OS_IS_L2COMPRESSIBLE(os) (zfs_mdcomp_disable == B_FALSE)
140
141 /* called from zpl */
/*
 * Reference-taking interfaces.  hold/rele and own/disown appear to be
 * paired acquire/release operations keyed by 'tag'; "own" presumably
 * implies an exclusive, typed claim on the objset (NOTE(review):
 * semantics inferred from the pairing and parameter lists -- confirm
 * against the implementations in dmu_objset.c).
 */
142 int dmu_objset_hold(const char *name, void *tag, objset_t **osp);
143 int dmu_objset_own(const char *name, dmu_objset_type_t type,
144 boolean_t readonly, void *tag, objset_t **osp);
145 void dmu_objset_refresh_ownership(objset_t *os, void *tag);
146 void dmu_objset_rele(objset_t *os, void *tag);
147 void dmu_objset_disown(objset_t *os, void *tag);
/* Obtain the objset associated with an already-held dsl_dataset. */
148 int dmu_objset_from_ds(struct dsl_dataset *ds, objset_t **osp);
149
/* Statistics / space-accounting queries on an objset. */
150 void dmu_objset_stats(objset_t *os, nvlist_t *nv);
151 void dmu_objset_fast_stat(objset_t *os, dmu_objset_stats_t *stat);
152 void dmu_objset_space(objset_t *os, uint64_t *refdbytesp, uint64_t *availbytesp,
153 uint64_t *usedobjsp, uint64_t *availobjsp);
154 uint64_t dmu_objset_fsid_guid(objset_t *os);
/*
 * Iterate datasets under dir object 'ddobj', invoking 'func' per dataset.
 * NOTE(review): flags presumably select recursion/snapshot traversal --
 * verify against the DS_FIND_* flag definitions.
 */
155 int dmu_objset_find_dp(struct dsl_pool *dp, uint64_t ddobj,
156 int func(struct dsl_pool *, struct dsl_dataset *, void *),
157 void *arg, int flags);
158 int dmu_objset_prefetch(const char *name, void *arg);
/* Evict any cached dbufs belonging to this objset. */
159 void dmu_objset_evict_dbufs(objset_t *os);
/* Creation time of the objset's %snapshot dir, per the name -- TODO confirm. */
160 timestruc_t dmu_objset_snap_cmtime(objset_t *os);
161
162 /* called from dsl */
/* Sync dirty state for the objset in the context of 'tx'; zio is the parent i/o. */
163 void dmu_objset_sync(objset_t *os, zio_t *zio, dmu_tx_t *tx);
164 boolean_t dmu_objset_is_dirty(objset_t *os, uint64_t txg);
|
125 sa_os_t *os_sa;
126 };
127
/* The meta-objset and its meta-dnode both live at well-known object number 0. */
128 #define DMU_META_OBJSET 0
129 #define DMU_META_DNODE_OBJECT 0
/*
 * "Special" objects are object 0 and the reserved ids whose signed value is
 * negative (presumably DMU_USERUSED_OBJECT and friends -- NOTE(review):
 * confirm against the DMU_*_OBJECT definitions, not visible in this chunk).
 */
130 #define DMU_OBJECT_IS_SPECIAL(obj) ((int64_t)(obj) <= 0)
/* Accessors for the dnode pointers held in the objset's embedded dnode handles. */
131 #define DMU_META_DNODE(os) ((os)->os_meta_dnode.dnh_dnode)
132 #define DMU_USERUSED_DNODE(os) ((os)->os_userused_dnode.dnh_dnode)
133 #define DMU_GROUPUSED_DNODE(os) ((os)->os_groupused_dnode.dnh_dnode)
134
/* True when the objset's secondarycache setting permits any L2ARC caching. */
135 #define DMU_OS_IS_L2CACHEABLE(os) \
136 ((os)->os_secondary_cache == ZFS_CACHE_ALL || \
137 (os)->os_secondary_cache == ZFS_CACHE_METADATA)
138
/*
 * NOTE(review): the (os) argument is unused -- this is a global tunable
 * check (zfs_mdcomp_disable), identical for every objset.  The parameter
 * is kept only for call-site symmetry with DMU_OS_IS_L2CACHEABLE.
 */
139 #define DMU_OS_IS_L2COMPRESSIBLE(os) (zfs_mdcomp_disable == B_FALSE)
140
141 /* called from zpl */
/*
 * Reference-taking interfaces.  hold/rele and own/disown appear to be
 * paired acquire/release operations keyed by 'tag'; "own" presumably
 * implies an exclusive, typed claim on the objset (NOTE(review):
 * semantics inferred from the pairing and parameter lists -- confirm
 * against the implementations in dmu_objset.c).
 */
142 int dmu_objset_hold(const char *name, void *tag, objset_t **osp);
143 int dmu_objset_own(const char *name, dmu_objset_type_t type,
144 boolean_t readonly, void *tag, objset_t **osp);
/* Variant of dmu_objset_own that locates the dataset by object id in 'dp'. */
145 int dmu_objset_own_obj(struct dsl_pool *dp, uint64_t obj,
146 dmu_objset_type_t type, boolean_t readonly, void *tag, objset_t **osp);
147 void dmu_objset_refresh_ownership(objset_t *os, void *tag);
148 void dmu_objset_rele(objset_t *os, void *tag);
149 void dmu_objset_disown(objset_t *os, void *tag);
/* Obtain the objset associated with an already-held dsl_dataset. */
150 int dmu_objset_from_ds(struct dsl_dataset *ds, objset_t **osp);
151
/* Statistics / space-accounting queries on an objset. */
152 void dmu_objset_stats(objset_t *os, nvlist_t *nv);
153 void dmu_objset_fast_stat(objset_t *os, dmu_objset_stats_t *stat);
154 void dmu_objset_space(objset_t *os, uint64_t *refdbytesp, uint64_t *availbytesp,
155 uint64_t *usedobjsp, uint64_t *availobjsp);
156 uint64_t dmu_objset_fsid_guid(objset_t *os);
/*
 * Iterate datasets under dir object 'ddobj', invoking 'func' per dataset.
 * NOTE(review): flags presumably select recursion/snapshot traversal --
 * verify against the DS_FIND_* flag definitions.
 */
157 int dmu_objset_find_dp(struct dsl_pool *dp, uint64_t ddobj,
158 int func(struct dsl_pool *, struct dsl_dataset *, void *),
159 void *arg, int flags);
160 int dmu_objset_prefetch(const char *name, void *arg);
/* Evict any cached dbufs belonging to this objset. */
161 void dmu_objset_evict_dbufs(objset_t *os);
/* Creation time of the objset's %snapshot dir, per the name -- TODO confirm. */
162 timestruc_t dmu_objset_snap_cmtime(objset_t *os);
163
164 /* called from dsl */
/* Sync dirty state for the objset in the context of 'tx'; zio is the parent i/o. */
165 void dmu_objset_sync(objset_t *os, zio_t *zio, dmu_tx_t *tx);
166 boolean_t dmu_objset_is_dirty(objset_t *os, uint64_t txg);
|