	sa_os_t *os_sa;
};

#define	DMU_META_OBJSET		0
#define	DMU_META_DNODE_OBJECT	0
#define	DMU_OBJECT_IS_SPECIAL(obj) ((int64_t)(obj) <= 0)
#define	DMU_META_DNODE(os)	((os)->os_meta_dnode.dnh_dnode)
#define	DMU_USERUSED_DNODE(os)	((os)->os_userused_dnode.dnh_dnode)
#define	DMU_GROUPUSED_DNODE(os)	((os)->os_groupused_dnode.dnh_dnode)

#define	DMU_OS_IS_L2CACHEABLE(os)				\
	((os)->os_secondary_cache == ZFS_CACHE_ALL ||		\
	(os)->os_secondary_cache == ZFS_CACHE_METADATA)

#define	DMU_OS_IS_L2COMPRESSIBLE(os)	(zfs_mdcomp_disable == B_FALSE)

/* called from zpl */
int dmu_objset_hold(const char *name, void *tag, objset_t **osp);
int dmu_objset_own(const char *name, dmu_objset_type_t type,
    boolean_t readonly, void *tag, objset_t **osp);
int dmu_objset_own_obj(dsl_pool_t *dp, uint64_t obj, dmu_objset_type_t type,
    boolean_t readonly, void *tag, objset_t **osp);
void dmu_objset_refresh_ownership(objset_t *os, void *tag);
void dmu_objset_rele(objset_t *os, void *tag);
void dmu_objset_disown(objset_t *os, void *tag);
int dmu_objset_from_ds(struct dsl_dataset *ds, objset_t **osp);
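
/*
 * Illustrative usage sketch: a read-only consumer typically brackets access
 * with dmu_objset_hold() and dmu_objset_rele().  The dataset name below is a
 * placeholder, and FTAG is the usual caller-tag convention.
 *
 *	objset_t *os;
 *	int err;
 *
 *	err = dmu_objset_hold("pool/dataset", FTAG, &os);
 *	if (err != 0)
 *		return (err);
 *	... read-only access to the objset ...
 *	dmu_objset_rele(os, FTAG);
 */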

void dmu_objset_stats(objset_t *os, nvlist_t *nv);
void dmu_objset_fast_stat(objset_t *os, dmu_objset_stats_t *stat);
void dmu_objset_space(objset_t *os, uint64_t *refdbytesp, uint64_t *availbytesp,
    uint64_t *usedobjsp, uint64_t *availobjsp);
uint64_t dmu_objset_fsid_guid(objset_t *os);
int dmu_objset_find_dp(struct dsl_pool *dp, uint64_t ddobj,
    int func(struct dsl_pool *, struct dsl_dataset *, void *),
    void *arg, int flags);
int dmu_objset_prefetch(const char *name, void *arg);
void dmu_objset_evict_dbufs(objset_t *os);
timestruc_t dmu_objset_snap_cmtime(objset_t *os);

/* called from dsl */
void dmu_objset_sync(objset_t *os, zio_t *zio, dmu_tx_t *tx);
boolean_t dmu_objset_is_dirty(objset_t *os, uint64_t txg);