118 kmutex_t os_user_ptr_lock;
119 void *os_user_ptr;
120 sa_os_t *os_sa;
121 };
122
/*
 * Well-known object numbers.  The meta-objset and the meta-dnode both
 * live at object number 0; DMU_OBJECT_IS_SPECIAL() treats object 0 and
 * any object number whose high bit is set (i.e. non-positive when
 * viewed as int64_t) as "special" objects.
 */
123 #define DMU_META_OBJSET 0
124 #define DMU_META_DNODE_OBJECT 0
125 #define DMU_OBJECT_IS_SPECIAL(obj) ((int64_t)(obj) <= 0)
/*
 * Accessors for the dnode pointers held in the objset's dnode handles
 * (meta-dnode plus the per-user / per-group accounting dnodes).
 */
126 #define DMU_META_DNODE(os) ((os)->os_meta_dnode.dnh_dnode)
127 #define DMU_USERUSED_DNODE(os) ((os)->os_userused_dnode.dnh_dnode)
128 #define DMU_GROUPUSED_DNODE(os) ((os)->os_groupused_dnode.dnh_dnode)
129
/*
 * An objset is L2ARC-cacheable when its secondarycache property is set
 * to "all" or "metadata".
 */
130 #define DMU_OS_IS_L2CACHEABLE(os) \
131 ((os)->os_secondary_cache == ZFS_CACHE_ALL || \
132 (os)->os_secondary_cache == ZFS_CACHE_METADATA)
133
/*
 * L2ARC compressibility is keyed off the global zfs_mdcomp_disable
 * flag: compressible unless metadata compression has been disabled.
 */
134 #define DMU_OS_IS_L2COMPRESSIBLE(os) (zfs_mdcomp_disable == B_FALSE)
135
136 /* called from zpl */
/*
 * Objset access by name.  hold/own take a 'tag' identifying the holder
 * and return the objset through *osp; rele/disown are presumably the
 * matching release calls (NOTE(review): pairing inferred from names --
 * confirm against the implementation).
 */
137 int dmu_objset_hold(const char *name, void *tag, objset_t **osp);
138 int dmu_objset_own(const char *name, dmu_objset_type_t type,
139 boolean_t readonly, void *tag, objset_t **osp);
140 void dmu_objset_refresh_ownership(objset_t *os, void *tag);
141 void dmu_objset_rele(objset_t *os, void *tag);
142 void dmu_objset_disown(objset_t *os, void *tag);
/* Obtain the objset associated with an already-held dataset. */
143 int dmu_objset_from_ds(struct dsl_dataset *ds, objset_t **osp);
144
/*
 * Statistics / accounting queries.  dmu_objset_space() reports
 * referenced and available bytes and used/available object counts
 * through its out-parameters.
 */
145 void dmu_objset_stats(objset_t *os, nvlist_t *nv);
146 void dmu_objset_fast_stat(objset_t *os, dmu_objset_stats_t *stat);
147 void dmu_objset_space(objset_t *os, uint64_t *refdbytesp, uint64_t *availbytesp,
148 uint64_t *usedobjsp, uint64_t *availobjsp);
149 uint64_t dmu_objset_fsid_guid(objset_t *os);
/*
 * Iterate datasets under directory object 'ddobj', invoking 'func' with
 * 'arg' for each; 'flags' modifies traversal (semantics defined at the
 * implementation).
 */
150 int dmu_objset_find_dp(struct dsl_pool *dp, uint64_t ddobj,
151 int func(struct dsl_pool *, struct dsl_dataset *, void *),
152 void *arg, int flags);
153 int dmu_objset_prefetch(const char *name, void *arg);
154 void dmu_objset_evict_dbufs(objset_t *os);
/* Creation time of the objset's most recent snapshot directory. */
155 timestruc_t dmu_objset_snap_cmtime(objset_t *os);
156
157 /* called from dsl */
158 void dmu_objset_sync(objset_t *os, zio_t *zio, dmu_tx_t *tx);
159 boolean_t dmu_objset_is_dirty(objset_t *os, uint64_t txg);
|
118 kmutex_t os_user_ptr_lock;
119 void *os_user_ptr;
120 sa_os_t *os_sa;
121 };
122
/*
 * Well-known object numbers.  The meta-objset and the meta-dnode both
 * live at object number 0; DMU_OBJECT_IS_SPECIAL() treats object 0 and
 * any object number whose high bit is set (i.e. non-positive when
 * viewed as int64_t) as "special" objects.
 */
123 #define DMU_META_OBJSET 0
124 #define DMU_META_DNODE_OBJECT 0
125 #define DMU_OBJECT_IS_SPECIAL(obj) ((int64_t)(obj) <= 0)
/*
 * Accessors for the dnode pointers held in the objset's dnode handles
 * (meta-dnode plus the per-user / per-group accounting dnodes).
 */
126 #define DMU_META_DNODE(os) ((os)->os_meta_dnode.dnh_dnode)
127 #define DMU_USERUSED_DNODE(os) ((os)->os_userused_dnode.dnh_dnode)
128 #define DMU_GROUPUSED_DNODE(os) ((os)->os_groupused_dnode.dnh_dnode)
129
/*
 * An objset is L2ARC-cacheable when its secondarycache property is set
 * to "all" or "metadata".
 */
130 #define DMU_OS_IS_L2CACHEABLE(os) \
131 ((os)->os_secondary_cache == ZFS_CACHE_ALL || \
132 (os)->os_secondary_cache == ZFS_CACHE_METADATA)
133
/*
 * L2ARC compressibility is keyed off the global zfs_mdcomp_disable
 * flag: compressible unless metadata compression has been disabled.
 */
134 #define DMU_OS_IS_L2COMPRESSIBLE(os) (zfs_mdcomp_disable == B_FALSE)
135
136 /* called from zpl */
/*
 * Objset access by name.  hold/own take a 'tag' identifying the holder
 * and return the objset through *osp; rele/disown are presumably the
 * matching release calls (NOTE(review): pairing inferred from names --
 * confirm against the implementation).  The _nolock variants appear to
 * skip some locking step -- NOTE(review): exact lock elided is not
 * visible here; confirm which lock callers must already hold.
 */
137 int dmu_objset_hold(const char *name, void *tag, objset_t **osp);
138 int dmu_objset_hold_nolock(const char *name, void *tag, objset_t **osp);
139 int dmu_objset_own(const char *name, dmu_objset_type_t type,
140 boolean_t readonly, void *tag, objset_t **osp);
141 int dmu_objset_own_nolock(const char *name, dmu_objset_type_t type,
142 boolean_t readonly, void *tag, objset_t **osp);
143 void dmu_objset_refresh_ownership(objset_t *os, void *tag);
144 void dmu_objset_rele(objset_t *os, void *tag);
145 void dmu_objset_disown(objset_t *os, void *tag);
/* Obtain the objset associated with an already-held dataset. */
146 int dmu_objset_from_ds(struct dsl_dataset *ds, objset_t **osp);
147
/*
 * Statistics / accounting queries.  dmu_objset_space() reports
 * referenced and available bytes and used/available object counts
 * through its out-parameters.
 */
148 void dmu_objset_stats(objset_t *os, nvlist_t *nv);
149 void dmu_objset_fast_stat(objset_t *os, dmu_objset_stats_t *stat);
150 void dmu_objset_space(objset_t *os, uint64_t *refdbytesp, uint64_t *availbytesp,
151 uint64_t *usedobjsp, uint64_t *availobjsp);
152 uint64_t dmu_objset_fsid_guid(objset_t *os);
/*
 * Iterate datasets under directory object 'ddobj', invoking 'func' with
 * 'arg' for each; 'flags' modifies traversal (semantics defined at the
 * implementation).
 */
153 int dmu_objset_find_dp(struct dsl_pool *dp, uint64_t ddobj,
154 int func(struct dsl_pool *, struct dsl_dataset *, void *),
155 void *arg, int flags);
156 int dmu_objset_prefetch(const char *name, void *arg);
157 void dmu_objset_evict_dbufs(objset_t *os);
/* Creation time of the objset's most recent snapshot directory. */
158 timestruc_t dmu_objset_snap_cmtime(objset_t *os);
159
160 /* called from dsl */
161 void dmu_objset_sync(objset_t *os, zio_t *zio, dmu_tx_t *tx);
162 boolean_t dmu_objset_is_dirty(objset_t *os, uint64_t txg);
|