126 /* protect access to list */
127 kmutex_t dr_mtx;
128
129 /* Our list of dirty children */
130 list_t dr_children;
131 } di;
132 struct dirty_leaf {
133
134 /*
135 * dr_data is set when we dirty the buffer
136 * so that we can retain the pointer even if it
137 * gets COW'd in a subsequent transaction group.
138 */
139 arc_buf_t *dr_data;
140 blkptr_t dr_overridden_by;
141 override_states_t dr_override_state;
142 uint8_t dr_copies;
143 boolean_t dr_nopwrite;
144 } dl;
145 } dt;
146 } dbuf_dirty_record_t;
147
148 typedef struct dmu_buf_impl {
149 /*
150 * The following members are immutable, with the exception of
151 * db.db_data, which is protected by db_mtx.
152 */
153
154 /* the publicly visible structure */
155 dmu_buf_t db;
156
157 /* the objset we belong to */
158 struct objset *db_objset;
159
160 /*
161 * handle to safely access the dnode we belong to (NULL when evicted)
162 */
163 struct dnode_handle *db_dnode_handle;
164
165 /*
258
/*
 * NOTE(review): the leading number on each line below is an artifact of
 * the numbered listing this fragment was captured from, not a C token.
 */
/*
 * Hold/lookup interface: acquire a tagged reference to the dbuf for a
 * given dnode/level/blkid.  dbuf_hold_impl can optionally create the
 * buffer (int create) and returns it through *dbp.
 */
259 dmu_buf_impl_t *dbuf_hold(struct dnode *dn, uint64_t blkid, void *tag);
260 dmu_buf_impl_t *dbuf_hold_level(struct dnode *dn, int level, uint64_t blkid,
261 void *tag);
262 int dbuf_hold_impl(struct dnode *dn, uint8_t level, uint64_t blkid, int create,
263 void *tag, dmu_buf_impl_t **dbp);
264
/* Issue a read-ahead for blkid of dn at the given ZIO priority. */
265 void dbuf_prefetch(struct dnode *dn, uint64_t blkid, zio_priority_t prio);
266
/* Reference counting on an already-held dbuf. */
267 void dbuf_add_ref(dmu_buf_impl_t *db, void *tag);
268 uint64_t dbuf_refcount(dmu_buf_impl_t *db);
269
/*
 * Drop a tagged hold.  NOTE(review): the _and_unlock variant presumably
 * also drops the buffer's mutex — confirm against dbuf.c.
 */
270 void dbuf_rele(dmu_buf_impl_t *db, void *tag);
271 void dbuf_rele_and_unlock(dmu_buf_impl_t *db, void *tag);
272
/* Look up an existing dbuf without creating one. */
273 dmu_buf_impl_t *dbuf_find(struct dnode *dn, uint8_t level, uint64_t blkid);
274
/*
 * Read and fill/dirty interface: dbuf_read pulls data in (optionally as
 * part of a parent zio); the will_fill/fill_done pair brackets a caller
 * overwrite; dbuf_dirty records the buffer as modified in tx.
 */
275 int dbuf_read(dmu_buf_impl_t *db, zio_t *zio, uint32_t flags);
276 void dmu_buf_will_not_fill(dmu_buf_t *db, dmu_tx_t *tx);
277 void dmu_buf_will_fill(dmu_buf_t *db, dmu_tx_t *tx);
278 void dmu_buf_fill_done(dmu_buf_t *db, dmu_tx_t *tx);
279 void dbuf_assign_arcbuf(dmu_buf_impl_t *db, arc_buf_t *buf, dmu_tx_t *tx);
280 dbuf_dirty_record_t *dbuf_dirty(dmu_buf_impl_t *db, dmu_tx_t *tx);
281 arc_buf_t *dbuf_loan_arcbuf(dmu_buf_impl_t *db);
/* Store small data directly in an embedded block pointer (BP rewrite). */
282 void dmu_buf_write_embedded(dmu_buf_t *dbuf, void *data,
283 bp_embedded_type_t etype, enum zio_compress comp,
284 int uncompressed_size, int compressed_size, int byteorder, dmu_tx_t *tx);
285
/* Teardown: detach a dbuf from its dnode / evict it from the cache. */
286 void dbuf_clear(dmu_buf_impl_t *db);
287 void dbuf_evict(dmu_buf_impl_t *db);
288
/* Dirty-record maintenance used by the sync path. */
289 void dbuf_setdirty(dmu_buf_impl_t *db, dmu_tx_t *tx);
290 void dbuf_unoverride(dbuf_dirty_record_t *dr);
291 void dbuf_sync_list(list_t *list, dmu_tx_t *tx);
292 void dbuf_release_bp(dmu_buf_impl_t *db);
293
/* Invalidate the dbufs covering block range [start, end] of dn in tx. */
294 void dbuf_free_range(struct dnode *dn, uint64_t start, uint64_t end,
295 struct dmu_tx *);
296
/* Resize the buffer's data in the context of tx. */
297 void dbuf_new_size(dmu_buf_impl_t *db, int size, dmu_tx_t *tx);
298
/* Reach the owning dnode (and its lock) through db_dnode_handle. */
299 #define DB_DNODE(_db) ((_db)->db_dnode_handle->dnh_dnode)
300 #define DB_DNODE_LOCK(_db) ((_db)->db_dnode_handle->dnh_zrlock)
|
126 /* protect access to list */
127 kmutex_t dr_mtx;
128
129 /* Our list of dirty children */
130 list_t dr_children;
131 } di;
132 struct dirty_leaf {
133
134 /*
135 * dr_data is set when we dirty the buffer
136 * so that we can retain the pointer even if it
137 * gets COW'd in a subsequent transaction group.
138 */
139 arc_buf_t *dr_data;
140 blkptr_t dr_overridden_by;
141 override_states_t dr_override_state;
142 uint8_t dr_copies;
143 boolean_t dr_nopwrite;
144 } dl;
145 } dt;
146
147 boolean_t dr_zero_write;
148 } dbuf_dirty_record_t;
149
150 typedef struct dmu_buf_impl {
151 /*
152 * The following members are immutable, with the exception of
153 * db.db_data, which is protected by db_mtx.
154 */
155
156 /* the publicly visible structure */
157 dmu_buf_t db;
158
159 /* the objset we belong to */
160 struct objset *db_objset;
161
162 /*
163 * handle to safely access the dnode we belong to (NULL when evicted)
164 */
165 struct dnode_handle *db_dnode_handle;
166
167 /*
260
/*
 * NOTE(review): the leading number on each line below is an artifact of
 * the numbered listing this fragment was captured from, not a C token.
 * This snapshot of the interface adds zero-write support:
 * dmu_buf_will_zero_fill(), a zero_write argument to dbuf_dirty(), and
 * dbuf_zero_dirty().
 */
/*
 * Hold/lookup interface: acquire a tagged reference to the dbuf for a
 * given dnode/level/blkid.  dbuf_hold_impl can optionally create the
 * buffer (int create) and returns it through *dbp.
 */
261 dmu_buf_impl_t *dbuf_hold(struct dnode *dn, uint64_t blkid, void *tag);
262 dmu_buf_impl_t *dbuf_hold_level(struct dnode *dn, int level, uint64_t blkid,
263 void *tag);
264 int dbuf_hold_impl(struct dnode *dn, uint8_t level, uint64_t blkid, int create,
265 void *tag, dmu_buf_impl_t **dbp);
266
/* Issue a read-ahead for blkid of dn at the given ZIO priority. */
267 void dbuf_prefetch(struct dnode *dn, uint64_t blkid, zio_priority_t prio);
268
/* Reference counting on an already-held dbuf. */
269 void dbuf_add_ref(dmu_buf_impl_t *db, void *tag);
270 uint64_t dbuf_refcount(dmu_buf_impl_t *db);
271
/*
 * Drop a tagged hold.  NOTE(review): the _and_unlock variant presumably
 * also drops the buffer's mutex — confirm against dbuf.c.
 */
272 void dbuf_rele(dmu_buf_impl_t *db, void *tag);
273 void dbuf_rele_and_unlock(dmu_buf_impl_t *db, void *tag);
274
/* Look up an existing dbuf without creating one. */
275 dmu_buf_impl_t *dbuf_find(struct dnode *dn, uint8_t level, uint64_t blkid);
276
/*
 * Read and fill/dirty interface.  dmu_buf_will_zero_fill is the
 * zero-write counterpart of dmu_buf_will_fill; dbuf_dirty's zero_write
 * flag and dbuf_zero_dirty record zero-write dirtyings (matching the
 * dr_zero_write field added to the dirty record above).
 */
277 int dbuf_read(dmu_buf_impl_t *db, zio_t *zio, uint32_t flags);
278 void dmu_buf_will_not_fill(dmu_buf_t *db, dmu_tx_t *tx);
279 void dmu_buf_will_fill(dmu_buf_t *db, dmu_tx_t *tx);
280 void dmu_buf_will_zero_fill(dmu_buf_t *db, dmu_tx_t *tx);
281 void dmu_buf_fill_done(dmu_buf_t *db, dmu_tx_t *tx);
282 void dbuf_assign_arcbuf(dmu_buf_impl_t *db, arc_buf_t *buf, dmu_tx_t *tx);
283 dbuf_dirty_record_t *dbuf_dirty(dmu_buf_impl_t *db, dmu_tx_t *tx, boolean_t zero_write);
284 dbuf_dirty_record_t *dbuf_zero_dirty(dmu_buf_impl_t *db, dmu_tx_t *tx);
285 arc_buf_t *dbuf_loan_arcbuf(dmu_buf_impl_t *db);
/* Store small data directly in an embedded block pointer (BP rewrite). */
286 void dmu_buf_write_embedded(dmu_buf_t *dbuf, void *data,
287 bp_embedded_type_t etype, enum zio_compress comp,
288 int uncompressed_size, int compressed_size, int byteorder, dmu_tx_t *tx);
289
/* Teardown: detach a dbuf from its dnode / evict it from the cache. */
290 void dbuf_clear(dmu_buf_impl_t *db);
291 void dbuf_evict(dmu_buf_impl_t *db);
292
/* Dirty-record maintenance used by the sync path. */
293 void dbuf_setdirty(dmu_buf_impl_t *db, dmu_tx_t *tx);
294 void dbuf_unoverride(dbuf_dirty_record_t *dr);
295 void dbuf_sync_list(list_t *list, dmu_tx_t *tx);
296 void dbuf_release_bp(dmu_buf_impl_t *db);
297
/* Invalidate the dbufs covering block range [start, end] of dn in tx. */
298 void dbuf_free_range(struct dnode *dn, uint64_t start, uint64_t end,
299 struct dmu_tx *);
300
/* Resize the buffer's data in the context of tx. */
301 void dbuf_new_size(dmu_buf_impl_t *db, int size, dmu_tx_t *tx);
302
/* Reach the owning dnode (and its lock) through db_dnode_handle. */
303 #define DB_DNODE(_db) ((_db)->db_dnode_handle->dnh_dnode)
304 #define DB_DNODE_LOCK(_db) ((_db)->db_dnode_handle->dnh_zrlock)
|