Print this page
3006 VERIFY[S,U,P] and ASSERT[S,U,P] frequently check if first argument is zero
Split |
Close |
Expand all |
Collapse all |
--- old/usr/src/uts/common/fs/zfs/bpobj.c
+++ new/usr/src/uts/common/fs/zfs/bpobj.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
↓ open down ↓ |
12 lines elided |
↑ open up ↑ |
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21 /*
22 22 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
23 - * Copyright (c) 2011 by Delphix. All rights reserved.
23 + * Copyright (c) 2012 by Delphix. All rights reserved.
24 24 */
25 25
26 26 #include <sys/bpobj.h>
27 27 #include <sys/zfs_context.h>
28 28 #include <sys/refcount.h>
29 29 #include <sys/dsl_pool.h>
30 30
31 31 uint64_t
32 32 bpobj_alloc(objset_t *os, int blocksize, dmu_tx_t *tx)
33 33 {
34 34 int size;
35 35
36 36 if (spa_version(dmu_objset_spa(os)) < SPA_VERSION_BPOBJ_ACCOUNT)
37 37 size = BPOBJ_SIZE_V0;
38 38 else if (spa_version(dmu_objset_spa(os)) < SPA_VERSION_DEADLISTS)
39 39 size = BPOBJ_SIZE_V1;
40 40 else
41 41 size = sizeof (bpobj_phys_t);
42 42
43 43 return (dmu_object_alloc(os, DMU_OT_BPOBJ, blocksize,
44 44 DMU_OT_BPOBJ_HDR, size, tx));
45 45 }
↓ open down ↓ |
12 lines elided |
↑ open up ↑ |
46 46
/*
 * Destroy bpobj "obj" in "os": recursively free any sub-bpobjs listed in
 * its subobj array, free the subobj array itself, and finally free the
 * bpobj object.  All failures are fatal (VERIFY0) since this runs in
 * sync context.
 */
void
bpobj_free(objset_t *os, uint64_t obj, dmu_tx_t *tx)
{
	int64_t i;
	bpobj_t bpo;
	dmu_object_info_t doi;
	int epb;		/* uint64_t entries per data block */
	dmu_buf_t *dbuf = NULL;

	VERIFY0(bpobj_open(&bpo, os, obj));

	mutex_enter(&bpo.bpo_lock);

	/* Old-format bpobjs (or ones with no subobj array) skip recursion. */
	if (!bpo.bpo_havesubobj || bpo.bpo_phys->bpo_subobjs == 0)
		goto out;

	VERIFY0(dmu_object_info(os, bpo.bpo_phys->bpo_subobjs, &doi));
	epb = doi.doi_data_block_size / sizeof (uint64_t);

	/* Walk the subobj array from the end; each entry is a bpobj to free. */
	for (i = bpo.bpo_phys->bpo_num_subobjs - 1; i >= 0; i--) {
		uint64_t *objarray;
		uint64_t offset, blkoff;

		offset = i * sizeof (uint64_t);
		blkoff = P2PHASE(i, epb);

		/* Re-hold only when we step backward out of the cached block. */
		if (dbuf == NULL || dbuf->db_offset > offset) {
			if (dbuf)
				dmu_buf_rele(dbuf, FTAG);
			VERIFY0(dmu_buf_hold(os,
			    bpo.bpo_phys->bpo_subobjs, offset, FTAG, &dbuf, 0));
		}

		ASSERT3U(offset, >=, dbuf->db_offset);
		ASSERT3U(offset, <, dbuf->db_offset + dbuf->db_size);

		objarray = dbuf->db_data;
		/* Recursive call: frees nested subobjs as well. */
		bpobj_free(os, objarray[blkoff], tx);
	}
	if (dbuf) {
		dmu_buf_rele(dbuf, FTAG);
		dbuf = NULL;
	}
	VERIFY0(dmu_object_free(os, bpo.bpo_phys->bpo_subobjs, tx));

out:
	mutex_exit(&bpo.bpo_lock);
	bpobj_close(&bpo);

	VERIFY0(dmu_object_free(os, obj, tx));
}
98 98
99 99 int
100 100 bpobj_open(bpobj_t *bpo, objset_t *os, uint64_t object)
101 101 {
102 102 dmu_object_info_t doi;
103 103 int err;
104 104
105 105 err = dmu_object_info(os, object, &doi);
106 106 if (err)
107 107 return (err);
108 108
109 109 bzero(bpo, sizeof (*bpo));
110 110 mutex_init(&bpo->bpo_lock, NULL, MUTEX_DEFAULT, NULL);
111 111
112 112 ASSERT(bpo->bpo_dbuf == NULL);
113 113 ASSERT(bpo->bpo_phys == NULL);
114 114 ASSERT(object != 0);
115 115 ASSERT3U(doi.doi_type, ==, DMU_OT_BPOBJ);
116 116 ASSERT3U(doi.doi_bonus_type, ==, DMU_OT_BPOBJ_HDR);
117 117
118 118 err = dmu_bonus_hold(os, object, bpo, &bpo->bpo_dbuf);
119 119 if (err)
120 120 return (err);
121 121
122 122 bpo->bpo_os = os;
123 123 bpo->bpo_object = object;
124 124 bpo->bpo_epb = doi.doi_data_block_size >> SPA_BLKPTRSHIFT;
125 125 bpo->bpo_havecomp = (doi.doi_bonus_size > BPOBJ_SIZE_V0);
126 126 bpo->bpo_havesubobj = (doi.doi_bonus_size > BPOBJ_SIZE_V1);
127 127 bpo->bpo_phys = bpo->bpo_dbuf->db_data;
128 128 return (0);
129 129 }
130 130
131 131 void
132 132 bpobj_close(bpobj_t *bpo)
133 133 {
134 134 /* Lame workaround for closing a bpobj that was never opened. */
135 135 if (bpo->bpo_object == 0)
136 136 return;
137 137
138 138 dmu_buf_rele(bpo->bpo_dbuf, bpo);
139 139 if (bpo->bpo_cached_dbuf != NULL)
140 140 dmu_buf_rele(bpo->bpo_cached_dbuf, bpo);
141 141 bpo->bpo_dbuf = NULL;
142 142 bpo->bpo_phys = NULL;
143 143 bpo->bpo_cached_dbuf = NULL;
144 144 bpo->bpo_object = 0;
145 145
146 146 mutex_destroy(&bpo->bpo_lock);
147 147 }
148 148
/*
 * Core iterator: invoke func(arg, bp, tx) on every block pointer in the
 * bpobj, including those held in sub-bpobjs.  If "free" is set, each
 * visited entry is also removed and the space accounting in bpo_phys is
 * adjusted; iteration runs from the end toward the start so the freed
 * tail can be released with a single dmu_free_range() per object.
 * Returns 0, or the first nonzero value returned by func / the DMU.
 */
static int
bpobj_iterate_impl(bpobj_t *bpo, bpobj_itor_t func, void *arg, dmu_tx_t *tx,
    boolean_t free)
{
	dmu_object_info_t doi;
	int epb;		/* uint64_t entries per subobj data block */
	int64_t i;
	int err = 0;
	dmu_buf_t *dbuf = NULL;

	mutex_enter(&bpo->bpo_lock);

	if (free)
		dmu_buf_will_dirty(bpo->bpo_dbuf, tx);

	/* Phase 1: the blkptrs stored directly in this object, last first. */
	for (i = bpo->bpo_phys->bpo_num_blkptrs - 1; i >= 0; i--) {
		blkptr_t *bparray;
		blkptr_t *bp;
		uint64_t offset, blkoff;

		offset = i * sizeof (blkptr_t);
		blkoff = P2PHASE(i, bpo->bpo_epb);

		/* Re-hold only when we step backward out of the cached block. */
		if (dbuf == NULL || dbuf->db_offset > offset) {
			if (dbuf)
				dmu_buf_rele(dbuf, FTAG);
			err = dmu_buf_hold(bpo->bpo_os, bpo->bpo_object, offset,
			    FTAG, &dbuf, 0);
			if (err)
				break;
		}

		ASSERT3U(offset, >=, dbuf->db_offset);
		ASSERT3U(offset, <, dbuf->db_offset + dbuf->db_size);

		bparray = dbuf->db_data;
		bp = &bparray[blkoff];
		err = func(arg, bp, tx);
		if (err)
			break;
		if (free) {
			/* Deduct this bp from the bpobj's space accounting. */
			bpo->bpo_phys->bpo_bytes -=
			    bp_get_dsize_sync(dmu_objset_spa(bpo->bpo_os), bp);
			ASSERT3S(bpo->bpo_phys->bpo_bytes, >=, 0);
			if (bpo->bpo_havecomp) {
				bpo->bpo_phys->bpo_comp -= BP_GET_PSIZE(bp);
				bpo->bpo_phys->bpo_uncomp -= BP_GET_UCSIZE(bp);
			}
			bpo->bpo_phys->bpo_num_blkptrs--;
			ASSERT3S(bpo->bpo_phys->bpo_num_blkptrs, >=, 0);
		}
	}
	if (dbuf) {
		dmu_buf_rele(dbuf, FTAG);
		dbuf = NULL;
	}
	if (free) {
		/* i now indexes the last entry NOT processed; free the tail. */
		i++;
		VERIFY0(dmu_free_range(bpo->bpo_os, bpo->bpo_object,
		    i * sizeof (blkptr_t), -1ULL, tx));
	}
	if (err || !bpo->bpo_havesubobj || bpo->bpo_phys->bpo_subobjs == 0)
		goto out;

	/* Phase 2: recurse into the sub-bpobjs, last first. */
	ASSERT(bpo->bpo_havecomp);
	err = dmu_object_info(bpo->bpo_os, bpo->bpo_phys->bpo_subobjs, &doi);
	if (err) {
		mutex_exit(&bpo->bpo_lock);
		return (err);
	}
	epb = doi.doi_data_block_size / sizeof (uint64_t);

	for (i = bpo->bpo_phys->bpo_num_subobjs - 1; i >= 0; i--) {
		uint64_t *objarray;
		uint64_t offset, blkoff;
		bpobj_t sublist;
		uint64_t used_before, comp_before, uncomp_before;
		uint64_t used_after, comp_after, uncomp_after;

		offset = i * sizeof (uint64_t);
		blkoff = P2PHASE(i, epb);

		if (dbuf == NULL || dbuf->db_offset > offset) {
			if (dbuf)
				dmu_buf_rele(dbuf, FTAG);
			err = dmu_buf_hold(bpo->bpo_os,
			    bpo->bpo_phys->bpo_subobjs, offset, FTAG, &dbuf, 0);
			if (err)
				break;
		}

		ASSERT3U(offset, >=, dbuf->db_offset);
		ASSERT3U(offset, <, dbuf->db_offset + dbuf->db_size);

		objarray = dbuf->db_data;
		err = bpobj_open(&sublist, bpo->bpo_os, objarray[blkoff]);
		if (err)
			break;
		if (free) {
			/* Snapshot sublist space so the delta can be charged below. */
			err = bpobj_space(&sublist,
			    &used_before, &comp_before, &uncomp_before);
			/*
			 * NOTE(review): breaking here leaves the just-opened
			 * sublist un-closed — verify whether this error path
			 * should bpobj_close(&sublist) first.
			 */
			if (err)
				break;
		}
		err = bpobj_iterate_impl(&sublist, func, arg, tx, free);
		if (free) {
			/* Charge the space the recursion freed to this bpobj. */
			VERIFY0(bpobj_space(&sublist,
			    &used_after, &comp_after, &uncomp_after));
			bpo->bpo_phys->bpo_bytes -= used_before - used_after;
			ASSERT3S(bpo->bpo_phys->bpo_bytes, >=, 0);
			bpo->bpo_phys->bpo_comp -= comp_before - comp_after;
			bpo->bpo_phys->bpo_uncomp -=
			    uncomp_before - uncomp_after;
		}

		bpobj_close(&sublist);
		if (err)
			break;
		if (free) {
			/* Sublist is now empty; free it and drop it from the array. */
			err = dmu_object_free(bpo->bpo_os,
			    objarray[blkoff], tx);
			if (err)
				break;
			bpo->bpo_phys->bpo_num_subobjs--;
			ASSERT3S(bpo->bpo_phys->bpo_num_subobjs, >=, 0);
		}
	}
	if (dbuf) {
		dmu_buf_rele(dbuf, FTAG);
		dbuf = NULL;
	}
	if (free) {
		/* Free the processed tail of the subobj array. */
		VERIFY0(dmu_free_range(bpo->bpo_os,
		    bpo->bpo_phys->bpo_subobjs,
		    (i + 1) * sizeof (uint64_t), -1ULL, tx));
	}

out:
	/* If there are no entries, there should be no bytes. */
	ASSERT(bpo->bpo_phys->bpo_num_blkptrs > 0 ||
	    (bpo->bpo_havesubobj && bpo->bpo_phys->bpo_num_subobjs > 0) ||
	    bpo->bpo_phys->bpo_bytes == 0);

	mutex_exit(&bpo->bpo_lock);
	return (err);
}
295 295
296 296 /*
297 297 * Iterate and remove the entries. If func returns nonzero, iteration
298 298 * will stop and that entry will not be removed.
299 299 */
300 300 int
301 301 bpobj_iterate(bpobj_t *bpo, bpobj_itor_t func, void *arg, dmu_tx_t *tx)
302 302 {
303 303 return (bpobj_iterate_impl(bpo, func, arg, tx, B_TRUE));
304 304 }
305 305
306 306 /*
307 307 * Iterate the entries. If func returns nonzero, iteration will stop.
308 308 */
309 309 int
310 310 bpobj_iterate_nofree(bpobj_t *bpo, bpobj_itor_t func, void *arg, dmu_tx_t *tx)
311 311 {
312 312 return (bpobj_iterate_impl(bpo, func, arg, tx, B_FALSE));
313 313 }
↓ open down ↓ |
22 lines elided |
↑ open up ↑ |
314 314
/*
 * Append bpobj "subobj" to bpo's list of sub-bpobjs, folding its space
 * totals into bpo's.  An empty subobj is simply freed.  When the subobj
 * itself has a single block of sub-subobjs, those are spliced directly
 * into bpo's array to limit recursion depth in bpobj_iterate.
 */
void
bpobj_enqueue_subobj(bpobj_t *bpo, uint64_t subobj, dmu_tx_t *tx)
{
	bpobj_t subbpo;
	uint64_t used, comp, uncomp, subsubobjs;

	ASSERT(bpo->bpo_havesubobj);
	ASSERT(bpo->bpo_havecomp);

	VERIFY0(bpobj_open(&subbpo, bpo->bpo_os, subobj));
	VERIFY0(bpobj_space(&subbpo, &used, &comp, &uncomp));

	if (used == 0) {
		/* No point in having an empty subobj. */
		bpobj_close(&subbpo);
		bpobj_free(bpo->bpo_os, subobj, tx);
		return;
	}

	dmu_buf_will_dirty(bpo->bpo_dbuf, tx);
	/* Lazily create the subobj array on first use. */
	if (bpo->bpo_phys->bpo_subobjs == 0) {
		bpo->bpo_phys->bpo_subobjs = dmu_object_alloc(bpo->bpo_os,
		    DMU_OT_BPOBJ_SUBOBJ, SPA_MAXBLOCKSIZE, DMU_OT_NONE, 0, tx);
	}

	mutex_enter(&bpo->bpo_lock);
	/* Append subobj's object number to the end of the array. */
	dmu_write(bpo->bpo_os, bpo->bpo_phys->bpo_subobjs,
	    bpo->bpo_phys->bpo_num_subobjs * sizeof (subobj),
	    sizeof (subobj), &subobj, tx);
	bpo->bpo_phys->bpo_num_subobjs++;

	/*
	 * If subobj has only one block of subobjs, then move subobj's
	 * subobjs to bpo's subobj list directly.  This reduces
	 * recursion in bpobj_iterate due to nested subobjs.
	 */
	subsubobjs = subbpo.bpo_phys->bpo_subobjs;
	if (subsubobjs != 0) {
		dmu_object_info_t doi;

		VERIFY0(dmu_object_info(bpo->bpo_os, subsubobjs, &doi));
		/* A max offset of one block means the array fits in one block. */
		if (doi.doi_max_offset == doi.doi_data_block_size) {
			dmu_buf_t *subdb;
			uint64_t numsubsub = subbpo.bpo_phys->bpo_num_subobjs;

			VERIFY0(dmu_buf_hold(bpo->bpo_os, subsubobjs,
			    0, FTAG, &subdb, 0));
			/* Splice the sub-subobj entries onto bpo's array. */
			dmu_write(bpo->bpo_os, bpo->bpo_phys->bpo_subobjs,
			    bpo->bpo_phys->bpo_num_subobjs * sizeof (subobj),
			    numsubsub * sizeof (subobj), subdb->db_data, tx);
			dmu_buf_rele(subdb, FTAG);
			bpo->bpo_phys->bpo_num_subobjs += numsubsub;

			/* The subobj no longer owns its sub-subobj array. */
			dmu_buf_will_dirty(subbpo.bpo_dbuf, tx);
			subbpo.bpo_phys->bpo_subobjs = 0;
			VERIFY0(dmu_object_free(bpo->bpo_os,
			    subsubobjs, tx));
		}
	}
	/* Fold the subobj's space totals into this bpobj's accounting. */
	bpo->bpo_phys->bpo_bytes += used;
	bpo->bpo_phys->bpo_comp += comp;
	bpo->bpo_phys->bpo_uncomp += uncomp;
	mutex_exit(&bpo->bpo_lock);

	bpobj_close(&subbpo);
}
381 381
/*
 * Append block pointer "bp" to the bpobj's direct blkptr array and
 * update the space accounting.  A sanitized copy of the bp is stored:
 * the fill count is dropped, and the checksum is zeroed for non-dedup
 * bps so the object compresses better.
 */
void
bpobj_enqueue(bpobj_t *bpo, const blkptr_t *bp, dmu_tx_t *tx)
{
	blkptr_t stored_bp = *bp;
	uint64_t offset;
	int blkoff;
	blkptr_t *bparray;

	ASSERT(!BP_IS_HOLE(bp));

	/* We never need the fill count. */
	stored_bp.blk_fill = 0;

	/* The bpobj will compress better if we can leave off the checksum */
	if (!BP_GET_DEDUP(bp))
		bzero(&stored_bp.blk_cksum, sizeof (stored_bp.blk_cksum));

	mutex_enter(&bpo->bpo_lock);

	offset = bpo->bpo_phys->bpo_num_blkptrs * sizeof (stored_bp);
	blkoff = P2PHASE(bpo->bpo_phys->bpo_num_blkptrs, bpo->bpo_epb);

	/* Refresh the cached dbuf unless it already covers "offset". */
	if (bpo->bpo_cached_dbuf == NULL ||
	    offset < bpo->bpo_cached_dbuf->db_offset ||
	    offset >= bpo->bpo_cached_dbuf->db_offset +
	    bpo->bpo_cached_dbuf->db_size) {
		if (bpo->bpo_cached_dbuf)
			dmu_buf_rele(bpo->bpo_cached_dbuf, bpo);
		VERIFY0(dmu_buf_hold(bpo->bpo_os, bpo->bpo_object,
		    offset, bpo, &bpo->bpo_cached_dbuf, 0));
	}

	dmu_buf_will_dirty(bpo->bpo_cached_dbuf, tx);
	bparray = bpo->bpo_cached_dbuf->db_data;
	bparray[blkoff] = stored_bp;

	/* Account for the new entry in the bonus buffer. */
	dmu_buf_will_dirty(bpo->bpo_dbuf, tx);
	bpo->bpo_phys->bpo_num_blkptrs++;
	bpo->bpo_phys->bpo_bytes +=
	    bp_get_dsize_sync(dmu_objset_spa(bpo->bpo_os), bp);
	if (bpo->bpo_havecomp) {
		bpo->bpo_phys->bpo_comp += BP_GET_PSIZE(bp);
		bpo->bpo_phys->bpo_uncomp += BP_GET_UCSIZE(bp);
	}
	mutex_exit(&bpo->bpo_lock);
}
428 428
/*
 * Accumulator passed to space_range_cb() when summing the space used by
 * block pointers whose birth txg falls in (mintxg, maxtxg].
 */
struct space_range_arg {
	spa_t *spa;
	uint64_t mintxg;	/* exclusive lower birth-txg bound */
	uint64_t maxtxg;	/* inclusive upper birth-txg bound */
	uint64_t used;		/* accumulated deflated size */
	uint64_t comp;		/* accumulated physical (compressed) size */
	uint64_t uncomp;	/* accumulated uncompressed size */
};
437 437
438 438 /* ARGSUSED */
439 439 static int
440 440 space_range_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx)
441 441 {
442 442 struct space_range_arg *sra = arg;
443 443
444 444 if (bp->blk_birth > sra->mintxg && bp->blk_birth <= sra->maxtxg) {
445 445 if (dsl_pool_sync_context(spa_get_dsl(sra->spa)))
446 446 sra->used += bp_get_dsize_sync(sra->spa, bp);
447 447 else
448 448 sra->used += bp_get_dsize(sra->spa, bp);
449 449 sra->comp += BP_GET_PSIZE(bp);
450 450 sra->uncomp += BP_GET_UCSIZE(bp);
451 451 }
452 452 return (0);
453 453 }
454 454
455 455 int
456 456 bpobj_space(bpobj_t *bpo, uint64_t *usedp, uint64_t *compp, uint64_t *uncompp)
457 457 {
458 458 mutex_enter(&bpo->bpo_lock);
459 459
460 460 *usedp = bpo->bpo_phys->bpo_bytes;
461 461 if (bpo->bpo_havecomp) {
462 462 *compp = bpo->bpo_phys->bpo_comp;
463 463 *uncompp = bpo->bpo_phys->bpo_uncomp;
464 464 mutex_exit(&bpo->bpo_lock);
465 465 return (0);
466 466 } else {
467 467 mutex_exit(&bpo->bpo_lock);
468 468 return (bpobj_space_range(bpo, 0, UINT64_MAX,
469 469 usedp, compp, uncompp));
470 470 }
471 471 }
472 472
473 473 /*
474 474 * Return the amount of space in the bpobj which is:
475 475 * mintxg < blk_birth <= maxtxg
476 476 */
477 477 int
478 478 bpobj_space_range(bpobj_t *bpo, uint64_t mintxg, uint64_t maxtxg,
479 479 uint64_t *usedp, uint64_t *compp, uint64_t *uncompp)
480 480 {
481 481 struct space_range_arg sra = { 0 };
482 482 int err;
483 483
484 484 /*
485 485 * As an optimization, if they want the whole txg range, just
486 486 * get bpo_bytes rather than iterating over the bps.
487 487 */
488 488 if (mintxg < TXG_INITIAL && maxtxg == UINT64_MAX && bpo->bpo_havecomp)
489 489 return (bpobj_space(bpo, usedp, compp, uncompp));
490 490
491 491 sra.spa = dmu_objset_spa(bpo->bpo_os);
492 492 sra.mintxg = mintxg;
493 493 sra.maxtxg = maxtxg;
494 494
495 495 err = bpobj_iterate_nofree(bpo, space_range_cb, &sra, NULL);
496 496 *usedp = sra.used;
497 497 *compp = sra.comp;
498 498 *uncompp = sra.uncomp;
499 499 return (err);
500 500 }
↓ open down ↓ |
80 lines elided |
↑ open up ↑ |
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX