4171 clean up spa_feature_*() interfaces
4172 implement extensible_dataset feature for use by other zpool features
Reviewed by: Max Grossman <max.grossman@delphix.com>
Reviewed by: Christopher Siden <christopher.siden@delphix.com>
Reviewed by: George Wilson <george.wilson@delphix.com>
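The functional change to this file is the spa_feature_*() interface cleanup from 4171:
callers now pass a spa_feature_t enum constant directly instead of first looking up a
zfeature_info_t pointer in spa_feature_table. A minimal caller-side sketch of the old
and new calling conventions, mirroring the space_map_alloc() hunk below (spa and tx
stand for the spa_t * and dmu_tx_t * already in scope at the call site; this is
illustrative only, not additional patched code):

	/* Old interface: look up the feature descriptor, then pass the pointer. */
	zfeature_info_t *space_map_histogram =
	    &spa_feature_table[SPA_FEATURE_SPACEMAP_HISTOGRAM];
	if (spa_feature_is_enabled(spa, space_map_histogram))
		spa_feature_incr(spa, space_map_histogram, tx);

	/* New interface: pass the spa_feature_t constant directly. */
	if (spa_feature_is_enabled(spa, SPA_FEATURE_SPACEMAP_HISTOGRAM))
		spa_feature_incr(spa, SPA_FEATURE_SPACEMAP_HISTOGRAM, tx);

The histogram-related hunks in space_map_truncate(), space_map_alloc(), and
space_map_free() below are all instances of this same conversion.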
--- old/usr/src/uts/common/fs/zfs/space_map.c
+++ new/usr/src/uts/common/fs/zfs/space_map.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21 /*
22 22 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
23 23 * Use is subject to license terms.
24 24 */
25 25 /*
26 26 * Copyright (c) 2013 by Delphix. All rights reserved.
27 27 */
28 28
29 29 #include <sys/zfs_context.h>
30 30 #include <sys/spa.h>
31 31 #include <sys/dmu.h>
32 32 #include <sys/dmu_tx.h>
33 33 #include <sys/dnode.h>
34 34 #include <sys/dsl_pool.h>
35 35 #include <sys/zio.h>
36 36 #include <sys/space_map.h>
37 37 #include <sys/refcount.h>
38 38 #include <sys/zfeature.h>
39 39
40 40 /*
41 41 * This value controls how the space map's block size is allowed to grow.
42 42 * If the value is set to the same size as SPACE_MAP_INITIAL_BLOCKSIZE then
43 43 * the space map block size will remain fixed. Setting this value to something
44 44 * greater than SPACE_MAP_INITIAL_BLOCKSIZE will allow the space map to
45 45 * increase its block size as needed. To maintain backwards compatibility the
46 46 * space map's block size must be a power of 2 and SPACE_MAP_INITIAL_BLOCKSIZE
47 47 * or larger.
48 48 */
49 49 int space_map_max_blksz = (1 << 12);
50 50
51 51 /*
52 52 * Load the space map from disk into the specified range tree. Segments of maptype
53 53 * are added to the range tree, other segment types are removed.
54 54 *
55 55 * Note: space_map_load() will drop sm_lock across dmu_read() calls.
56 56 * The caller must be OK with this.
57 57 */
58 58 int
59 59 space_map_load(space_map_t *sm, range_tree_t *rt, maptype_t maptype)
60 60 {
61 61 uint64_t *entry, *entry_map, *entry_map_end;
62 62 uint64_t bufsize, size, offset, end, space;
63 63 int error = 0;
64 64
65 65 ASSERT(MUTEX_HELD(sm->sm_lock));
66 66
67 67 end = space_map_length(sm);
68 68 space = space_map_allocated(sm);
69 69
70 70 VERIFY0(range_tree_space(rt));
71 71
72 72 if (maptype == SM_FREE) {
73 73 range_tree_add(rt, sm->sm_start, sm->sm_size);
74 74 space = sm->sm_size - space;
75 75 }
76 76
77 77 bufsize = MAX(sm->sm_blksz, SPA_MINBLOCKSIZE);
78 78 entry_map = zio_buf_alloc(bufsize);
79 79
80 80 mutex_exit(sm->sm_lock);
81 81 if (end > bufsize) {
82 82 dmu_prefetch(sm->sm_os, space_map_object(sm), bufsize,
83 83 end - bufsize);
84 84 }
85 85 mutex_enter(sm->sm_lock);
86 86
87 87 for (offset = 0; offset < end; offset += bufsize) {
88 88 size = MIN(end - offset, bufsize);
89 89 VERIFY(P2PHASE(size, sizeof (uint64_t)) == 0);
90 90 VERIFY(size != 0);
91 91 ASSERT3U(sm->sm_blksz, !=, 0);
92 92
93 93 dprintf("object=%llu offset=%llx size=%llx\n",
94 94 space_map_object(sm), offset, size);
95 95
96 96 mutex_exit(sm->sm_lock);
97 97 error = dmu_read(sm->sm_os, space_map_object(sm), offset, size,
98 98 entry_map, DMU_READ_PREFETCH);
99 99 mutex_enter(sm->sm_lock);
100 100 if (error != 0)
101 101 break;
102 102
103 103 entry_map_end = entry_map + (size / sizeof (uint64_t));
104 104 for (entry = entry_map; entry < entry_map_end; entry++) {
105 105 uint64_t e = *entry;
106 106 uint64_t offset, size;
107 107
108 108 if (SM_DEBUG_DECODE(e)) /* Skip debug entries */
109 109 continue;
110 110
111 111 offset = (SM_OFFSET_DECODE(e) << sm->sm_shift) +
112 112 sm->sm_start;
113 113 size = SM_RUN_DECODE(e) << sm->sm_shift;
114 114
115 115 VERIFY0(P2PHASE(offset, 1ULL << sm->sm_shift));
116 116 VERIFY0(P2PHASE(size, 1ULL << sm->sm_shift));
117 117 VERIFY3U(offset, >=, sm->sm_start);
118 118 VERIFY3U(offset + size, <=, sm->sm_start + sm->sm_size);
119 119 if (SM_TYPE_DECODE(e) == maptype) {
120 120 VERIFY3U(range_tree_space(rt) + size, <=,
121 121 sm->sm_size);
122 122 range_tree_add(rt, offset, size);
123 123 } else {
124 124 range_tree_remove(rt, offset, size);
125 125 }
126 126 }
127 127 }
128 128
129 129 if (error == 0)
130 130 VERIFY3U(range_tree_space(rt), ==, space);
131 131 else
132 132 range_tree_vacate(rt, NULL, NULL);
133 133
134 134 zio_buf_free(entry_map, bufsize);
135 135 return (error);
136 136 }
137 137
138 138 void
139 139 space_map_histogram_clear(space_map_t *sm)
140 140 {
141 141 if (sm->sm_dbuf->db_size != sizeof (space_map_phys_t))
142 142 return;
143 143
144 144 bzero(sm->sm_phys->smp_histogram, sizeof (sm->sm_phys->smp_histogram));
145 145 }
146 146
147 147 boolean_t
148 148 space_map_histogram_verify(space_map_t *sm, range_tree_t *rt)
149 149 {
150 150 /*
151 151 * Verify that the in-core range tree does not have any
152 152 * ranges smaller than our sm_shift size.
153 153 */
154 154 for (int i = 0; i < sm->sm_shift; i++) {
155 155 if (rt->rt_histogram[i] != 0)
156 156 return (B_FALSE);
157 157 }
158 158 return (B_TRUE);
159 159 }
160 160
161 161 void
162 162 space_map_histogram_add(space_map_t *sm, range_tree_t *rt, dmu_tx_t *tx)
163 163 {
164 164 int idx = 0;
165 165
166 166 ASSERT(MUTEX_HELD(rt->rt_lock));
167 167 ASSERT(dmu_tx_is_syncing(tx));
168 168 VERIFY3U(space_map_object(sm), !=, 0);
169 169
170 170 if (sm->sm_dbuf->db_size != sizeof (space_map_phys_t))
171 171 return;
172 172
173 173 dmu_buf_will_dirty(sm->sm_dbuf, tx);
174 174
175 175 ASSERT(space_map_histogram_verify(sm, rt));
176 176
177 177 /*
178 178 * Transfer the content of the range tree histogram to the space
179 179 * map histogram. The space map histogram contains 32 buckets ranging
180 180 * from 2^sm_shift to 2^(32+sm_shift-1). The range tree,
181 181 * however, can represent ranges from 2^0 to 2^63. Since the space
182 182 * map only cares about allocatable blocks (minimum of sm_shift) we
183 183 * can safely ignore all ranges in the range tree smaller than sm_shift.
184 184 */
185 185 for (int i = sm->sm_shift; i < RANGE_TREE_HISTOGRAM_SIZE; i++) {
186 186
187 187 /*
188 188 * Since the largest histogram bucket in the space map is
189 189 * 2^(32+sm_shift-1), we need to normalize the values in
190 190 * the range tree for any bucket larger than that size. For
191 191 * example given an sm_shift of 9, ranges larger than 2^40
192 192 * would get normalized as if they were 1TB ranges. Assume
193 193 * the range tree had a count of 5 in the 2^44 (16TB) bucket,
194 194 * the calculation below would normalize this to 5 * 2^4 (16).
195 195 */
196 196 ASSERT3U(i, >=, idx + sm->sm_shift);
197 197 sm->sm_phys->smp_histogram[idx] +=
198 198 rt->rt_histogram[i] << (i - idx - sm->sm_shift);
199 199
200 200 /*
201 201 * Increment the space map's index as long as we haven't
202 202 * reached the maximum bucket size. Accumulate all ranges
203 203 * larger than the max bucket size into the last bucket.
204 204 */
205 205 if (idx < SPACE_MAP_HISTOGRAM_SIZE(sm) - 1) {
206 206 ASSERT3U(idx + sm->sm_shift, ==, i);
207 207 idx++;
208 208 ASSERT3U(idx, <, SPACE_MAP_HISTOGRAM_SIZE(sm));
209 209 }
210 210 }
211 211 }
212 212
213 213 uint64_t
214 214 space_map_entries(space_map_t *sm, range_tree_t *rt)
215 215 {
216 216 avl_tree_t *t = &rt->rt_root;
217 217 range_seg_t *rs;
218 218 uint64_t size, entries;
219 219
220 220 /*
221 221 * All space_maps always have a debug entry so account for it here.
222 222 */
223 223 entries = 1;
224 224
225 225 /*
226 226 * Traverse the range tree and calculate the number of space map
227 227 * entries that would be required to write out the range tree.
228 228 */
229 229 for (rs = avl_first(t); rs != NULL; rs = AVL_NEXT(t, rs)) {
230 230 size = (rs->rs_end - rs->rs_start) >> sm->sm_shift;
231 231 entries += howmany(size, SM_RUN_MAX);
232 232 }
233 233 return (entries);
234 234 }
235 235
236 236 void
237 237 space_map_set_blocksize(space_map_t *sm, uint64_t size, dmu_tx_t *tx)
238 238 {
239 239 uint32_t blksz;
240 240 u_longlong_t blocks;
241 241
242 242 ASSERT3U(sm->sm_blksz, !=, 0);
243 243 ASSERT3U(space_map_object(sm), !=, 0);
244 244 ASSERT(sm->sm_dbuf != NULL);
245 245 VERIFY(ISP2(space_map_max_blksz));
246 246
247 247 if (sm->sm_blksz >= space_map_max_blksz)
248 248 return;
249 249
250 250 /*
251 251 * The object contains more than one block so we can't adjust
252 252 * its size.
253 253 */
254 254 if (sm->sm_phys->smp_objsize > sm->sm_blksz)
255 255 return;
256 256
257 257 if (size > sm->sm_blksz) {
258 258 uint64_t newsz;
259 259
260 260 /*
261 261 * Older software versions treat space map blocks as fixed
262 262 * entities. The DMU is capable of handling different block
263 263 * sizes making it possible for us to increase the
264 264 * block size and maintain backwards compatibility. The
265 265 * caveat is that the new block sizes must be a
266 266 * power of 2 so that old software can append to the file,
267 267 * adding more blocks. The block size can grow until it
268 268 * reaches space_map_max_blksz.
269 269 */
270 270 newsz = ISP2(size) ? size : 1ULL << highbit(size);
271 271 if (newsz > space_map_max_blksz)
272 272 newsz = space_map_max_blksz;
273 273
274 274 VERIFY0(dmu_object_set_blocksize(sm->sm_os,
275 275 space_map_object(sm), newsz, 0, tx));
276 276 dmu_object_size_from_db(sm->sm_dbuf, &blksz, &blocks);
277 277
278 278 zfs_dbgmsg("txg %llu, spa %s, increasing blksz from %d to %d",
279 279 dmu_tx_get_txg(tx), spa_name(dmu_objset_spa(sm->sm_os)),
280 280 sm->sm_blksz, blksz);
281 281
282 282 VERIFY3U(newsz, ==, blksz);
283 283 VERIFY3U(sm->sm_blksz, <, blksz);
284 284 sm->sm_blksz = blksz;
285 285 }
286 286 }
287 287
288 288 /*
289 289 * Note: space_map_write() will drop sm_lock across dmu_write() calls.
290 290 */
291 291 void
292 292 space_map_write(space_map_t *sm, range_tree_t *rt, maptype_t maptype,
293 293 dmu_tx_t *tx)
294 294 {
295 295 objset_t *os = sm->sm_os;
296 296 spa_t *spa = dmu_objset_spa(os);
297 297 avl_tree_t *t = &rt->rt_root;
298 298 range_seg_t *rs;
299 299 uint64_t size, total, rt_space, nodes;
300 300 uint64_t *entry, *entry_map, *entry_map_end;
301 301 uint64_t newsz, expected_entries, actual_entries = 1;
302 302
303 303 ASSERT(MUTEX_HELD(rt->rt_lock));
304 304 ASSERT(dsl_pool_sync_context(dmu_objset_pool(os)));
305 305 VERIFY3U(space_map_object(sm), !=, 0);
306 306 dmu_buf_will_dirty(sm->sm_dbuf, tx);
307 307
308 308 /*
309 309 * This field is no longer necessary since the in-core space map
310 310 * now contains the object number but is maintained for backwards
311 311 * compatibility.
312 312 */
313 313 sm->sm_phys->smp_object = sm->sm_object;
314 314
315 315 if (range_tree_space(rt) == 0) {
316 316 VERIFY3U(sm->sm_object, ==, sm->sm_phys->smp_object);
317 317 return;
318 318 }
319 319
320 320 if (maptype == SM_ALLOC)
321 321 sm->sm_phys->smp_alloc += range_tree_space(rt);
322 322 else
323 323 sm->sm_phys->smp_alloc -= range_tree_space(rt);
324 324
325 325 expected_entries = space_map_entries(sm, rt);
326 326
327 327 /*
328 328 * Calculate the new size for the space map on-disk and see if
329 329 * we can grow the block size to accommodate the new size.
330 330 */
331 331 newsz = sm->sm_phys->smp_objsize + expected_entries * sizeof (uint64_t);
332 332 space_map_set_blocksize(sm, newsz, tx);
333 333
334 334 entry_map = zio_buf_alloc(sm->sm_blksz);
335 335 entry_map_end = entry_map + (sm->sm_blksz / sizeof (uint64_t));
336 336 entry = entry_map;
337 337
338 338 *entry++ = SM_DEBUG_ENCODE(1) |
339 339 SM_DEBUG_ACTION_ENCODE(maptype) |
340 340 SM_DEBUG_SYNCPASS_ENCODE(spa_sync_pass(spa)) |
341 341 SM_DEBUG_TXG_ENCODE(dmu_tx_get_txg(tx));
342 342
343 343 total = 0;
344 344 nodes = avl_numnodes(&rt->rt_root);
345 345 rt_space = range_tree_space(rt);
346 346 for (rs = avl_first(t); rs != NULL; rs = AVL_NEXT(t, rs)) {
347 347 uint64_t start;
348 348
349 349 size = (rs->rs_end - rs->rs_start) >> sm->sm_shift;
350 350 start = (rs->rs_start - sm->sm_start) >> sm->sm_shift;
351 351
352 352 total += size << sm->sm_shift;
353 353
354 354 while (size != 0) {
355 355 uint64_t run_len;
356 356
357 357 run_len = MIN(size, SM_RUN_MAX);
358 358
359 359 if (entry == entry_map_end) {
360 360 mutex_exit(rt->rt_lock);
361 361 dmu_write(os, space_map_object(sm),
362 362 sm->sm_phys->smp_objsize, sm->sm_blksz,
363 363 entry_map, tx);
364 364 mutex_enter(rt->rt_lock);
365 365 sm->sm_phys->smp_objsize += sm->sm_blksz;
366 366 entry = entry_map;
367 367 }
368 368
369 369 *entry++ = SM_OFFSET_ENCODE(start) |
370 370 SM_TYPE_ENCODE(maptype) |
371 371 SM_RUN_ENCODE(run_len);
372 372
373 373 start += run_len;
374 374 size -= run_len;
375 375 actual_entries++;
376 376 }
377 377 }
378 378
379 379 if (entry != entry_map) {
380 380 size = (entry - entry_map) * sizeof (uint64_t);
381 381 mutex_exit(rt->rt_lock);
382 382 dmu_write(os, space_map_object(sm), sm->sm_phys->smp_objsize,
383 383 size, entry_map, tx);
384 384 mutex_enter(rt->rt_lock);
385 385 sm->sm_phys->smp_objsize += size;
386 386 }
387 387 ASSERT3U(expected_entries, ==, actual_entries);
388 388
389 389 /*
390 390 * Ensure that the space_map's accounting wasn't changed
391 391 * while we were in the middle of writing it out.
392 392 */
393 393 VERIFY3U(nodes, ==, avl_numnodes(&rt->rt_root));
394 394 VERIFY3U(range_tree_space(rt), ==, rt_space);
395 395 VERIFY3U(range_tree_space(rt), ==, total);
396 396
397 397 zio_buf_free(entry_map, sm->sm_blksz);
398 398 }
399 399
400 400 static int
401 401 space_map_open_impl(space_map_t *sm)
402 402 {
403 403 int error;
404 404 u_longlong_t blocks;
405 405
406 406 error = dmu_bonus_hold(sm->sm_os, sm->sm_object, sm, &sm->sm_dbuf);
407 407 if (error)
408 408 return (error);
409 409
410 410 dmu_object_size_from_db(sm->sm_dbuf, &sm->sm_blksz, &blocks);
411 411 sm->sm_phys = sm->sm_dbuf->db_data;
412 412 return (0);
413 413 }
414 414
415 415 int
416 416 space_map_open(space_map_t **smp, objset_t *os, uint64_t object,
417 417 uint64_t start, uint64_t size, uint8_t shift, kmutex_t *lp)
418 418 {
419 419 space_map_t *sm;
420 420 int error;
421 421
422 422 ASSERT(*smp == NULL);
423 423 ASSERT(os != NULL);
424 424 ASSERT(object != 0);
425 425
426 426 sm = kmem_zalloc(sizeof (space_map_t), KM_SLEEP);
427 427
428 428 sm->sm_start = start;
429 429 sm->sm_size = size;
430 430 sm->sm_shift = shift;
431 431 sm->sm_lock = lp;
432 432 sm->sm_os = os;
433 433 sm->sm_object = object;
434 434
435 435 error = space_map_open_impl(sm);
436 436 if (error != 0) {
437 437 space_map_close(sm);
438 438 return (error);
439 439 }
440 440
441 441 *smp = sm;
442 442
443 443 return (0);
444 444 }
445 445
446 446 void
447 447 space_map_close(space_map_t *sm)
448 448 {
449 449 if (sm == NULL)
450 450 return;
451 451
452 452 if (sm->sm_dbuf != NULL)
453 453 dmu_buf_rele(sm->sm_dbuf, sm);
454 454 sm->sm_dbuf = NULL;
455 455 sm->sm_phys = NULL;
456 456
457 457 kmem_free(sm, sizeof (*sm));
458 458 }
459 459
460 460 static void
461 461 space_map_reallocate(space_map_t *sm, dmu_tx_t *tx)
462 462 {
463 463 ASSERT(dmu_tx_is_syncing(tx));
464 464
465 465 space_map_free(sm, tx);
466 466 dmu_buf_rele(sm->sm_dbuf, sm);
467 467
468 468 sm->sm_object = space_map_alloc(sm->sm_os, tx);
469 469 VERIFY0(space_map_open_impl(sm));
470 470 }
471 471
472 472 void
473 473 space_map_truncate(space_map_t *sm, dmu_tx_t *tx)
474 474 {
475 475 objset_t *os = sm->sm_os;
476 476 spa_t *spa = dmu_objset_spa(os);
477 - zfeature_info_t *space_map_histogram =
478 - &spa_feature_table[SPA_FEATURE_SPACEMAP_HISTOGRAM];
479 477 dmu_object_info_t doi;
480 478 int bonuslen;
481 479
482 480 ASSERT(dsl_pool_sync_context(dmu_objset_pool(os)));
483 481 ASSERT(dmu_tx_is_syncing(tx));
484 482
485 483 VERIFY0(dmu_free_range(os, space_map_object(sm), 0, -1ULL, tx));
486 484 dmu_object_info_from_db(sm->sm_dbuf, &doi);
487 485
488 - if (spa_feature_is_enabled(spa, space_map_histogram)) {
486 + if (spa_feature_is_enabled(spa, SPA_FEATURE_SPACEMAP_HISTOGRAM)) {
489 487 bonuslen = sizeof (space_map_phys_t);
490 488 ASSERT3U(bonuslen, <=, dmu_bonus_max());
491 489 } else {
492 490 bonuslen = SPACE_MAP_SIZE_V0;
493 491 }
494 492
495 493 if (bonuslen != doi.doi_bonus_size ||
496 494 doi.doi_data_block_size != SPACE_MAP_INITIAL_BLOCKSIZE) {
497 495 zfs_dbgmsg("txg %llu, spa %s, reallocating: "
498 496 "old bonus %u, old blocksz %u", dmu_tx_get_txg(tx),
499 497 spa_name(spa), doi.doi_bonus_size, doi.doi_data_block_size);
500 498 space_map_reallocate(sm, tx);
501 499 VERIFY3U(sm->sm_blksz, ==, SPACE_MAP_INITIAL_BLOCKSIZE);
502 500 }
503 501
504 502 dmu_buf_will_dirty(sm->sm_dbuf, tx);
505 503 sm->sm_phys->smp_objsize = 0;
506 504 sm->sm_phys->smp_alloc = 0;
507 505 }
508 506
509 507 /*
510 508 * Update the in-core space_map allocation and length values.
511 509 */
512 510 void
513 511 space_map_update(space_map_t *sm)
514 512 {
515 513 if (sm == NULL)
516 514 return;
517 515
518 516 ASSERT(MUTEX_HELD(sm->sm_lock));
519 517
520 518 sm->sm_alloc = sm->sm_phys->smp_alloc;
521 519 sm->sm_length = sm->sm_phys->smp_objsize;
522 520 }
523 521
524 522 uint64_t
525 523 space_map_alloc(objset_t *os, dmu_tx_t *tx)
526 524 {
527 525 spa_t *spa = dmu_objset_spa(os);
528 - zfeature_info_t *space_map_histogram =
529 - &spa_feature_table[SPA_FEATURE_SPACEMAP_HISTOGRAM];
530 526 uint64_t object;
531 527 int bonuslen;
532 528
533 - if (spa_feature_is_enabled(spa, space_map_histogram)) {
534 - spa_feature_incr(spa, space_map_histogram, tx);
529 + if (spa_feature_is_enabled(spa, SPA_FEATURE_SPACEMAP_HISTOGRAM)) {
530 + spa_feature_incr(spa, SPA_FEATURE_SPACEMAP_HISTOGRAM, tx);
535 531 bonuslen = sizeof (space_map_phys_t);
536 532 ASSERT3U(bonuslen, <=, dmu_bonus_max());
537 533 } else {
538 534 bonuslen = SPACE_MAP_SIZE_V0;
539 535 }
540 536
541 537 object = dmu_object_alloc(os,
542 538 DMU_OT_SPACE_MAP, SPACE_MAP_INITIAL_BLOCKSIZE,
543 539 DMU_OT_SPACE_MAP_HEADER, bonuslen, tx);
544 540
545 541 return (object);
546 542 }
547 543
548 544 void
549 545 space_map_free(space_map_t *sm, dmu_tx_t *tx)
550 546 {
551 547 spa_t *spa;
552 - zfeature_info_t *space_map_histogram =
553 - &spa_feature_table[SPA_FEATURE_SPACEMAP_HISTOGRAM];
554 548
555 549 if (sm == NULL)
556 550 return;
557 551
558 552 spa = dmu_objset_spa(sm->sm_os);
559 - if (spa_feature_is_enabled(spa, space_map_histogram)) {
553 + if (spa_feature_is_enabled(spa, SPA_FEATURE_SPACEMAP_HISTOGRAM)) {
560 554 dmu_object_info_t doi;
561 555
562 556 dmu_object_info_from_db(sm->sm_dbuf, &doi);
563 557 if (doi.doi_bonus_size != SPACE_MAP_SIZE_V0) {
564 - VERIFY(spa_feature_is_active(spa, space_map_histogram));
565 - spa_feature_decr(spa, space_map_histogram, tx);
558 + VERIFY(spa_feature_is_active(spa,
559 + SPA_FEATURE_SPACEMAP_HISTOGRAM));
560 + spa_feature_decr(spa,
561 + SPA_FEATURE_SPACEMAP_HISTOGRAM, tx);
566 562 }
567 563 }
568 564
569 565 VERIFY3U(dmu_object_free(sm->sm_os, space_map_object(sm), tx), ==, 0);
570 566 sm->sm_object = 0;
571 567 }
572 568
573 569 uint64_t
574 570 space_map_object(space_map_t *sm)
575 571 {
576 572 return (sm != NULL ? sm->sm_object : 0);
577 573 }
578 574
579 575 /*
580 576 * Returns the already synced, on-disk allocated space.
581 577 */
582 578 uint64_t
583 579 space_map_allocated(space_map_t *sm)
584 580 {
585 581 return (sm != NULL ? sm->sm_alloc : 0);
586 582 }
587 583
588 584 /*
589 585 * Returns the already synced, on-disk length.
590 586 */
591 587 uint64_t
592 588 space_map_length(space_map_t *sm)
593 589 {
594 590 return (sm != NULL ? sm->sm_length : 0);
595 591 }
596 592
597 593 /*
598 594 * Returns the allocated space that is currently syncing.
599 595 */
600 596 int64_t
601 597 space_map_alloc_delta(space_map_t *sm)
602 598 {
603 599 if (sm == NULL)
604 600 return (0);
605 601 ASSERT(sm->sm_dbuf != NULL);
606 602 return (sm->sm_phys->smp_alloc - space_map_allocated(sm));
607 603 }
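The densest arithmetic in this file is the histogram transfer in space_map_histogram_add()
above. The following standalone sketch reproduces that normalization using the in-code
comment's example of sm_shift = 9, and assumes a full-size 32-bucket space map histogram;
the constants, names, and program below are illustrative only and are not part of the patch:

	#include <stdio.h>
	#include <stdint.h>

	#define	RT_HISTOGRAM_SIZE	64	/* range tree buckets cover 2^0 .. 2^63 */
	#define	SM_HISTOGRAM_SIZE	32	/* assumed full-size space map histogram */

	int
	main(void)
	{
		uint64_t rt_histogram[RT_HISTOGRAM_SIZE] = { 0 };
		uint64_t smp_histogram[SM_HISTOGRAM_SIZE] = { 0 };
		int sm_shift = 9;	/* smallest allocatable block: 2^9 = 512 bytes */
		int idx = 0;

		rt_histogram[44] = 5;	/* five ~16TB ranges, as in the comment's example */

		for (int i = sm_shift; i < RT_HISTOGRAM_SIZE; i++) {
			/*
			 * Same shift as the kernel loop: range tree buckets
			 * beyond the largest space map bucket (2^40 here) are
			 * folded into the last bucket, scaled by how many
			 * top-bucket-sized ranges they represent.
			 */
			smp_histogram[idx] += rt_histogram[i] << (i - idx - sm_shift);
			if (idx < SM_HISTOGRAM_SIZE - 1)
				idx++;
		}

		/* A 2^44 range spans 2^4 = 16 top-sized (2^40) ranges, so 5 becomes 80. */
		printf("top bucket: %llu\n",
		    (unsigned long long)smp_histogram[SM_HISTOGRAM_SIZE - 1]);
		return (0);
	}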