/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */
/*
 * Copyright (c) 2013 by Delphix. All rights reserved.
 */

#include <sys/zfs_context.h>
#include <sys/spa.h>
#include <sys/dmu.h>
#include <sys/dmu_tx.h>
#include <sys/dnode.h>
#include <sys/dsl_pool.h>
#include <sys/zio.h>
#include <sys/space_map.h>
#include <sys/refcount.h>
#include <sys/zfeature.h>

/*
 * This value controls how the space map's block size is allowed to grow.
 * If the value is set to the same size as SPACE_MAP_INITIAL_BLOCKSIZE then
 * the space map block size will remain fixed. Setting this value to something
 * greater than SPACE_MAP_INITIAL_BLOCKSIZE will allow the space map to
 * increase its block size as needed. To maintain backwards compatibility the
 * space map's block size must be a power of 2 and SPACE_MAP_INITIAL_BLOCKSIZE
 * or larger.
 */
int space_map_max_blksz = (1 << 12);

/*
 * Load the space map from disk into the specified range tree. Segments of
 * maptype are added to the range tree; other segment types are removed.
 *
 * Note: space_map_load() will drop sm_lock across dmu_read() calls.
 * The caller must be OK with this.
 */
int
space_map_load(space_map_t *sm, range_tree_t *rt, maptype_t maptype)
{
	uint64_t *entry, *entry_map, *entry_map_end;
	uint64_t bufsize, size, offset, end, space;
	int error = 0;

	ASSERT(MUTEX_HELD(sm->sm_lock));

	end = space_map_length(sm);
	space = space_map_allocated(sm);

	VERIFY0(range_tree_space(rt));

	if (maptype == SM_FREE) {
		range_tree_add(rt, sm->sm_start, sm->sm_size);
		space = sm->sm_size - space;
	}

	bufsize = MAX(sm->sm_blksz, SPA_MINBLOCKSIZE);
	entry_map = zio_buf_alloc(bufsize);

	mutex_exit(sm->sm_lock);
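	/*
	 * Prefetch everything past the first bufsize bytes of the object;
	 * each chunk is then read below with dmu_read() while sm_lock is
	 * dropped.
	 */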
	if (end > bufsize) {
		dmu_prefetch(sm->sm_os, space_map_object(sm), bufsize,
		    end - bufsize);
	}
	mutex_enter(sm->sm_lock);

	for (offset = 0; offset < end; offset += bufsize) {
		size = MIN(end - offset, bufsize);
		VERIFY(P2PHASE(size, sizeof (uint64_t)) == 0);
		VERIFY(size != 0);
		ASSERT3U(sm->sm_blksz, !=, 0);

		dprintf("object=%llu offset=%llx size=%llx\n",
		    space_map_object(sm), offset, size);

		mutex_exit(sm->sm_lock);
		error = dmu_read(sm->sm_os, space_map_object(sm), offset, size,
		    entry_map, DMU_READ_PREFETCH);
		mutex_enter(sm->sm_lock);
		if (error != 0)
			break;

		entry_map_end = entry_map + (size / sizeof (uint64_t));
		for (entry = entry_map; entry < entry_map_end; entry++) {
			uint64_t e = *entry;
			uint64_t offset, size;

			if (SM_DEBUG_DECODE(e))	/* Skip debug entries */
				continue;
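
			/*
			 * Entries encode offsets and run lengths in units of
			 * 2^sm_shift relative to sm_start; convert them back
			 * to absolute offsets and sizes here.
			 */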
			offset = (SM_OFFSET_DECODE(e) << sm->sm_shift) +
			    sm->sm_start;
			size = SM_RUN_DECODE(e) << sm->sm_shift;

			VERIFY0(P2PHASE(offset, 1ULL << sm->sm_shift));
			VERIFY0(P2PHASE(size, 1ULL << sm->sm_shift));
			VERIFY3U(offset, >=, sm->sm_start);
			VERIFY3U(offset + size, <=, sm->sm_start + sm->sm_size);
			if (SM_TYPE_DECODE(e) == maptype) {
				VERIFY3U(range_tree_space(rt) + size, <=,
				    sm->sm_size);
				range_tree_add(rt, offset, size);
			} else {
				range_tree_remove(rt, offset, size);
			}
		}
	}

	if (error == 0)
		VERIFY3U(range_tree_space(rt), ==, space);
	else
		range_tree_vacate(rt, NULL, NULL);

	zio_buf_free(entry_map, bufsize);
	return (error);
}

void
space_map_histogram_clear(space_map_t *sm)
{
	if (sm->sm_dbuf->db_size != sizeof (space_map_phys_t))
		return;

	bzero(sm->sm_phys->smp_histogram, sizeof (sm->sm_phys->smp_histogram));
}

boolean_t
space_map_histogram_verify(space_map_t *sm, range_tree_t *rt)
{
	/*
	 * Verify that the in-core range tree does not have any
	 * ranges smaller than our sm_shift size.
	 */
	for (int i = 0; i < sm->sm_shift; i++) {
		if (rt->rt_histogram[i] != 0)
			return (B_FALSE);
	}
	return (B_TRUE);
}

void
space_map_histogram_add(space_map_t *sm, range_tree_t *rt, dmu_tx_t *tx)
{
	int idx = 0;

	ASSERT(MUTEX_HELD(rt->rt_lock));
	ASSERT(dmu_tx_is_syncing(tx));
	VERIFY3U(space_map_object(sm), !=, 0);

	if (sm->sm_dbuf->db_size != sizeof (space_map_phys_t))
		return;

	dmu_buf_will_dirty(sm->sm_dbuf, tx);

	ASSERT(space_map_histogram_verify(sm, rt));

	/*
	 * Transfer the content of the range tree histogram to the space
	 * map histogram. The space map histogram contains 32 buckets ranging
	 * from 2^sm_shift to 2^(32+sm_shift-1). The range tree,
	 * however, can represent ranges from 2^0 to 2^63. Since the space
	 * map only cares about allocatable blocks (minimum of sm_shift) we
	 * can safely ignore all ranges in the range tree smaller than sm_shift.
	 */
	for (int i = sm->sm_shift; i < RANGE_TREE_HISTOGRAM_SIZE; i++) {

		/*
		 * Since the largest histogram bucket in the space map is
		 * 2^(32+sm_shift-1), we need to normalize the values in
		 * the range tree for any bucket larger than that size. For
		 * example, given an sm_shift of 9, ranges larger than 2^40
		 * would get normalized as if they were 1TB ranges. If the
		 * range tree had a count of 5 in the 2^44 (16TB) bucket,
		 * the calculation below would normalize this to 5 * 2^4
		 * (i.e., 80) in the last bucket.
		 */
		ASSERT3U(i, >=, idx + sm->sm_shift);
		sm->sm_phys->smp_histogram[idx] +=
		    rt->rt_histogram[i] << (i - idx - sm->sm_shift);

		/*
		 * Increment the space map's index as long as we haven't
		 * reached the maximum bucket size. Accumulate all ranges
		 * larger than the max bucket size into the last bucket.
		 */
		if (idx < SPACE_MAP_HISTOGRAM_SIZE(sm) - 1) {
			ASSERT3U(idx + sm->sm_shift, ==, i);
			idx++;
			ASSERT3U(idx, <, SPACE_MAP_HISTOGRAM_SIZE(sm));
		}
	}
}

uint64_t
space_map_entries(space_map_t *sm, range_tree_t *rt)
{
	avl_tree_t *t = &rt->rt_root;
	range_seg_t *rs;
	uint64_t size, entries;

	/*
	 * All space_maps always have a debug entry so account for it here.
	 */
	entries = 1;

	/*
	 * Traverse the range tree and calculate the number of space map
	 * entries that would be required to write out the range tree.
	 */
	for (rs = avl_first(t); rs != NULL; rs = AVL_NEXT(t, rs)) {
		size = (rs->rs_end - rs->rs_start) >> sm->sm_shift;
		entries += howmany(size, SM_RUN_MAX);
	}
	return (entries);
}

void
space_map_set_blocksize(space_map_t *sm, uint64_t size, dmu_tx_t *tx)
{
	uint32_t blksz;
	u_longlong_t blocks;

	ASSERT3U(sm->sm_blksz, !=, 0);
	ASSERT3U(space_map_object(sm), !=, 0);
	ASSERT(sm->sm_dbuf != NULL);
	VERIFY(ISP2(space_map_max_blksz));

	if (sm->sm_blksz >= space_map_max_blksz)
		return;

	/*
	 * The object contains more than one block so we can't adjust
	 * its size.
	 */
	if (sm->sm_phys->smp_objsize > sm->sm_blksz)
		return;

	if (size > sm->sm_blksz) {
		uint64_t newsz;

		/*
		 * Older software versions treat space map blocks as fixed
		 * entities. The DMU is capable of handling different block
		 * sizes, making it possible for us to increase the
		 * block size and maintain backwards compatibility. The
		 * caveat is that the new block size must be a
		 * power of 2 so that old software can append to the file,
		 * adding more blocks. The block size can grow until it
		 * reaches space_map_max_blksz.
		 */
		newsz = ISP2(size) ? size : 1ULL << highbit(size);
		if (newsz > space_map_max_blksz)
			newsz = space_map_max_blksz;

		VERIFY0(dmu_object_set_blocksize(sm->sm_os,
		    space_map_object(sm), newsz, 0, tx));
		dmu_object_size_from_db(sm->sm_dbuf, &blksz, &blocks);

		zfs_dbgmsg("txg %llu, spa %s, increasing blksz from %d to %d",
		    dmu_tx_get_txg(tx), spa_name(dmu_objset_spa(sm->sm_os)),
		    sm->sm_blksz, blksz);

		VERIFY3U(newsz, ==, blksz);
		VERIFY3U(sm->sm_blksz, <, blksz);
		sm->sm_blksz = blksz;
	}
}

/*
 * Note: space_map_write() will drop sm_lock across dmu_write() calls.
 */
void
space_map_write(space_map_t *sm, range_tree_t *rt, maptype_t maptype,
    dmu_tx_t *tx)
{
	objset_t *os = sm->sm_os;
	spa_t *spa = dmu_objset_spa(os);
	avl_tree_t *t = &rt->rt_root;
	range_seg_t *rs;
	uint64_t size, total, rt_space, nodes;
	uint64_t *entry, *entry_map, *entry_map_end;
	uint64_t newsz, expected_entries, actual_entries = 1;

	ASSERT(MUTEX_HELD(rt->rt_lock));
	ASSERT(dsl_pool_sync_context(dmu_objset_pool(os)));
	VERIFY3U(space_map_object(sm), !=, 0);
	dmu_buf_will_dirty(sm->sm_dbuf, tx);

	/*
	 * This field is no longer necessary since the in-core space map
	 * now contains the object number but is maintained for backwards
	 * compatibility.
	 */
	sm->sm_phys->smp_object = sm->sm_object;

	if (range_tree_space(rt) == 0) {
		VERIFY3U(sm->sm_object, ==, sm->sm_phys->smp_object);
		return;
	}

	if (maptype == SM_ALLOC)
		sm->sm_phys->smp_alloc += range_tree_space(rt);
	else
		sm->sm_phys->smp_alloc -= range_tree_space(rt);

	expected_entries = space_map_entries(sm, rt);

	/*
	 * Calculate the new size for the space map on-disk and see if
	 * we can grow the block size to accommodate the new size.
	 */
	newsz = sm->sm_phys->smp_objsize + expected_entries * sizeof (uint64_t);
	space_map_set_blocksize(sm, newsz, tx);

	entry_map = zio_buf_alloc(sm->sm_blksz);
	entry_map_end = entry_map + (sm->sm_blksz / sizeof (uint64_t));
	entry = entry_map;
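
	/*
	 * The first entry written is a debug entry recording the type of
	 * operation (alloc or free), the current sync pass, and the txg
	 * in which this write happens.
	 */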
	*entry++ = SM_DEBUG_ENCODE(1) |
	    SM_DEBUG_ACTION_ENCODE(maptype) |
	    SM_DEBUG_SYNCPASS_ENCODE(spa_sync_pass(spa)) |
	    SM_DEBUG_TXG_ENCODE(dmu_tx_get_txg(tx));

	total = 0;
	nodes = avl_numnodes(&rt->rt_root);
	rt_space = range_tree_space(rt);
	for (rs = avl_first(t); rs != NULL; rs = AVL_NEXT(t, rs)) {
		uint64_t start;

		size = (rs->rs_end - rs->rs_start) >> sm->sm_shift;
		start = (rs->rs_start - sm->sm_start) >> sm->sm_shift;

		total += size << sm->sm_shift;

		while (size != 0) {
			uint64_t run_len;

			run_len = MIN(size, SM_RUN_MAX);

			if (entry == entry_map_end) {
				mutex_exit(rt->rt_lock);
				dmu_write(os, space_map_object(sm),
				    sm->sm_phys->smp_objsize, sm->sm_blksz,
				    entry_map, tx);
				mutex_enter(rt->rt_lock);
				sm->sm_phys->smp_objsize += sm->sm_blksz;
				entry = entry_map;
			}

			*entry++ = SM_OFFSET_ENCODE(start) |
			    SM_TYPE_ENCODE(maptype) |
			    SM_RUN_ENCODE(run_len);

			start += run_len;
			size -= run_len;
			actual_entries++;
		}
	}
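
	/*
	 * Write out any entries left over in the partially filled final
	 * buffer.
	 */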
	if (entry != entry_map) {
		size = (entry - entry_map) * sizeof (uint64_t);
		mutex_exit(rt->rt_lock);
		dmu_write(os, space_map_object(sm), sm->sm_phys->smp_objsize,
		    size, entry_map, tx);
		mutex_enter(rt->rt_lock);
		sm->sm_phys->smp_objsize += size;
	}
	ASSERT3U(expected_entries, ==, actual_entries);

	/*
	 * Ensure that the space_map's accounting wasn't changed
	 * while we were in the middle of writing it out.
	 */
	VERIFY3U(nodes, ==, avl_numnodes(&rt->rt_root));
	VERIFY3U(range_tree_space(rt), ==, rt_space);
	VERIFY3U(range_tree_space(rt), ==, total);

	zio_buf_free(entry_map, sm->sm_blksz);
}

static int
space_map_open_impl(space_map_t *sm)
{
	int error;
	u_longlong_t blocks;

	error = dmu_bonus_hold(sm->sm_os, sm->sm_object, sm, &sm->sm_dbuf);
	if (error)
		return (error);

	dmu_object_size_from_db(sm->sm_dbuf, &sm->sm_blksz, &blocks);
	sm->sm_phys = sm->sm_dbuf->db_data;
	return (0);
}

int
space_map_open(space_map_t **smp, objset_t *os, uint64_t object,
    uint64_t start, uint64_t size, uint8_t shift, kmutex_t *lp)
{
	space_map_t *sm;
	int error;

	ASSERT(*smp == NULL);
	ASSERT(os != NULL);
	ASSERT(object != 0);

	sm = kmem_zalloc(sizeof (space_map_t), KM_SLEEP);

	sm->sm_start = start;
	sm->sm_size = size;
	sm->sm_shift = shift;
	sm->sm_lock = lp;
	sm->sm_os = os;
	sm->sm_object = object;

	error = space_map_open_impl(sm);
	if (error != 0) {
		space_map_close(sm);
		return (error);
	}

	*smp = sm;

	return (0);
}

void
space_map_close(space_map_t *sm)
{
	if (sm == NULL)
		return;

	if (sm->sm_dbuf != NULL)
		dmu_buf_rele(sm->sm_dbuf, sm);
	sm->sm_dbuf = NULL;
	sm->sm_phys = NULL;

	kmem_free(sm, sizeof (*sm));
}

static void
space_map_reallocate(space_map_t *sm, dmu_tx_t *tx)
{
	ASSERT(dmu_tx_is_syncing(tx));

	space_map_free(sm, tx);
	dmu_buf_rele(sm->sm_dbuf, sm);

	sm->sm_object = space_map_alloc(sm->sm_os, tx);
	VERIFY0(space_map_open_impl(sm));
}

void
space_map_truncate(space_map_t *sm, dmu_tx_t *tx)
{
	objset_t *os = sm->sm_os;
	spa_t *spa = dmu_objset_spa(os);
	zfeature_info_t *space_map_histogram =
	    &spa_feature_table[SPA_FEATURE_SPACEMAP_HISTOGRAM];
	dmu_object_info_t doi;
	int bonuslen;

	ASSERT(dsl_pool_sync_context(dmu_objset_pool(os)));
	ASSERT(dmu_tx_is_syncing(tx));

	VERIFY0(dmu_free_range(os, space_map_object(sm), 0, -1ULL, tx));
	dmu_object_info_from_db(sm->sm_dbuf, &doi);

	if (spa_feature_is_enabled(spa, space_map_histogram)) {
		bonuslen = sizeof (space_map_phys_t);
		ASSERT3U(bonuslen, <=, dmu_bonus_max());
	} else {
		bonuslen = SPACE_MAP_SIZE_V0;
	}
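
	/*
	 * If the existing object's bonus buffer or block size doesn't match
	 * what the current configuration requires, reallocate the object.
	 */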
	if (bonuslen != doi.doi_bonus_size ||
	    doi.doi_data_block_size != SPACE_MAP_INITIAL_BLOCKSIZE) {
		zfs_dbgmsg("txg %llu, spa %s, reallocating: "
		    "old bonus %u, old blocksz %u", dmu_tx_get_txg(tx),
		    spa_name(spa), doi.doi_bonus_size, doi.doi_data_block_size);
		space_map_reallocate(sm, tx);
		VERIFY3U(sm->sm_blksz, ==, SPACE_MAP_INITIAL_BLOCKSIZE);
	}

	dmu_buf_will_dirty(sm->sm_dbuf, tx);
	sm->sm_phys->smp_objsize = 0;
	sm->sm_phys->smp_alloc = 0;
}

/*
 * Update the in-core space_map allocation and length values.
 */
void
space_map_update(space_map_t *sm)
{
	if (sm == NULL)
		return;

	ASSERT(MUTEX_HELD(sm->sm_lock));

	sm->sm_alloc = sm->sm_phys->smp_alloc;
	sm->sm_length = sm->sm_phys->smp_objsize;
}

uint64_t
space_map_alloc(objset_t *os, dmu_tx_t *tx)
{
	spa_t *spa = dmu_objset_spa(os);
	zfeature_info_t *space_map_histogram =
	    &spa_feature_table[SPA_FEATURE_SPACEMAP_HISTOGRAM];
	uint64_t object;
	int bonuslen;

	if (spa_feature_is_enabled(spa, space_map_histogram)) {
		spa_feature_incr(spa, space_map_histogram, tx);
		bonuslen = sizeof (space_map_phys_t);
		ASSERT3U(bonuslen, <=, dmu_bonus_max());
	} else {
		bonuslen = SPACE_MAP_SIZE_V0;
	}

	object = dmu_object_alloc(os,
	    DMU_OT_SPACE_MAP, SPACE_MAP_INITIAL_BLOCKSIZE,
	    DMU_OT_SPACE_MAP_HEADER, bonuslen, tx);

	return (object);
}

void
space_map_free(space_map_t *sm, dmu_tx_t *tx)
{
	spa_t *spa;
	zfeature_info_t *space_map_histogram =
	    &spa_feature_table[SPA_FEATURE_SPACEMAP_HISTOGRAM];

	if (sm == NULL)
		return;

	spa = dmu_objset_spa(sm->sm_os);
	if (spa_feature_is_enabled(spa, space_map_histogram)) {
		dmu_object_info_t doi;

		dmu_object_info_from_db(sm->sm_dbuf, &doi);
		if (doi.doi_bonus_size != SPACE_MAP_SIZE_V0) {
			VERIFY(spa_feature_is_active(spa, space_map_histogram));
			spa_feature_decr(spa, space_map_histogram, tx);
		}
	}

	VERIFY3U(dmu_object_free(sm->sm_os, space_map_object(sm), tx), ==, 0);
	sm->sm_object = 0;
}

uint64_t
space_map_object(space_map_t *sm)
{
	return (sm != NULL ? sm->sm_object : 0);
}

/*
 * Returns the already synced, on-disk allocated space.
 */
uint64_t
space_map_allocated(space_map_t *sm)
{
	return (sm != NULL ? sm->sm_alloc : 0);
}

/*
 * Returns the already synced, on-disk length.
 */
uint64_t
space_map_length(space_map_t *sm)
{
	return (sm != NULL ? sm->sm_length : 0);
}

/*
 * Returns the allocated space that is currently syncing.
 */
int64_t
space_map_alloc_delta(space_map_t *sm)
{
	if (sm == NULL)
		return (0);
	ASSERT(sm->sm_dbuf != NULL);
	return (sm->sm_phys->smp_alloc - space_map_allocated(sm));
}