/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2011, 2015 by Delphix. All rights reserved.
 * Copyright (c) 2014, Joyent, Inc. All rights reserved.
 * Copyright 2014 HybridCluster. All rights reserved.
 * Copyright 2016 RackTop Systems.
 * Copyright (c) 2014 Integros [integros.com]
 */

#include <sys/dmu.h>
#include <sys/dmu_impl.h>
#include <sys/dmu_tx.h>
#include <sys/dbuf.h>
#include <sys/dnode.h>
#include <sys/zfs_context.h>
#include <sys/dmu_objset.h>
#include <sys/dmu_traverse.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_synctask.h>
#include <sys/zfs_ioctl.h>
#include <sys/zap.h>
#include <sys/zio_checksum.h>
#include <sys/zfs_znode.h>
#include <zfs_fletcher.h>
#include <sys/avl.h>
#include <sys/ddt.h>
#include <sys/zfs_onexit.h>
#include <sys/dmu_send.h>
#include <sys/dsl_destroy.h>
#include <sys/blkptr.h>
#include <sys/dsl_bookmark.h>
#include <sys/zfeature.h>
#include <sys/bqueue.h>

/* Set this tunable to TRUE to replace corrupt data with 0x2f5baddb10c */
int zfs_send_corrupt_data = B_FALSE;
int zfs_send_queue_length = 16 * 1024 * 1024;
int zfs_recv_queue_length = 16 * 1024 * 1024;
/* Set this tunable to FALSE to disable setting of DRR_FLAG_FREERECORDS */
int zfs_send_set_freerecords_bit = B_TRUE;

static char *dmu_recv_tag = "dmu_recv_tag";
const char *recv_clone_name = "%recv";

#define	BP_SPAN(datablkszsec, indblkshift, level) \
	(((uint64_t)datablkszsec) << (SPA_MINBLOCKSHIFT + \
	(level) * (indblkshift - SPA_BLKPTRSHIFT)))
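
/*
 * For example (illustrative values), with 128KB data blocks
 * (datablkszsec == 256) and 128KB indirect blocks (indblkshift == 17,
 * each holding 1024 128-byte block pointers), BP_SPAN evaluates to
 * 128KB at level 0, 128MB at level 1, and 128GB at level 2.
 */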

static void byteswap_record(dmu_replay_record_t *drr);

struct send_thread_arg {
	bqueue_t q;
	dsl_dataset_t *ds;	/* Dataset to traverse */
	uint64_t fromtxg;	/* Traverse from this txg */
	int flags;		/* flags to pass to traverse_dataset */
	int error_code;
	boolean_t cancel;
	zbookmark_phys_t resume;
};

struct send_block_record {
	boolean_t eos_marker;	/* Marks the end of the stream */
	blkptr_t bp;
	zbookmark_phys_t zb;
	uint8_t indblkshift;
	uint16_t datablkszsec;
	bqueue_node_t ln;
};

static int
dump_bytes(dmu_sendarg_t *dsp, void *buf, int len)
{
	dsl_dataset_t *ds = dmu_objset_ds(dsp->dsa_os);
	ssize_t resid; /* have to get resid to get detailed errno */

	/*
	 * The code does not rely on this (len being a multiple of 8). We keep
	 * this assertion because of the corresponding assertion in
	 * receive_read(). Keeping this assertion ensures that we do not
	 * inadvertently break backwards compatibility (causing the assertion
	 * in receive_read() to trigger on old software).
	 *
	 * Removing the assertions could be rolled into a new feature that uses
	 * data that isn't 8-byte aligned; if the assertions were removed, a
	 * feature flag would have to be added.
	 */

	ASSERT0(len % 8);

	dsp->dsa_err = vn_rdwr(UIO_WRITE, dsp->dsa_vp,
	    (caddr_t)buf, len,
	    0, UIO_SYSSPACE, FAPPEND, RLIM64_INFINITY, CRED(), &resid);

	mutex_enter(&ds->ds_sendstream_lock);
	*dsp->dsa_off += len;
	mutex_exit(&ds->ds_sendstream_lock);

	return (dsp->dsa_err);
}

/*
 * For all record types except BEGIN, fill in the checksum (overlaid in
 * drr_u.drr_checksum.drr_checksum). The checksum verifies everything
 * up to the start of the checksum itself.
 */
static int
dump_record(dmu_sendarg_t *dsp, void *payload, int payload_len)
{
	ASSERT3U(offsetof(dmu_replay_record_t, drr_u.drr_checksum.drr_checksum),
	    ==, sizeof (dmu_replay_record_t) - sizeof (zio_cksum_t));
	fletcher_4_incremental_native(dsp->dsa_drr,
	    offsetof(dmu_replay_record_t, drr_u.drr_checksum.drr_checksum),
	    &dsp->dsa_zc);
	if (dsp->dsa_drr->drr_type != DRR_BEGIN) {
		ASSERT(ZIO_CHECKSUM_IS_ZERO(&dsp->dsa_drr->drr_u.
		    drr_checksum.drr_checksum));
		dsp->dsa_drr->drr_u.drr_checksum.drr_checksum = dsp->dsa_zc;
	}
	fletcher_4_incremental_native(&dsp->dsa_drr->
	    drr_u.drr_checksum.drr_checksum,
	    sizeof (zio_cksum_t), &dsp->dsa_zc);
	if (dump_bytes(dsp, dsp->dsa_drr, sizeof (dmu_replay_record_t)) != 0)
		return (SET_ERROR(EINTR));
	if (payload_len != 0) {
		fletcher_4_incremental_native(payload, payload_len,
		    &dsp->dsa_zc);
		if (dump_bytes(dsp, payload, payload_len) != 0)
			return (SET_ERROR(EINTR));
	}
	return (0);
}
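
/*
 * The embedded checksum is therefore a running fletcher-4 over the
 * entire stream so far. As an illustrative sketch, a receiver verifies
 * record N by folding everything up to N's checksum field into its own
 * running sum, comparing that sum against the field, and then folding
 * the field itself in before reading record N+1.
 */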

/*
 * Fill in the drr_free struct, or perform aggregation if the previous record is
 * also a free record, and the two are adjacent.
 *
 * Note that we send free records even for a full send, because we want to be
 * able to receive a full send as a clone, which requires a list of all the free
 * and freeobject records that were generated on the source.
 */
static int
dump_free(dmu_sendarg_t *dsp, uint64_t object, uint64_t offset,
    uint64_t length)
{
	struct drr_free *drrf = &(dsp->dsa_drr->drr_u.drr_free);

	/*
	 * When we receive a free record, dbuf_free_range() assumes
	 * that the receiving system doesn't have any dbufs in the range
	 * being freed. This is always true because there is a one-record
	 * constraint: we only send one WRITE record for any given
	 * object,offset. We know that the one-record constraint is
	 * true because we always send data in increasing order by
	 * object,offset.
	 *
	 * If the increasing-order constraint ever changes, we should find
	 * another way to assert that the one-record constraint is still
	 * satisfied.
	 */
	ASSERT(object > dsp->dsa_last_data_object ||
	    (object == dsp->dsa_last_data_object &&
	    offset > dsp->dsa_last_data_offset));

	if (length != -1ULL && offset + length < offset)
		length = -1ULL;

	/*
	 * If there is a pending op, but it's not PENDING_FREE, push it out,
	 * since free block aggregation can only be done for blocks of the
	 * same type (i.e., DRR_FREE records can only be aggregated with
	 * other DRR_FREE records; DRR_FREEOBJECTS records can only be
	 * aggregated with other DRR_FREEOBJECTS records).
	 */
	if (dsp->dsa_pending_op != PENDING_NONE &&
	    dsp->dsa_pending_op != PENDING_FREE) {
		if (dump_record(dsp, NULL, 0) != 0)
			return (SET_ERROR(EINTR));
		dsp->dsa_pending_op = PENDING_NONE;
	}

	if (dsp->dsa_pending_op == PENDING_FREE) {
		/*
		 * There should never be a PENDING_FREE if length is -1
		 * (because dump_dnode is the only place where this
		 * function is called with a -1, and only after flushing
		 * any pending record).
		 */
		ASSERT(length != -1ULL);
		/*
		 * Check to see whether this free block can be aggregated
		 * with the pending one.
		 */
		if (drrf->drr_object == object && drrf->drr_offset +
		    drrf->drr_length == offset) {
			drrf->drr_length += length;
			return (0);
		} else {
			/* not a continuation. Push out pending record */
			if (dump_record(dsp, NULL, 0) != 0)
				return (SET_ERROR(EINTR));
			dsp->dsa_pending_op = PENDING_NONE;
		}
	}
	/* create a FREE record and make it pending */
	bzero(dsp->dsa_drr, sizeof (dmu_replay_record_t));
	dsp->dsa_drr->drr_type = DRR_FREE;
	drrf->drr_object = object;
	drrf->drr_offset = offset;
	drrf->drr_length = length;
	drrf->drr_toguid = dsp->dsa_toguid;
	if (length == -1ULL) {
		if (dump_record(dsp, NULL, 0) != 0)
			return (SET_ERROR(EINTR));
	} else {
		dsp->dsa_pending_op = PENDING_FREE;
	}

	return (0);
}
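
/*
 * For example (values chosen for illustration), three adjacent frees of
 * 128KB blocks in object 5:
 *
 *	dump_free(dsp, 5, 0 * 131072, 131072);
 *	dump_free(dsp, 5, 1 * 131072, 131072);
 *	dump_free(dsp, 5, 2 * 131072, 131072);
 *
 * coalesce into one pending DRR_FREE record with drr_offset == 0 and
 * drr_length == 393216, which is pushed out by the next record of a
 * different type (or by the end of the stream).
 */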

static int
dump_write(dmu_sendarg_t *dsp, dmu_object_type_t type,
    uint64_t object, uint64_t offset, int blksz, const blkptr_t *bp, void *data)
{
	struct drr_write *drrw = &(dsp->dsa_drr->drr_u.drr_write);

	/*
	 * We send data in increasing object, offset order.
	 * See comment in dump_free() for details.
	 */
	ASSERT(object > dsp->dsa_last_data_object ||
	    (object == dsp->dsa_last_data_object &&
	    offset > dsp->dsa_last_data_offset));
	dsp->dsa_last_data_object = object;
	dsp->dsa_last_data_offset = offset + blksz - 1;

	/*
	 * If there is any kind of pending aggregation (currently either
	 * a grouping of free objects or free blocks), push it out to
	 * the stream, since aggregation can't be done across operations
	 * of different types.
	 */
	if (dsp->dsa_pending_op != PENDING_NONE) {
		if (dump_record(dsp, NULL, 0) != 0)
			return (SET_ERROR(EINTR));
		dsp->dsa_pending_op = PENDING_NONE;
	}
	/* write a WRITE record */
	bzero(dsp->dsa_drr, sizeof (dmu_replay_record_t));
	dsp->dsa_drr->drr_type = DRR_WRITE;
	drrw->drr_object = object;
	drrw->drr_type = type;
	drrw->drr_offset = offset;
	drrw->drr_length = blksz;
	drrw->drr_toguid = dsp->dsa_toguid;
	if (bp == NULL || BP_IS_EMBEDDED(bp)) {
		/*
		 * There's no pre-computed checksum for partial-block
		 * writes or embedded BP's, so (like
		 * fletcher4-checksummed blocks) userland will have to
		 * compute a dedup-capable checksum itself.
		 */
		drrw->drr_checksumtype = ZIO_CHECKSUM_OFF;
	} else {
		drrw->drr_checksumtype = BP_GET_CHECKSUM(bp);
		if (zio_checksum_table[drrw->drr_checksumtype].ci_flags &
		    ZCHECKSUM_FLAG_DEDUP)
			drrw->drr_checksumflags |= DRR_CHECKSUM_DEDUP;
		DDK_SET_LSIZE(&drrw->drr_key, BP_GET_LSIZE(bp));
		DDK_SET_PSIZE(&drrw->drr_key, BP_GET_PSIZE(bp));
		DDK_SET_COMPRESS(&drrw->drr_key, BP_GET_COMPRESS(bp));
		drrw->drr_key.ddk_cksum = bp->blk_cksum;
	}

	if (dump_record(dsp, data, blksz) != 0)
		return (SET_ERROR(EINTR));
	return (0);
}

static int
dump_write_embedded(dmu_sendarg_t *dsp, uint64_t object, uint64_t offset,
    int blksz, const blkptr_t *bp)
{
	char buf[BPE_PAYLOAD_SIZE];
	struct drr_write_embedded *drrw =
	    &(dsp->dsa_drr->drr_u.drr_write_embedded);

	if (dsp->dsa_pending_op != PENDING_NONE) {
		if (dump_record(dsp, NULL, 0) != 0)
			return (EINTR);
		dsp->dsa_pending_op = PENDING_NONE;
	}

	ASSERT(BP_IS_EMBEDDED(bp));

	bzero(dsp->dsa_drr, sizeof (dmu_replay_record_t));
	dsp->dsa_drr->drr_type = DRR_WRITE_EMBEDDED;
	drrw->drr_object = object;
	drrw->drr_offset = offset;
	drrw->drr_length = blksz;
	drrw->drr_toguid = dsp->dsa_toguid;
	drrw->drr_compression = BP_GET_COMPRESS(bp);
	drrw->drr_etype = BPE_GET_ETYPE(bp);
	drrw->drr_lsize = BPE_GET_LSIZE(bp);
	drrw->drr_psize = BPE_GET_PSIZE(bp);

	decode_embedded_bp_compressed(bp, buf);

	if (dump_record(dsp, buf, P2ROUNDUP(drrw->drr_psize, 8)) != 0)
		return (EINTR);
	return (0);
}
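
/*
 * Note that the embedded payload is padded out to the 8-byte alignment
 * that dump_bytes() asserts: e.g., a 37-byte compressed payload goes
 * out as 40 bytes on the wire, while drr_psize still records 37.
 */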

static int
dump_spill(dmu_sendarg_t *dsp, uint64_t object, int blksz, void *data)
{
	struct drr_spill *drrs = &(dsp->dsa_drr->drr_u.drr_spill);

	if (dsp->dsa_pending_op != PENDING_NONE) {
		if (dump_record(dsp, NULL, 0) != 0)
			return (SET_ERROR(EINTR));
		dsp->dsa_pending_op = PENDING_NONE;
	}

	/* write a SPILL record */
	bzero(dsp->dsa_drr, sizeof (dmu_replay_record_t));
	dsp->dsa_drr->drr_type = DRR_SPILL;
	drrs->drr_object = object;
	drrs->drr_length = blksz;
	drrs->drr_toguid = dsp->dsa_toguid;

	if (dump_record(dsp, data, blksz) != 0)
		return (SET_ERROR(EINTR));
	return (0);
}

static int
dump_freeobjects(dmu_sendarg_t *dsp, uint64_t firstobj, uint64_t numobjs)
{
	struct drr_freeobjects *drrfo = &(dsp->dsa_drr->drr_u.drr_freeobjects);

	/*
	 * If there is a pending op, but it's not PENDING_FREEOBJECTS,
	 * push it out, since free block aggregation can only be done for
	 * blocks of the same type (i.e., DRR_FREE records can only be
	 * aggregated with other DRR_FREE records; DRR_FREEOBJECTS records
	 * can only be aggregated with other DRR_FREEOBJECTS records).
	 */
	if (dsp->dsa_pending_op != PENDING_NONE &&
	    dsp->dsa_pending_op != PENDING_FREEOBJECTS) {
		if (dump_record(dsp, NULL, 0) != 0)
			return (SET_ERROR(EINTR));
		dsp->dsa_pending_op = PENDING_NONE;
	}
	if (dsp->dsa_pending_op == PENDING_FREEOBJECTS) {
		/*
		 * See whether this free object array can be aggregated
		 * with the pending one.
		 */
		if (drrfo->drr_firstobj + drrfo->drr_numobjs == firstobj) {
			drrfo->drr_numobjs += numobjs;
			return (0);
		} else {
			/* can't be aggregated. Push out pending record */
			if (dump_record(dsp, NULL, 0) != 0)
				return (SET_ERROR(EINTR));
			dsp->dsa_pending_op = PENDING_NONE;
		}
	}

	/* write a FREEOBJECTS record */
	bzero(dsp->dsa_drr, sizeof (dmu_replay_record_t));
	dsp->dsa_drr->drr_type = DRR_FREEOBJECTS;
	drrfo->drr_firstobj = firstobj;
	drrfo->drr_numobjs = numobjs;
	drrfo->drr_toguid = dsp->dsa_toguid;

	dsp->dsa_pending_op = PENDING_FREEOBJECTS;

	return (0);
}

static int
dump_dnode(dmu_sendarg_t *dsp, uint64_t object, dnode_phys_t *dnp)
{
	struct drr_object *drro = &(dsp->dsa_drr->drr_u.drr_object);

	if (object < dsp->dsa_resume_object) {
		/*
		 * Note: when resuming, we will visit all the dnodes in
		 * the block of dnodes that we are resuming from. In
		 * this case it's unnecessary to send the dnodes prior to
		 * the one we are resuming from. We should be at most one
		 * block's worth of dnodes behind the resume point.
		 */
		ASSERT3U(dsp->dsa_resume_object - object, <,
		    1 << (DNODE_BLOCK_SHIFT - DNODE_SHIFT));
		return (0);
	}

	if (dnp == NULL || dnp->dn_type == DMU_OT_NONE)
		return (dump_freeobjects(dsp, object, 1));

	if (dsp->dsa_pending_op != PENDING_NONE) {
		if (dump_record(dsp, NULL, 0) != 0)
			return (SET_ERROR(EINTR));
		dsp->dsa_pending_op = PENDING_NONE;
	}

	/* write an OBJECT record */
	bzero(dsp->dsa_drr, sizeof (dmu_replay_record_t));
	dsp->dsa_drr->drr_type = DRR_OBJECT;
	drro->drr_object = object;
	drro->drr_type = dnp->dn_type;
	drro->drr_bonustype = dnp->dn_bonustype;
	drro->drr_blksz = dnp->dn_datablkszsec << SPA_MINBLOCKSHIFT;
	drro->drr_bonuslen = dnp->dn_bonuslen;
	drro->drr_checksumtype = dnp->dn_checksum;
	drro->drr_compress = dnp->dn_compress;
	drro->drr_toguid = dsp->dsa_toguid;

	if (!(dsp->dsa_featureflags & DMU_BACKUP_FEATURE_LARGE_BLOCKS) &&
	    drro->drr_blksz > SPA_OLD_MAXBLOCKSIZE)
		drro->drr_blksz = SPA_OLD_MAXBLOCKSIZE;

	if (dump_record(dsp, DN_BONUS(dnp),
	    P2ROUNDUP(dnp->dn_bonuslen, 8)) != 0) {
		return (SET_ERROR(EINTR));
	}

	/* Free anything past the end of the file. */
	if (dump_free(dsp, object, (dnp->dn_maxblkid + 1) *
	    (dnp->dn_datablkszsec << SPA_MINBLOCKSHIFT), -1ULL) != 0)
		return (SET_ERROR(EINTR));
	if (dsp->dsa_err != 0)
		return (SET_ERROR(EINTR));
	return (0);
}
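
/*
 * For example (illustrative values), a dnode with dn_maxblkid == 3 and
 * 128KB data blocks has data only below offset 512KB, so dump_dnode()
 * emits dump_free(dsp, object, 524288, -1ULL) to trim everything past
 * the end of the file on the receiving side.
 */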

static boolean_t
backup_do_embed(dmu_sendarg_t *dsp, const blkptr_t *bp)
{
	if (!BP_IS_EMBEDDED(bp))
		return (B_FALSE);

	/*
	 * Compression function must be legacy, or explicitly enabled.
	 */
	if ((BP_GET_COMPRESS(bp) >= ZIO_COMPRESS_LEGACY_FUNCTIONS &&
	    !(dsp->dsa_featureflags & DMU_BACKUP_FEATURE_EMBED_DATA_LZ4)))
		return (B_FALSE);

	/*
	 * Embed type must be explicitly enabled.
	 */
	switch (BPE_GET_ETYPE(bp)) {
	case BP_EMBEDDED_TYPE_DATA:
		if (dsp->dsa_featureflags & DMU_BACKUP_FEATURE_EMBED_DATA)
			return (B_TRUE);
		break;
	default:
		return (B_FALSE);
	}
	return (B_FALSE);
}

/*
 * This is the callback function to traverse_dataset that acts as the worker
 * thread for dmu_send_impl.
 */
/*ARGSUSED*/
static int
send_cb(spa_t *spa, zilog_t *zilog, const blkptr_t *bp,
    const zbookmark_phys_t *zb, const struct dnode_phys *dnp, void *arg)
{
	struct send_thread_arg *sta = arg;
	struct send_block_record *record;
	uint64_t record_size;
	int err = 0;

	ASSERT(zb->zb_object == DMU_META_DNODE_OBJECT ||
	    zb->zb_object >= sta->resume.zb_object);

	if (sta->cancel)
		return (SET_ERROR(EINTR));

	if (bp == NULL) {
		ASSERT3U(zb->zb_level, ==, ZB_DNODE_LEVEL);
		return (0);
	} else if (zb->zb_level < 0) {
		return (0);
	}

	record = kmem_zalloc(sizeof (struct send_block_record), KM_SLEEP);
	record->eos_marker = B_FALSE;
	record->bp = *bp;
	record->zb = *zb;
	record->indblkshift = dnp->dn_indblkshift;
	record->datablkszsec = dnp->dn_datablkszsec;
	record_size = dnp->dn_datablkszsec << SPA_MINBLOCKSHIFT;
	bqueue_enqueue(&sta->q, record, record_size);

	return (err);
}

/*
 * This function kicks off the traverse_dataset. It also handles setting the
 * error code of the thread in case something goes wrong, and pushes the End of
 * Stream record when the traverse_dataset call has finished. If there is no
 * dataset to traverse, the thread immediately pushes the End of Stream marker.
 */
static void
send_traverse_thread(void *arg)
{
	struct send_thread_arg *st_arg = arg;
	int err;
	struct send_block_record *data;

	if (st_arg->ds != NULL) {
		err = traverse_dataset_resume(st_arg->ds,
		    st_arg->fromtxg, &st_arg->resume,
		    st_arg->flags, send_cb, st_arg);

		if (err != EINTR)
			st_arg->error_code = err;
	}
	data = kmem_zalloc(sizeof (*data), KM_SLEEP);
	data->eos_marker = B_TRUE;
	bqueue_enqueue(&st_arg->q, data, 1);
}
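
/*
 * Sketch of the send pipeline: send_traverse_thread() produces
 * send_block_record entries into the bqueue while the main thread in
 * dmu_send_impl() consumes them via do_dump(); the eos_marker record
 * is the final hand-off. Backpressure comes from bqueue_enqueue()
 * charging each record its data block size against
 * zfs_send_queue_length (16MB by default).
 */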

/*
 * This function actually handles figuring out what kind of record needs to be
 * dumped, reading the data (which has hopefully been prefetched), and calling
 * the appropriate helper function.
 */
static int
do_dump(dmu_sendarg_t *dsa, struct send_block_record *data)
{
	dsl_dataset_t *ds = dmu_objset_ds(dsa->dsa_os);
	const blkptr_t *bp = &data->bp;
	const zbookmark_phys_t *zb = &data->zb;
	uint8_t indblkshift = data->indblkshift;
	uint16_t dblkszsec = data->datablkszsec;
	spa_t *spa = ds->ds_dir->dd_pool->dp_spa;
	dmu_object_type_t type = bp ? BP_GET_TYPE(bp) : DMU_OT_NONE;
	int err = 0;

	ASSERT3U(zb->zb_level, >=, 0);

	ASSERT(zb->zb_object == DMU_META_DNODE_OBJECT ||
	    zb->zb_object >= dsa->dsa_resume_object);

	if (zb->zb_object != DMU_META_DNODE_OBJECT &&
	    DMU_OBJECT_IS_SPECIAL(zb->zb_object)) {
		return (0);
	} else if (BP_IS_HOLE(bp) &&
	    zb->zb_object == DMU_META_DNODE_OBJECT) {
		uint64_t span = BP_SPAN(dblkszsec, indblkshift, zb->zb_level);
		uint64_t dnobj = (zb->zb_blkid * span) >> DNODE_SHIFT;
		err = dump_freeobjects(dsa, dnobj, span >> DNODE_SHIFT);
	} else if (BP_IS_HOLE(bp)) {
		uint64_t span = BP_SPAN(dblkszsec, indblkshift, zb->zb_level);
		uint64_t offset = zb->zb_blkid * span;
		err = dump_free(dsa, zb->zb_object, offset, span);
	} else if (zb->zb_level > 0 || type == DMU_OT_OBJSET) {
		return (0);
	} else if (type == DMU_OT_DNODE) {
		int blksz = BP_GET_LSIZE(bp);
		arc_flags_t aflags = ARC_FLAG_WAIT;
		arc_buf_t *abuf;

		ASSERT0(zb->zb_level);

		if (arc_read(NULL, spa, bp, arc_getbuf_func, &abuf,
		    ZIO_PRIORITY_ASYNC_READ, ZIO_FLAG_CANFAIL,
		    &aflags, zb) != 0)
			return (SET_ERROR(EIO));

		dnode_phys_t *blk = abuf->b_data;
		uint64_t dnobj = zb->zb_blkid * (blksz >> DNODE_SHIFT);
		for (int i = 0; i < blksz >> DNODE_SHIFT; i++) {
			err = dump_dnode(dsa, dnobj + i, blk + i);
			if (err != 0)
				break;
		}
		(void) arc_buf_remove_ref(abuf, &abuf);
	} else if (type == DMU_OT_SA) {
		arc_flags_t aflags = ARC_FLAG_WAIT;
		arc_buf_t *abuf;
		int blksz = BP_GET_LSIZE(bp);

		if (arc_read(NULL, spa, bp, arc_getbuf_func, &abuf,
		    ZIO_PRIORITY_ASYNC_READ, ZIO_FLAG_CANFAIL,
		    &aflags, zb) != 0)
			return (SET_ERROR(EIO));

		err = dump_spill(dsa, zb->zb_object, blksz, abuf->b_data);
		(void) arc_buf_remove_ref(abuf, &abuf);
	} else if (backup_do_embed(dsa, bp)) {
		/* it's an embedded level-0 block of a regular object */
		int blksz = dblkszsec << SPA_MINBLOCKSHIFT;
		ASSERT0(zb->zb_level);
		err = dump_write_embedded(dsa, zb->zb_object,
		    zb->zb_blkid * blksz, blksz, bp);
	} else {
		/* it's a level-0 block of a regular object */
		arc_flags_t aflags = ARC_FLAG_WAIT;
		arc_buf_t *abuf;
		int blksz = dblkszsec << SPA_MINBLOCKSHIFT;
		uint64_t offset;

		ASSERT0(zb->zb_level);
		ASSERT(zb->zb_object > dsa->dsa_resume_object ||
		    (zb->zb_object == dsa->dsa_resume_object &&
		    zb->zb_blkid * blksz >= dsa->dsa_resume_offset));

		if (arc_read(NULL, spa, bp, arc_getbuf_func, &abuf,
		    ZIO_PRIORITY_ASYNC_READ, ZIO_FLAG_CANFAIL,
		    &aflags, zb) != 0) {
			if (zfs_send_corrupt_data) {
				/* Send a block filled with 0x"zfs badd bloc" */
				abuf = arc_buf_alloc(spa, blksz, &abuf,
				    ARC_BUFC_DATA);
				uint64_t *ptr;
				for (ptr = abuf->b_data;
				    (char *)ptr < (char *)abuf->b_data + blksz;
				    ptr++)
					*ptr = 0x2f5baddb10cULL;
			} else {
				return (SET_ERROR(EIO));
			}
		}

		offset = zb->zb_blkid * blksz;

		if (!(dsa->dsa_featureflags &
		    DMU_BACKUP_FEATURE_LARGE_BLOCKS) &&
		    blksz > SPA_OLD_MAXBLOCKSIZE) {
			char *buf = abuf->b_data;
			while (blksz > 0 && err == 0) {
				int n = MIN(blksz, SPA_OLD_MAXBLOCKSIZE);
				err = dump_write(dsa, type, zb->zb_object,
				    offset, n, NULL, buf);
				offset += n;
				buf += n;
				blksz -= n;
			}
		} else {
			err = dump_write(dsa, type, zb->zb_object,
			    offset, blksz, bp, abuf->b_data);
		}
		(void) arc_buf_remove_ref(abuf, &abuf);
	}

	ASSERT(err == 0 || err == EINTR);
	return (err);
}
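
/*
 * For example (illustrative values), a level-0 hole at blkid 3 of
 * object 7 with 128KB blocks becomes dump_free(dsa, 7, 393216, 131072),
 * while a meta-dnode hole spanning 128MB frees 128MB >> DNODE_SHIFT ==
 * 262144 object numbers via dump_freeobjects().
 */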

/*
 * Pop the new data off the queue, and free the old data.
 */
static struct send_block_record *
get_next_record(bqueue_t *bq, struct send_block_record *data)
{
	struct send_block_record *tmp = bqueue_dequeue(bq);
	kmem_free(data, sizeof (*data));
	return (tmp);
}

/*
 * Actually do the bulk of the work in a zfs send.
 *
 * Note: Releases dp using the specified tag.
 */
static int
dmu_send_impl(void *tag, dsl_pool_t *dp, dsl_dataset_t *to_ds,
    zfs_bookmark_phys_t *ancestor_zb,
    boolean_t is_clone, boolean_t embedok, boolean_t large_block_ok, int outfd,
    uint64_t resumeobj, uint64_t resumeoff,
    vnode_t *vp, offset_t *off)
{
	objset_t *os;
	dmu_replay_record_t *drr;
	dmu_sendarg_t *dsp;
	int err;
	uint64_t fromtxg = 0;
	uint64_t featureflags = 0;
	struct send_thread_arg to_arg = { 0 };

	err = dmu_objset_from_ds(to_ds, &os);
	if (err != 0) {
		dsl_pool_rele(dp, tag);
		return (err);
	}

	drr = kmem_zalloc(sizeof (dmu_replay_record_t), KM_SLEEP);
	drr->drr_type = DRR_BEGIN;
	drr->drr_u.drr_begin.drr_magic = DMU_BACKUP_MAGIC;
	DMU_SET_STREAM_HDRTYPE(drr->drr_u.drr_begin.drr_versioninfo,
	    DMU_SUBSTREAM);

#ifdef _KERNEL
	if (dmu_objset_type(os) == DMU_OST_ZFS) {
		uint64_t version;
		if (zfs_get_zplprop(os, ZFS_PROP_VERSION, &version) != 0) {
			kmem_free(drr, sizeof (dmu_replay_record_t));
			dsl_pool_rele(dp, tag);
			return (SET_ERROR(EINVAL));
		}
		if (version >= ZPL_VERSION_SA) {
			featureflags |= DMU_BACKUP_FEATURE_SA_SPILL;
		}
	}
#endif

	if (large_block_ok && to_ds->ds_feature_inuse[SPA_FEATURE_LARGE_BLOCKS])
		featureflags |= DMU_BACKUP_FEATURE_LARGE_BLOCKS;
	if (embedok &&
	    spa_feature_is_active(dp->dp_spa, SPA_FEATURE_EMBEDDED_DATA)) {
		featureflags |= DMU_BACKUP_FEATURE_EMBED_DATA;
		if (spa_feature_is_active(dp->dp_spa, SPA_FEATURE_LZ4_COMPRESS))
			featureflags |= DMU_BACKUP_FEATURE_EMBED_DATA_LZ4;
	}

	if (resumeobj != 0 || resumeoff != 0) {
		featureflags |= DMU_BACKUP_FEATURE_RESUMING;
	}

	DMU_SET_FEATUREFLAGS(drr->drr_u.drr_begin.drr_versioninfo,
	    featureflags);

	drr->drr_u.drr_begin.drr_creation_time =
	    dsl_dataset_phys(to_ds)->ds_creation_time;
	drr->drr_u.drr_begin.drr_type = dmu_objset_type(os);
	if (is_clone)
		drr->drr_u.drr_begin.drr_flags |= DRR_FLAG_CLONE;
	drr->drr_u.drr_begin.drr_toguid = dsl_dataset_phys(to_ds)->ds_guid;
	if (dsl_dataset_phys(to_ds)->ds_flags & DS_FLAG_CI_DATASET)
		drr->drr_u.drr_begin.drr_flags |= DRR_FLAG_CI_DATA;
	if (zfs_send_set_freerecords_bit)
		drr->drr_u.drr_begin.drr_flags |= DRR_FLAG_FREERECORDS;

	if (ancestor_zb != NULL) {
		drr->drr_u.drr_begin.drr_fromguid =
		    ancestor_zb->zbm_guid;
		fromtxg = ancestor_zb->zbm_creation_txg;
	}
	dsl_dataset_name(to_ds, drr->drr_u.drr_begin.drr_toname);
	if (!to_ds->ds_is_snapshot) {
		(void) strlcat(drr->drr_u.drr_begin.drr_toname, "@--head--",
		    sizeof (drr->drr_u.drr_begin.drr_toname));
	}

	dsp = kmem_zalloc(sizeof (dmu_sendarg_t), KM_SLEEP);

	dsp->dsa_drr = drr;
	dsp->dsa_vp = vp;
	dsp->dsa_outfd = outfd;
	dsp->dsa_proc = curproc;
	dsp->dsa_os = os;
	dsp->dsa_off = off;
	dsp->dsa_toguid = dsl_dataset_phys(to_ds)->ds_guid;
	dsp->dsa_pending_op = PENDING_NONE;
	dsp->dsa_featureflags = featureflags;
	dsp->dsa_resume_object = resumeobj;
	dsp->dsa_resume_offset = resumeoff;

	mutex_enter(&to_ds->ds_sendstream_lock);
	list_insert_head(&to_ds->ds_sendstreams, dsp);
	mutex_exit(&to_ds->ds_sendstream_lock);

	dsl_dataset_long_hold(to_ds, FTAG);
	dsl_pool_rele(dp, tag);

	void *payload = NULL;
	size_t payload_len = 0;
	if (resumeobj != 0 || resumeoff != 0) {
		dmu_object_info_t to_doi;
		err = dmu_object_info(os, resumeobj, &to_doi);
		if (err != 0)
			goto out;
		SET_BOOKMARK(&to_arg.resume, to_ds->ds_object, resumeobj, 0,
		    resumeoff / to_doi.doi_data_block_size);

		nvlist_t *nvl = fnvlist_alloc();
		fnvlist_add_uint64(nvl, "resume_object", resumeobj);
		fnvlist_add_uint64(nvl, "resume_offset", resumeoff);
		payload = fnvlist_pack(nvl, &payload_len);
		drr->drr_payloadlen = payload_len;
		fnvlist_free(nvl);
	}
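
	/*
	 * For example (illustrative values), resuming at object 193,
	 * offset 1MB attaches a { resume_object = 193,
	 * resume_offset = 1048576 } nvlist as the DRR_BEGIN payload and
	 * restarts the traversal at blkid 1048576 / datablksz within
	 * that object.
	 */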

	err = dump_record(dsp, payload, payload_len);
	fnvlist_pack_free(payload, payload_len);
	if (err != 0) {
		err = dsp->dsa_err;
		goto out;
	}

	err = bqueue_init(&to_arg.q, zfs_send_queue_length,
	    offsetof(struct send_block_record, ln));
	to_arg.error_code = 0;
	to_arg.cancel = B_FALSE;
	to_arg.ds = to_ds;
	to_arg.fromtxg = fromtxg;
	to_arg.flags = TRAVERSE_PRE | TRAVERSE_PREFETCH;
	(void) thread_create(NULL, 0, send_traverse_thread, &to_arg, 0, curproc,
	    TS_RUN, minclsyspri);

	struct send_block_record *to_data;
	to_data = bqueue_dequeue(&to_arg.q);

	while (!to_data->eos_marker && err == 0) {
		err = do_dump(dsp, to_data);
		to_data = get_next_record(&to_arg.q, to_data);
		if (issig(JUSTLOOKING) && issig(FORREAL))
			err = EINTR;
	}

	if (err != 0) {
		to_arg.cancel = B_TRUE;
		while (!to_data->eos_marker) {
			to_data = get_next_record(&to_arg.q, to_data);
		}
	}
	kmem_free(to_data, sizeof (*to_data));

	bqueue_destroy(&to_arg.q);

	if (err == 0 && to_arg.error_code != 0)
		err = to_arg.error_code;

	if (err != 0)
		goto out;

	if (dsp->dsa_pending_op != PENDING_NONE)
		if (dump_record(dsp, NULL, 0) != 0)
			err = SET_ERROR(EINTR);

	if (err != 0) {
		if (err == EINTR && dsp->dsa_err != 0)
			err = dsp->dsa_err;
		goto out;
	}

	bzero(drr, sizeof (dmu_replay_record_t));
	drr->drr_type = DRR_END;
	drr->drr_u.drr_end.drr_checksum = dsp->dsa_zc;
	drr->drr_u.drr_end.drr_toguid = dsp->dsa_toguid;

	if (dump_record(dsp, NULL, 0) != 0)
		err = dsp->dsa_err;

out:
	mutex_enter(&to_ds->ds_sendstream_lock);
	list_remove(&to_ds->ds_sendstreams, dsp);
	mutex_exit(&to_ds->ds_sendstream_lock);

	kmem_free(drr, sizeof (dmu_replay_record_t));
	kmem_free(dsp, sizeof (dmu_sendarg_t));

	dsl_dataset_long_rele(to_ds, FTAG);

	return (err);
}

int
dmu_send_obj(const char *pool, uint64_t tosnap, uint64_t fromsnap,
    boolean_t embedok, boolean_t large_block_ok,
    int outfd, vnode_t *vp, offset_t *off)
{
	dsl_pool_t *dp;
	dsl_dataset_t *ds;
	dsl_dataset_t *fromds = NULL;
	int err;

	err = dsl_pool_hold(pool, FTAG, &dp);
	if (err != 0)
		return (err);

	err = dsl_dataset_hold_obj(dp, tosnap, FTAG, &ds);
	if (err != 0) {
		dsl_pool_rele(dp, FTAG);
		return (err);
	}

	if (fromsnap != 0) {
		zfs_bookmark_phys_t zb;
		boolean_t is_clone;

		err = dsl_dataset_hold_obj(dp, fromsnap, FTAG, &fromds);
		if (err != 0) {
			dsl_dataset_rele(ds, FTAG);
			dsl_pool_rele(dp, FTAG);
			return (err);
		}
		if (!dsl_dataset_is_before(ds, fromds, 0))
			err = SET_ERROR(EXDEV);
		zb.zbm_creation_time =
		    dsl_dataset_phys(fromds)->ds_creation_time;
		zb.zbm_creation_txg = dsl_dataset_phys(fromds)->ds_creation_txg;
		zb.zbm_guid = dsl_dataset_phys(fromds)->ds_guid;
		is_clone = (fromds->ds_dir != ds->ds_dir);
		dsl_dataset_rele(fromds, FTAG);
		err = dmu_send_impl(FTAG, dp, ds, &zb, is_clone,
		    embedok, large_block_ok, outfd, 0, 0, vp, off);
	} else {
		err = dmu_send_impl(FTAG, dp, ds, NULL, B_FALSE,
		    embedok, large_block_ok, outfd, 0, 0, vp, off);
	}
	dsl_dataset_rele(ds, FTAG);
	return (err);
}
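
/*
 * For example, an incremental send of pool/fs@b from pool/fs@a arrives
 * here (e.g., via zfs_ioc_send) with the two snapshots' object numbers;
 * if @a is instead the origin snapshot of the clone pool/fs, then
 * fromds->ds_dir differs from ds->ds_dir and the stream's DRR_BEGIN is
 * marked DRR_FLAG_CLONE.
 */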

int
dmu_send(const char *tosnap, const char *fromsnap, boolean_t embedok,
    boolean_t large_block_ok, int outfd, uint64_t resumeobj, uint64_t resumeoff,
    vnode_t *vp, offset_t *off)
{
	dsl_pool_t *dp;
	dsl_dataset_t *ds;
	int err;
	boolean_t owned = B_FALSE;

	if (fromsnap != NULL && strpbrk(fromsnap, "@#") == NULL)
		return (SET_ERROR(EINVAL));

	err = dsl_pool_hold(tosnap, FTAG, &dp);
	if (err != 0)
		return (err);

	if (strchr(tosnap, '@') == NULL && spa_writeable(dp->dp_spa)) {
		/*
		 * We are sending a filesystem or volume. Ensure
		 * that it doesn't change by owning the dataset.
		 */
		err = dsl_dataset_own(dp, tosnap, FTAG, &ds);
		owned = B_TRUE;
	} else {
		err = dsl_dataset_hold(dp, tosnap, FTAG, &ds);
	}
	if (err != 0) {
		dsl_pool_rele(dp, FTAG);
		return (err);
	}

	if (fromsnap != NULL) {
		zfs_bookmark_phys_t zb;
		boolean_t is_clone = B_FALSE;
		int fsnamelen = strchr(tosnap, '@') - tosnap;

		/*
		 * If the fromsnap is in a different filesystem, then
		 * mark the send stream as a clone.
		 */
		if (strncmp(tosnap, fromsnap, fsnamelen) != 0 ||
		    (fromsnap[fsnamelen] != '@' &&
		    fromsnap[fsnamelen] != '#')) {
			is_clone = B_TRUE;
		}

		if (strchr(fromsnap, '@')) {
			dsl_dataset_t *fromds;
			err = dsl_dataset_hold(dp, fromsnap, FTAG, &fromds);
			if (err == 0) {
				if (!dsl_dataset_is_before(ds, fromds, 0))
					err = SET_ERROR(EXDEV);
				zb.zbm_creation_time =
				    dsl_dataset_phys(fromds)->ds_creation_time;
				zb.zbm_creation_txg =
				    dsl_dataset_phys(fromds)->ds_creation_txg;
				zb.zbm_guid = dsl_dataset_phys(fromds)->ds_guid;
				is_clone = (ds->ds_dir != fromds->ds_dir);
				dsl_dataset_rele(fromds, FTAG);
			}
		} else {
			err = dsl_bookmark_lookup(dp, fromsnap, ds, &zb);
		}
		if (err != 0) {
			dsl_dataset_rele(ds, FTAG);
			dsl_pool_rele(dp, FTAG);
			return (err);
		}
		err = dmu_send_impl(FTAG, dp, ds, &zb, is_clone,
		    embedok, large_block_ok,
		    outfd, resumeobj, resumeoff, vp, off);
	} else {
		err = dmu_send_impl(FTAG, dp, ds, NULL, B_FALSE,
		    embedok, large_block_ok,
		    outfd, resumeobj, resumeoff, vp, off);
	}
	if (owned)
		dsl_dataset_disown(ds, FTAG);
	else
		dsl_dataset_rele(ds, FTAG);
	return (err);
}
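
/*
 * For example, "zfs send -i tank/other@a tank/fs@b" arrives here with
 * fromsnap "tank/other@a"; the name comparison against tosnap marks
 * the stream as a clone send. A bookmark incremental such as
 * "tank/fs#mark" contains no '@' and takes the dsl_bookmark_lookup()
 * path instead.
 */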

static int
dmu_adjust_send_estimate_for_indirects(dsl_dataset_t *ds, uint64_t size,
    uint64_t *sizep)
{
	int err;
	/*
	 * Assume that space (both on-disk and in-stream) is dominated by
	 * data. We will adjust for indirect blocks and the copies property,
	 * but ignore per-object space used (e.g., dnodes and DRR_OBJECT
	 * records).
	 */

	/*
	 * Subtract out approximate space used by indirect blocks.
	 * Assume most space is used by data blocks (non-indirect, non-dnode).
	 * Assume all blocks are recordsize. Assume ditto blocks and
	 * internal fragmentation cancel out compression.
	 *
	 * Therefore, space used by indirect blocks is sizeof(blkptr_t) per
	 * block, which we observe in practice.
	 */
	uint64_t recordsize;
	err = dsl_prop_get_int_ds(ds, "recordsize", &recordsize);
	if (err != 0)
		return (err);
	size -= size / recordsize * sizeof (blkptr_t);

	/* Add in the space for the record associated with each block. */
	size += size / recordsize * sizeof (dmu_replay_record_t);

	*sizep = size;

	return (0);
}
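
/*
 * Worked example (illustrative): with recordsize == 128KB, a 1GB
 * estimate covers 8192 blocks, so we subtract 8192 * sizeof (blkptr_t)
 * == 1MB of indirect-block overhead and add back
 * 8192 * sizeof (dmu_replay_record_t) of per-block stream headers.
 */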

int
dmu_send_estimate(dsl_dataset_t *ds, dsl_dataset_t *fromds, uint64_t *sizep)
{
	dsl_pool_t *dp = ds->ds_dir->dd_pool;
	int err;
	uint64_t size;

	ASSERT(dsl_pool_config_held(dp));

	/* tosnap must be a snapshot */
	if (!ds->ds_is_snapshot)
		return (SET_ERROR(EINVAL));

	/* fromsnap, if provided, must be a snapshot */
	if (fromds != NULL && !fromds->ds_is_snapshot)
		return (SET_ERROR(EINVAL));

	/*
	 * fromsnap must be an earlier snapshot from the same fs as tosnap,
	 * or the origin's fs.
	 */
	if (fromds != NULL && !dsl_dataset_is_before(ds, fromds, 0))
		return (SET_ERROR(EXDEV));

	/* Get uncompressed size estimate of changed data. */
	if (fromds == NULL) {
		size = dsl_dataset_phys(ds)->ds_uncompressed_bytes;
	} else {
		uint64_t used, comp;
		err = dsl_dataset_space_written(fromds, ds,
		    &used, &comp, &size);
		if (err != 0)
			return (err);
	}

	err = dmu_adjust_send_estimate_for_indirects(ds, size, sizep);
	return (err);
}

/*
 * Simple callback used to traverse the blocks of a snapshot and sum their
 * uncompressed size.
 */
/* ARGSUSED */
static int
dmu_calculate_send_traversal(spa_t *spa, zilog_t *zilog, const blkptr_t *bp,
    const zbookmark_phys_t *zb, const dnode_phys_t *dnp, void *arg)
{
	uint64_t *spaceptr = arg;
	if (bp != NULL && !BP_IS_HOLE(bp)) {
		*spaceptr += BP_GET_UCSIZE(bp);
	}
	return (0);
}

/*
 * Given a destination snapshot and a TXG, calculate the approximate size of a
 * send stream sent from that TXG. from_txg may be zero, indicating that the
 * whole snapshot will be sent.
 */
int
dmu_send_estimate_from_txg(dsl_dataset_t *ds, uint64_t from_txg,
    uint64_t *sizep)
{
	dsl_pool_t *dp = ds->ds_dir->dd_pool;
	int err;
	uint64_t size = 0;

	ASSERT(dsl_pool_config_held(dp));

	/* tosnap must be a snapshot */
	if (!dsl_dataset_is_snapshot(ds))
		return (SET_ERROR(EINVAL));

	/* verify that from_txg is before the provided snapshot was taken */
	if (from_txg >= dsl_dataset_phys(ds)->ds_creation_txg) {
		return (SET_ERROR(EXDEV));
	}

	/*
	 * traverse the blocks of the snapshot with birth times after
	 * from_txg, summing their uncompressed size
	 */
	err = traverse_dataset(ds, from_txg, TRAVERSE_POST,
	    dmu_calculate_send_traversal, &size);
	if (err)
		return (err);

	err = dmu_adjust_send_estimate_for_indirects(ds, size, sizep);
	return (err);
}
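
/*
 * This is useful, e.g., for estimating the size of an incremental send
 * from a bookmark: a bookmark records only its creation txg (no block
 * lists), so the estimate traverses blocks born after that txg rather
 * than diffing two snapshots.
 */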

typedef struct dmu_recv_begin_arg {
	const char *drba_origin;
	dmu_recv_cookie_t *drba_cookie;
	cred_t *drba_cred;
	uint64_t drba_snapobj;
} dmu_recv_begin_arg_t;

static int
recv_begin_check_existing_impl(dmu_recv_begin_arg_t *drba, dsl_dataset_t *ds,
    uint64_t fromguid)
{
	uint64_t val;
	int error;
	dsl_pool_t *dp = ds->ds_dir->dd_pool;

	/* temporary clone name must not exist */
	error = zap_lookup(dp->dp_meta_objset,
	    dsl_dir_phys(ds->ds_dir)->dd_child_dir_zapobj, recv_clone_name,
	    8, 1, &val);
	if (error != ENOENT)
		return (error == 0 ? EBUSY : error);

	/* new snapshot name must not exist */
	error = zap_lookup(dp->dp_meta_objset,
	    dsl_dataset_phys(ds)->ds_snapnames_zapobj,
	    drba->drba_cookie->drc_tosnap, 8, 1, &val);
	if (error != ENOENT)
		return (error == 0 ? EEXIST : error);

	/*
	 * Check snapshot limit before receiving. We'll recheck again at the
	 * end, but might as well abort before receiving if we're already over
	 * the limit.
	 *
	 * Note that we do not check the file system limit with
	 * dsl_dir_fscount_check because the temporary %clones don't count
	 * against that limit.
	 */
	error = dsl_fs_ss_limit_check(ds->ds_dir, 1, ZFS_PROP_SNAPSHOT_LIMIT,
	    NULL, drba->drba_cred);
	if (error != 0)
		return (error);

	if (fromguid != 0) {
		dsl_dataset_t *snap;
		uint64_t obj = dsl_dataset_phys(ds)->ds_prev_snap_obj;

		/* Find snapshot in this dir that matches fromguid. */
		while (obj != 0) {
			error = dsl_dataset_hold_obj(dp, obj, FTAG,
			    &snap);
			if (error != 0)
				return (SET_ERROR(ENODEV));
			if (snap->ds_dir != ds->ds_dir) {
				dsl_dataset_rele(snap, FTAG);
				return (SET_ERROR(ENODEV));
			}
			if (dsl_dataset_phys(snap)->ds_guid == fromguid)
				break;
			obj = dsl_dataset_phys(snap)->ds_prev_snap_obj;
			dsl_dataset_rele(snap, FTAG);
		}
		if (obj == 0)
			return (SET_ERROR(ENODEV));

		if (drba->drba_cookie->drc_force) {
			drba->drba_snapobj = obj;
		} else {
			/*
			 * If we are not forcing, there must be no
			 * changes since fromsnap.
			 */
			if (dsl_dataset_modified_since_snap(ds, snap)) {
				dsl_dataset_rele(snap, FTAG);
				return (SET_ERROR(ETXTBSY));
			}
			drba->drba_snapobj = ds->ds_prev->ds_object;
		}

		dsl_dataset_rele(snap, FTAG);
	} else {
		/* if full, then must be forced */
		if (!drba->drba_cookie->drc_force)
			return (SET_ERROR(EEXIST));
		/* start from $ORIGIN@$ORIGIN, if supported */
		drba->drba_snapobj = dp->dp_origin_snap != NULL ?
		    dp->dp_origin_snap->ds_object : 0;
	}

	return (0);
}
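
/*
 * For example, receiving an incremental whose fromguid matches an older
 * snapshot @a while the target has changed since then: without
 * drc_force the receive fails with ETXTBSY; with drc_force ("zfs recv
 * -F") we record @a in drba_snapobj so that begin_sync clones from it,
 * effectively rolling the target back.
 */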

static int
dmu_recv_begin_check(void *arg, dmu_tx_t *tx)
{
	dmu_recv_begin_arg_t *drba = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	struct drr_begin *drrb = drba->drba_cookie->drc_drrb;
	uint64_t fromguid = drrb->drr_fromguid;
	int flags = drrb->drr_flags;
	int error;
	uint64_t featureflags = DMU_GET_FEATUREFLAGS(drrb->drr_versioninfo);
	dsl_dataset_t *ds;
	const char *tofs = drba->drba_cookie->drc_tofs;

	/* already checked */
	ASSERT3U(drrb->drr_magic, ==, DMU_BACKUP_MAGIC);
	ASSERT(!(featureflags & DMU_BACKUP_FEATURE_RESUMING));

	if (DMU_GET_STREAM_HDRTYPE(drrb->drr_versioninfo) ==
	    DMU_COMPOUNDSTREAM ||
	    drrb->drr_type >= DMU_OST_NUMTYPES ||
	    ((flags & DRR_FLAG_CLONE) && drba->drba_origin == NULL))
		return (SET_ERROR(EINVAL));

	/* Verify pool version supports SA if SA_SPILL feature set */
	if ((featureflags & DMU_BACKUP_FEATURE_SA_SPILL) &&
	    spa_version(dp->dp_spa) < SPA_VERSION_SA)
		return (SET_ERROR(ENOTSUP));

	if (drba->drba_cookie->drc_resumable &&
	    !spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_EXTENSIBLE_DATASET))
		return (SET_ERROR(ENOTSUP));

	/*
	 * The receiving code doesn't know how to translate a WRITE_EMBEDDED
	 * record to a plain WRITE record, so the pool must have the
	 * EMBEDDED_DATA feature enabled if the stream has WRITE_EMBEDDED
	 * records. Same with WRITE_EMBEDDED records that use LZ4 compression.
	 */
	if ((featureflags & DMU_BACKUP_FEATURE_EMBED_DATA) &&
	    !spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_EMBEDDED_DATA))
		return (SET_ERROR(ENOTSUP));
	if ((featureflags & DMU_BACKUP_FEATURE_EMBED_DATA_LZ4) &&
	    !spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_LZ4_COMPRESS))
		return (SET_ERROR(ENOTSUP));

	/*
	 * The receiving code doesn't know how to translate large blocks
	 * to smaller ones, so the pool must have the LARGE_BLOCKS
	 * feature enabled if the stream has LARGE_BLOCKS.
	 */
	if ((featureflags & DMU_BACKUP_FEATURE_LARGE_BLOCKS) &&
	    !spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_LARGE_BLOCKS))
		return (SET_ERROR(ENOTSUP));

	error = dsl_dataset_hold(dp, tofs, FTAG, &ds);
	if (error == 0) {
		/* target fs already exists; recv into temp clone */

		/* Can't recv a clone into an existing fs */
		if (flags & DRR_FLAG_CLONE || drba->drba_origin) {
			dsl_dataset_rele(ds, FTAG);
			return (SET_ERROR(EINVAL));
		}

		error = recv_begin_check_existing_impl(drba, ds, fromguid);
		dsl_dataset_rele(ds, FTAG);
	} else if (error == ENOENT) {
		/* target fs does not exist; must be a full backup or clone */
		char buf[ZFS_MAX_DATASET_NAME_LEN];

		/*
		 * If it's a non-clone incremental, we are missing the
		 * target fs, so fail the recv.
		 */
		if (fromguid != 0 && !(flags & DRR_FLAG_CLONE ||
		    drba->drba_origin))
			return (SET_ERROR(ENOENT));

		/*
		 * If we're receiving a full send as a clone, and it doesn't
		 * contain all the necessary free records and freeobject
		 * records, reject it.
		 */
		if (fromguid == 0 && drba->drba_origin &&
		    !(flags & DRR_FLAG_FREERECORDS))
			return (SET_ERROR(EINVAL));

		/* Open the parent of tofs */
		ASSERT3U(strlen(tofs), <, sizeof (buf));
		(void) strlcpy(buf, tofs, strrchr(tofs, '/') - tofs + 1);
		error = dsl_dataset_hold(dp, buf, FTAG, &ds);
		if (error != 0)
			return (error);

		/*
		 * Check filesystem and snapshot limits before receiving. We'll
		 * recheck snapshot limits again at the end (we create the
		 * filesystems and increment those counts during begin_sync).
		 */
		error = dsl_fs_ss_limit_check(ds->ds_dir, 1,
		    ZFS_PROP_FILESYSTEM_LIMIT, NULL, drba->drba_cred);
		if (error != 0) {
			dsl_dataset_rele(ds, FTAG);
			return (error);
		}

		error = dsl_fs_ss_limit_check(ds->ds_dir, 1,
		    ZFS_PROP_SNAPSHOT_LIMIT, NULL, drba->drba_cred);
		if (error != 0) {
			dsl_dataset_rele(ds, FTAG);
			return (error);
		}

		if (drba->drba_origin != NULL) {
			dsl_dataset_t *origin;
			error = dsl_dataset_hold(dp, drba->drba_origin,
			    FTAG, &origin);
			if (error != 0) {
				dsl_dataset_rele(ds, FTAG);
				return (error);
			}
			if (!origin->ds_is_snapshot) {
				dsl_dataset_rele(origin, FTAG);
				dsl_dataset_rele(ds, FTAG);
				return (SET_ERROR(EINVAL));
			}
			if (dsl_dataset_phys(origin)->ds_guid != fromguid &&
			    fromguid != 0) {
				dsl_dataset_rele(origin, FTAG);
				dsl_dataset_rele(ds, FTAG);
				return (SET_ERROR(ENODEV));
			}
			dsl_dataset_rele(origin, FTAG);
		}
		dsl_dataset_rele(ds, FTAG);
		error = 0;
	}
	return (error);
}

static void
dmu_recv_begin_sync(void *arg, dmu_tx_t *tx)
{
	dmu_recv_begin_arg_t *drba = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	objset_t *mos = dp->dp_meta_objset;
	struct drr_begin *drrb = drba->drba_cookie->drc_drrb;
	const char *tofs = drba->drba_cookie->drc_tofs;
	dsl_dataset_t *ds, *newds;
	uint64_t dsobj;
	int error;
	uint64_t crflags = 0;

	if (drrb->drr_flags & DRR_FLAG_CI_DATA)
		crflags |= DS_FLAG_CI_DATASET;

	error = dsl_dataset_hold(dp, tofs, FTAG, &ds);
	if (error == 0) {
		/* create temporary clone */
		dsl_dataset_t *snap = NULL;
		if (drba->drba_snapobj != 0) {
			VERIFY0(dsl_dataset_hold_obj(dp,
			    drba->drba_snapobj, FTAG, &snap));
		}
		dsobj = dsl_dataset_create_sync(ds->ds_dir, recv_clone_name,
		    snap, crflags, drba->drba_cred, tx);
		if (drba->drba_snapobj != 0)
			dsl_dataset_rele(snap, FTAG);
		dsl_dataset_rele(ds, FTAG);
	} else {
		dsl_dir_t *dd;
		const char *tail;
		dsl_dataset_t *origin = NULL;

		VERIFY0(dsl_dir_hold(dp, tofs, FTAG, &dd, &tail));

		if (drba->drba_origin != NULL) {
			VERIFY0(dsl_dataset_hold(dp, drba->drba_origin,
			    FTAG, &origin));
		}

		/* Create new dataset. */
		dsobj = dsl_dataset_create_sync(dd,
		    strrchr(tofs, '/') + 1,
		    origin, crflags, drba->drba_cred, tx);
		if (origin != NULL)
			dsl_dataset_rele(origin, FTAG);
		dsl_dir_rele(dd, FTAG);
		drba->drba_cookie->drc_newfs = B_TRUE;
	}
	VERIFY0(dsl_dataset_own_obj(dp, dsobj, dmu_recv_tag, &newds));

	if (drba->drba_cookie->drc_resumable) {
		dsl_dataset_zapify(newds, tx);
		if (drrb->drr_fromguid != 0) {
			VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_FROMGUID,
			    8, 1, &drrb->drr_fromguid, tx));
		}
		VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_TOGUID,
		    8, 1, &drrb->drr_toguid, tx));
		VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_TONAME,
		    1, strlen(drrb->drr_toname) + 1, drrb->drr_toname, tx));
		uint64_t one = 1;
		uint64_t zero = 0;
		VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_OBJECT,
		    8, 1, &one, tx));
		VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_OFFSET,
		    8, 1, &zero, tx));
		VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_BYTES,
		    8, 1, &zero, tx));
		if (DMU_GET_FEATUREFLAGS(drrb->drr_versioninfo) &
		    DMU_BACKUP_FEATURE_EMBED_DATA) {
			VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_EMBEDOK,
			    8, 1, &one, tx));
		}
	}

	dmu_buf_will_dirty(newds->ds_dbuf, tx);
	dsl_dataset_phys(newds)->ds_flags |= DS_FLAG_INCONSISTENT;

	/*
	 * If we actually created a non-clone, we need to create the
	 * objset in our new dataset.
	 */
	if (BP_IS_HOLE(dsl_dataset_get_blkptr(newds))) {
		(void) dmu_objset_create_impl(dp->dp_spa,
		    newds, dsl_dataset_get_blkptr(newds), drrb->drr_type, tx);
	}

	drba->drba_cookie->drc_ds = newds;

	spa_history_log_internal_ds(newds, "receive", tx, "");
}

static int
dmu_recv_resume_begin_check(void *arg, dmu_tx_t *tx)
{
	dmu_recv_begin_arg_t *drba = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	struct drr_begin *drrb = drba->drba_cookie->drc_drrb;
	int error;
	uint64_t featureflags = DMU_GET_FEATUREFLAGS(drrb->drr_versioninfo);
	dsl_dataset_t *ds;
	const char *tofs = drba->drba_cookie->drc_tofs;

	/* already checked */
	ASSERT3U(drrb->drr_magic, ==, DMU_BACKUP_MAGIC);
	ASSERT(featureflags & DMU_BACKUP_FEATURE_RESUMING);

	if (DMU_GET_STREAM_HDRTYPE(drrb->drr_versioninfo) ==
	    DMU_COMPOUNDSTREAM ||
	    drrb->drr_type >= DMU_OST_NUMTYPES)
		return (SET_ERROR(EINVAL));

	/* Verify pool version supports SA if SA_SPILL feature set */
	if ((featureflags & DMU_BACKUP_FEATURE_SA_SPILL) &&
	    spa_version(dp->dp_spa) < SPA_VERSION_SA)
		return (SET_ERROR(ENOTSUP));

	/*
	 * The receiving code doesn't know how to translate a WRITE_EMBEDDED
	 * record to a plain WRITE record, so the pool must have the
	 * EMBEDDED_DATA feature enabled if the stream has WRITE_EMBEDDED
	 * records. Same with WRITE_EMBEDDED records that use LZ4 compression.
	 */
	if ((featureflags & DMU_BACKUP_FEATURE_EMBED_DATA) &&
	    !spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_EMBEDDED_DATA))
		return (SET_ERROR(ENOTSUP));
	if ((featureflags & DMU_BACKUP_FEATURE_EMBED_DATA_LZ4) &&
	    !spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_LZ4_COMPRESS))
		return (SET_ERROR(ENOTSUP));

	/* 6 extra bytes for /%recv */
	char recvname[ZFS_MAX_DATASET_NAME_LEN + 6];

	(void) snprintf(recvname, sizeof (recvname), "%s/%s",
	    tofs, recv_clone_name);

	if (dsl_dataset_hold(dp, recvname, FTAG, &ds) != 0) {
		/* %recv does not exist; continue in tofs */
		error = dsl_dataset_hold(dp, tofs, FTAG, &ds);
		if (error != 0)
			return (error);
	}
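
	/*
	 * E.g., resuming into tofs "tank/fs" first looks for the
	 * temporary clone "tank/fs/%recv" (the "/%recv" suffix is the 6
	 * extra bytes above); if the interrupted receive was creating a
	 * brand new filesystem, there is no %recv clone and we resume
	 * into tofs itself.
	 */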

	/* check that ds is marked inconsistent */
	if (!DS_IS_INCONSISTENT(ds)) {
		dsl_dataset_rele(ds, FTAG);
		return (SET_ERROR(EINVAL));
	}

	/* check that there is resuming data, and that the toguid matches */
	if (!dsl_dataset_is_zapified(ds)) {
		dsl_dataset_rele(ds, FTAG);
		return (SET_ERROR(EINVAL));
	}
	uint64_t val;
	error = zap_lookup(dp->dp_meta_objset, ds->ds_object,
	    DS_FIELD_RESUME_TOGUID, sizeof (val), 1, &val);
	if (error != 0 || drrb->drr_toguid != val) {
		dsl_dataset_rele(ds, FTAG);
		return (SET_ERROR(EINVAL));
	}

	/*
	 * Check if the receive is still running. If so, it will be owned.
	 * Note that nothing else can own the dataset (e.g. after the receive
	 * fails) because it will be marked inconsistent.
	 */
	if (dsl_dataset_has_owner(ds)) {
		dsl_dataset_rele(ds, FTAG);
		return (SET_ERROR(EBUSY));
	}

	/* There should not be any snapshots of this fs yet. */
	if (ds->ds_prev != NULL && ds->ds_prev->ds_dir == ds->ds_dir) {
		dsl_dataset_rele(ds, FTAG);
		return (SET_ERROR(EINVAL));
	}

	/*
	 * Note: resume point will be checked when we process the first WRITE
	 * record.
	 */

	/* check that the origin matches */
	val = 0;
	(void) zap_lookup(dp->dp_meta_objset, ds->ds_object,
	    DS_FIELD_RESUME_FROMGUID, sizeof (val), 1, &val);
	if (drrb->drr_fromguid != val) {
		dsl_dataset_rele(ds, FTAG);
		return (SET_ERROR(EINVAL));
	}

	dsl_dataset_rele(ds, FTAG);
	return (0);
}

static void
dmu_recv_resume_begin_sync(void *arg, dmu_tx_t *tx)
{
	dmu_recv_begin_arg_t *drba = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	const char *tofs = drba->drba_cookie->drc_tofs;
	dsl_dataset_t *ds;
	uint64_t dsobj;
	/* 6 extra bytes for /%recv */
	char recvname[ZFS_MAX_DATASET_NAME_LEN + 6];

	(void) snprintf(recvname, sizeof (recvname), "%s/%s",
	    tofs, recv_clone_name);

	if (dsl_dataset_hold(dp, recvname, FTAG, &ds) != 0) {
		/* %recv does not exist; continue in tofs */
		VERIFY0(dsl_dataset_hold(dp, tofs, FTAG, &ds));
		drba->drba_cookie->drc_newfs = B_TRUE;
	}

	/* clear the inconsistent flag so that we can own it */
	ASSERT(DS_IS_INCONSISTENT(ds));
	dmu_buf_will_dirty(ds->ds_dbuf, tx);
	dsl_dataset_phys(ds)->ds_flags &= ~DS_FLAG_INCONSISTENT;
	dsobj = ds->ds_object;
	dsl_dataset_rele(ds, FTAG);

	VERIFY0(dsl_dataset_own_obj(dp, dsobj, dmu_recv_tag, &ds));

	dmu_buf_will_dirty(ds->ds_dbuf, tx);
	dsl_dataset_phys(ds)->ds_flags |= DS_FLAG_INCONSISTENT;

	ASSERT(!BP_IS_HOLE(dsl_dataset_get_blkptr(ds)));

	drba->drba_cookie->drc_ds = ds;

	spa_history_log_internal_ds(ds, "resume receive", tx, "");
}

/*
 * NB: callers *MUST* call dmu_recv_stream() if dmu_recv_begin()
 * succeeds; otherwise we will leak the holds on the datasets.
 */
int
dmu_recv_begin(char *tofs, char *tosnap, dmu_replay_record_t *drr_begin,
    boolean_t force, boolean_t resumable, char *origin, dmu_recv_cookie_t *drc)
{
	dmu_recv_begin_arg_t drba = { 0 };

	bzero(drc, sizeof (dmu_recv_cookie_t));
	drc->drc_drr_begin = drr_begin;
	drc->drc_drrb = &drr_begin->drr_u.drr_begin;
	drc->drc_tosnap = tosnap;
	drc->drc_tofs = tofs;
	drc->drc_force = force;
	drc->drc_resumable = resumable;
	drc->drc_cred = CRED();

	if (drc->drc_drrb->drr_magic == BSWAP_64(DMU_BACKUP_MAGIC)) {
		drc->drc_byteswap = B_TRUE;
		fletcher_4_incremental_byteswap(drr_begin,
		    sizeof (dmu_replay_record_t), &drc->drc_cksum);
		byteswap_record(drr_begin);
	} else if (drc->drc_drrb->drr_magic == DMU_BACKUP_MAGIC) {
		fletcher_4_incremental_native(drr_begin,
		    sizeof (dmu_replay_record_t), &drc->drc_cksum);
	} else {
		return (SET_ERROR(EINVAL));
	}

	drba.drba_origin = origin;
	drba.drba_cookie = drc;
	drba.drba_cred = CRED();

	if (DMU_GET_FEATUREFLAGS(drc->drc_drrb->drr_versioninfo) &
	    DMU_BACKUP_FEATURE_RESUMING) {
		return (dsl_sync_task(tofs,
		    dmu_recv_resume_begin_check, dmu_recv_resume_begin_sync,
		    &drba, 5, ZFS_SPACE_CHECK_NORMAL));
	} else {
		return (dsl_sync_task(tofs,
		    dmu_recv_begin_check, dmu_recv_begin_sync,
		    &drba, 5, ZFS_SPACE_CHECK_NORMAL));
	}
}

struct receive_record_arg {
	dmu_replay_record_t header;
	void *payload; /* Pointer to a buffer containing the payload */
	/*
	 * If the record is a write, pointer to the arc_buf_t containing the
	 * payload.
	 */
	arc_buf_t *write_buf;
	int payload_size;
	uint64_t bytes_read; /* bytes read from stream when record created */
	boolean_t eos_marker; /* Marks the end of the stream */
	bqueue_node_t node;
};

struct receive_writer_arg {
	objset_t *os;
	boolean_t byteswap;
	bqueue_t q;

	/*
	 * These three args are used to signal to the main thread that we're
	 * done.
	 */
	kmutex_t mutex;
	kcondvar_t cv;
	boolean_t done;

	int err;
	/* A map from guid to dataset to help handle dedup'd streams. */
	avl_tree_t *guid_to_ds_map;
	boolean_t resumable;
	uint64_t last_object, last_offset;
	uint64_t bytes_read; /* bytes read when current record created */
};

struct objlist {
	list_t list; /* List of struct receive_objnode. */
	/*
	 * Last object looked up. Used to assert that objects are being looked
	 * up in ascending order.
	 */
	uint64_t last_lookup;
};

struct receive_objnode {
	list_node_t node;
	uint64_t object;
};

struct receive_arg {
	objset_t *os;
	vnode_t *vp; /* The vnode to read the stream from */
	uint64_t voff; /* The current offset in the stream */
	uint64_t bytes_read;
	/*
	 * A record that has had its payload read in, but hasn't yet been handed
	 * off to the worker thread.
	 */
	struct receive_record_arg *rrd;
	/* A record that has had its header read in, but not its payload. */
	struct receive_record_arg *next_rrd;
	zio_cksum_t cksum;
	zio_cksum_t prev_cksum;
	int err;
	boolean_t byteswap;
	/* Sorted list of objects not to issue prefetches for. */
	struct objlist ignore_objlist;
};

typedef struct guid_map_entry {
	uint64_t guid;
	dsl_dataset_t *gme_ds;
	avl_node_t avlnode;
} guid_map_entry_t;

static int
guid_compare(const void *arg1, const void *arg2)
{
	const guid_map_entry_t *gmep1 = arg1;
	const guid_map_entry_t *gmep2 = arg2;

	if (gmep1->guid < gmep2->guid)
		return (-1);
	else if (gmep1->guid > gmep2->guid)
		return (1);
	return (0);
}

static void
free_guid_map_onexit(void *arg)
{
	avl_tree_t *ca = arg;
	void *cookie = NULL;
	guid_map_entry_t *gmep;

	while ((gmep = avl_destroy_nodes(ca, &cookie)) != NULL) {
		dsl_dataset_long_rele(gmep->gme_ds, gmep);
		dsl_dataset_rele(gmep->gme_ds, gmep);
		kmem_free(gmep, sizeof (guid_map_entry_t));
	}
	avl_destroy(ca);
	kmem_free(ca, sizeof (avl_tree_t));
}
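
/*
 * The guid_to_ds_map supports dedup'd send streams: a DRR_WRITE_BYREF
 * record names earlier data only by (refguid, refobject, refoffset),
 * so the writer thread looks the refguid up in this AVL tree to find
 * the dataset to copy the referenced block from.
 */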

static int
receive_read(struct receive_arg *ra, int len, void *buf)
{
	int done = 0;

	/*
	 * The code doesn't rely on this (lengths being multiples of 8). See
	 * comment in dump_bytes.
	 */
	ASSERT0(len % 8);

	while (done < len) {
		ssize_t resid;

		ra->err = vn_rdwr(UIO_READ, ra->vp,
		    (char *)buf + done, len - done,
		    ra->voff, UIO_SYSSPACE, FAPPEND,
		    RLIM64_INFINITY, CRED(), &resid);

		if (resid == len - done) {
			/*
			 * Note: ECKSUM indicates that the receive
			 * was interrupted and can potentially be resumed.
			 */
			ra->err = SET_ERROR(ECKSUM);
		}
		ra->voff += len - done - resid;
		done = len - resid;
		if (ra->err != 0)
			return (ra->err);
	}

	ra->bytes_read += len;

	ASSERT3U(done, ==, len);
	return (0);
}
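
/*
 * E.g., if the sending side dies mid-stream, vn_rdwr() eventually
 * returns success with no progress (resid == len - done); we convert
 * that into ECKSUM so that a resumable receive is reported as
 * interrupted rather than as a generic I/O failure.
 */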
1794 */ 1795 ra->err = SET_ERROR(ECKSUM); 1796 } 1797 ra->voff += len - done - resid; 1798 done = len - resid; 1799 if (ra->err != 0) 1800 return (ra->err); 1801 } 1802 1803 ra->bytes_read += len; 1804 1805 ASSERT3U(done, ==, len); 1806 return (0); 1807 } 1808 1809 static void 1810 byteswap_record(dmu_replay_record_t *drr) 1811 { 1812 #define DO64(X) (drr->drr_u.X = BSWAP_64(drr->drr_u.X)) 1813 #define DO32(X) (drr->drr_u.X = BSWAP_32(drr->drr_u.X)) 1814 drr->drr_type = BSWAP_32(drr->drr_type); 1815 drr->drr_payloadlen = BSWAP_32(drr->drr_payloadlen); 1816 1817 switch (drr->drr_type) { 1818 case DRR_BEGIN: 1819 DO64(drr_begin.drr_magic); 1820 DO64(drr_begin.drr_versioninfo); 1821 DO64(drr_begin.drr_creation_time); 1822 DO32(drr_begin.drr_type); 1823 DO32(drr_begin.drr_flags); 1824 DO64(drr_begin.drr_toguid); 1825 DO64(drr_begin.drr_fromguid); 1826 break; 1827 case DRR_OBJECT: 1828 DO64(drr_object.drr_object); 1829 DO32(drr_object.drr_type); 1830 DO32(drr_object.drr_bonustype); 1831 DO32(drr_object.drr_blksz); 1832 DO32(drr_object.drr_bonuslen); 1833 DO64(drr_object.drr_toguid); 1834 break; 1835 case DRR_FREEOBJECTS: 1836 DO64(drr_freeobjects.drr_firstobj); 1837 DO64(drr_freeobjects.drr_numobjs); 1838 DO64(drr_freeobjects.drr_toguid); 1839 break; 1840 case DRR_WRITE: 1841 DO64(drr_write.drr_object); 1842 DO32(drr_write.drr_type); 1843 DO64(drr_write.drr_offset); 1844 DO64(drr_write.drr_length); 1845 DO64(drr_write.drr_toguid); 1846 ZIO_CHECKSUM_BSWAP(&drr->drr_u.drr_write.drr_key.ddk_cksum); 1847 DO64(drr_write.drr_key.ddk_prop); 1848 break; 1849 case DRR_WRITE_BYREF: 1850 DO64(drr_write_byref.drr_object); 1851 DO64(drr_write_byref.drr_offset); 1852 DO64(drr_write_byref.drr_length); 1853 DO64(drr_write_byref.drr_toguid); 1854 DO64(drr_write_byref.drr_refguid); 1855 DO64(drr_write_byref.drr_refobject); 1856 DO64(drr_write_byref.drr_refoffset); 1857 ZIO_CHECKSUM_BSWAP(&drr->drr_u.drr_write_byref. 1858 drr_key.ddk_cksum); 1859 DO64(drr_write_byref.drr_key.ddk_prop); 1860 break; 1861 case DRR_WRITE_EMBEDDED: 1862 DO64(drr_write_embedded.drr_object); 1863 DO64(drr_write_embedded.drr_offset); 1864 DO64(drr_write_embedded.drr_length); 1865 DO64(drr_write_embedded.drr_toguid); 1866 DO32(drr_write_embedded.drr_lsize); 1867 DO32(drr_write_embedded.drr_psize); 1868 break; 1869 case DRR_FREE: 1870 DO64(drr_free.drr_object); 1871 DO64(drr_free.drr_offset); 1872 DO64(drr_free.drr_length); 1873 DO64(drr_free.drr_toguid); 1874 break; 1875 case DRR_SPILL: 1876 DO64(drr_spill.drr_object); 1877 DO64(drr_spill.drr_length); 1878 DO64(drr_spill.drr_toguid); 1879 break; 1880 case DRR_END: 1881 DO64(drr_end.drr_toguid); 1882 ZIO_CHECKSUM_BSWAP(&drr->drr_u.drr_end.drr_checksum); 1883 break; 1884 } 1885 1886 if (drr->drr_type != DRR_BEGIN) { 1887 ZIO_CHECKSUM_BSWAP(&drr->drr_u.drr_checksum.drr_checksum); 1888 } 1889 1890 #undef DO64 1891 #undef DO32 1892 } 1893 1894 static inline uint8_t 1895 deduce_nblkptr(dmu_object_type_t bonus_type, uint64_t bonus_size) 1896 { 1897 if (bonus_type == DMU_OT_SA) { 1898 return (1); 1899 } else { 1900 return (1 + 1901 ((DN_MAX_BONUSLEN - bonus_size) >> SPA_BLKPTRSHIFT)); 1902 } 1903 } 1904 1905 static void 1906 save_resume_state(struct receive_writer_arg *rwa, 1907 uint64_t object, uint64_t offset, dmu_tx_t *tx) 1908 { 1909 int txgoff = dmu_tx_get_txg(tx) & TXG_MASK; 1910 1911 if (!rwa->resumable) 1912 return; 1913 1914 /* 1915 * We use ds_resume_bytes[] != 0 to indicate that we need to 1916 * update this on disk, so it must not be 0. 
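 *
 * Editorial note (not part of the original source): the ds_resume_*[]
 * fields are per-txg slots indexed by txgoff = dmu_tx_get_txg(tx) &
 * TXG_MASK above. A minimal sketch of the idea, assuming TXG_SIZE is
 * the power-of-two number of txgs that can be in flight at once (so
 * TXG_MASK == TXG_SIZE - 1):
 *
 *	txgoff = txg & TXG_MASK;	e.g. 1027 & 3 == 3 when TXG_SIZE == 4
 *	ds->ds_resume_object[txgoff] = object;
 *
 * Each open txg therefore records resume state in its own slot, and a
 * later txg syncing out cannot clobber what an earlier, still-open txg
 * wrote.
 *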
1917 */ 1918 ASSERT(rwa->bytes_read != 0); 1919 1920 /* 1921 * We only resume from write records, which have a valid 1922 * (non-meta-dnode) object number. 1923 */ 1924 ASSERT(object != 0); 1925 1926 /* 1927 * For resuming to work correctly, we must receive records in order, 1928 * sorted by object,offset. This is checked by the callers, but 1929 * assert it here for good measure. 1930 */ 1931 ASSERT3U(object, >=, rwa->os->os_dsl_dataset->ds_resume_object[txgoff]); 1932 ASSERT(object != rwa->os->os_dsl_dataset->ds_resume_object[txgoff] || 1933 offset >= rwa->os->os_dsl_dataset->ds_resume_offset[txgoff]); 1934 ASSERT3U(rwa->bytes_read, >=, 1935 rwa->os->os_dsl_dataset->ds_resume_bytes[txgoff]); 1936 1937 rwa->os->os_dsl_dataset->ds_resume_object[txgoff] = object; 1938 rwa->os->os_dsl_dataset->ds_resume_offset[txgoff] = offset; 1939 rwa->os->os_dsl_dataset->ds_resume_bytes[txgoff] = rwa->bytes_read; 1940 } 1941 1942 static int 1943 receive_object(struct receive_writer_arg *rwa, struct drr_object *drro, 1944 void *data) 1945 { 1946 dmu_object_info_t doi; 1947 dmu_tx_t *tx; 1948 uint64_t object; 1949 int err; 1950 1951 if (drro->drr_type == DMU_OT_NONE || 1952 !DMU_OT_IS_VALID(drro->drr_type) || 1953 !DMU_OT_IS_VALID(drro->drr_bonustype) || 1954 drro->drr_checksumtype >= ZIO_CHECKSUM_FUNCTIONS || 1955 drro->drr_compress >= ZIO_COMPRESS_FUNCTIONS || 1956 P2PHASE(drro->drr_blksz, SPA_MINBLOCKSIZE) || 1957 drro->drr_blksz < SPA_MINBLOCKSIZE || 1958 drro->drr_blksz > spa_maxblocksize(dmu_objset_spa(rwa->os)) || 1959 drro->drr_bonuslen > DN_MAX_BONUSLEN) { 1960 return (SET_ERROR(EINVAL)); 1961 } 1962 1963 err = dmu_object_info(rwa->os, drro->drr_object, &doi); 1964 1965 if (err != 0 && err != ENOENT) 1966 return (SET_ERROR(EINVAL)); 1967 object = err == 0 ? drro->drr_object : DMU_NEW_OBJECT; 1968 1969 /* 1970 * If we are losing blkptrs or changing the block size this must 1971 * be a new file instance. We must clear out the previous file 1972 * contents before we can change this type of metadata in the dnode. 
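 *
 * Editorial note (not part of the original source): a worked example of
 * the deduce_nblkptr() arithmetic above, assuming the classic 512-byte
 * dnode where DN_MAX_BONUSLEN is 320 and a block pointer occupies
 * (1 << SPA_BLKPTRSHIFT) = 128 bytes. A non-SA object with a 64-byte
 * bonus buffer gets
 *
 *	nblkptr = 1 + ((320 - 64) >> 7) = 1 + 2 = 3
 *
 * block pointers, while a full 320-byte bonus leaves only the one
 * mandatory block pointer. If the stream's bonus layout would shrink
 * nblkptr below what the existing dnode uses, block pointers would be
 * lost, hence the dmu_free_long_range() below.
 *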
1973 */ 1974 if (err == 0) { 1975 int nblkptr; 1976 1977 nblkptr = deduce_nblkptr(drro->drr_bonustype, 1978 drro->drr_bonuslen); 1979 1980 if (drro->drr_blksz != doi.doi_data_block_size || 1981 nblkptr < doi.doi_nblkptr) { 1982 err = dmu_free_long_range(rwa->os, drro->drr_object, 1983 0, DMU_OBJECT_END); 1984 if (err != 0) 1985 return (SET_ERROR(EINVAL)); 1986 } 1987 } 1988 1989 tx = dmu_tx_create(rwa->os); 1990 dmu_tx_hold_bonus(tx, object); 1991 err = dmu_tx_assign(tx, TXG_WAIT); 1992 if (err != 0) { 1993 dmu_tx_abort(tx); 1994 return (err); 1995 } 1996 1997 if (object == DMU_NEW_OBJECT) { 1998 /* currently free, want to be allocated */ 1999 err = dmu_object_claim(rwa->os, drro->drr_object, 2000 drro->drr_type, drro->drr_blksz, 2001 drro->drr_bonustype, drro->drr_bonuslen, tx); 2002 } else if (drro->drr_type != doi.doi_type || 2003 drro->drr_blksz != doi.doi_data_block_size || 2004 drro->drr_bonustype != doi.doi_bonus_type || 2005 drro->drr_bonuslen != doi.doi_bonus_size) { 2006 /* currently allocated, but with different properties */ 2007 err = dmu_object_reclaim(rwa->os, drro->drr_object, 2008 drro->drr_type, drro->drr_blksz, 2009 drro->drr_bonustype, drro->drr_bonuslen, tx); 2010 } 2011 if (err != 0) { 2012 dmu_tx_commit(tx); 2013 return (SET_ERROR(EINVAL)); 2014 } 2015 2016 dmu_object_set_checksum(rwa->os, drro->drr_object, 2017 drro->drr_checksumtype, tx); 2018 dmu_object_set_compress(rwa->os, drro->drr_object, 2019 drro->drr_compress, tx); 2020 2021 if (data != NULL) { 2022 dmu_buf_t *db; 2023 2024 VERIFY0(dmu_bonus_hold(rwa->os, drro->drr_object, FTAG, &db)); 2025 dmu_buf_will_dirty(db, tx); 2026 2027 ASSERT3U(db->db_size, >=, drro->drr_bonuslen); 2028 bcopy(data, db->db_data, drro->drr_bonuslen); 2029 if (rwa->byteswap) { 2030 dmu_object_byteswap_t byteswap = 2031 DMU_OT_BYTESWAP(drro->drr_bonustype); 2032 dmu_ot_byteswap[byteswap].ob_func(db->db_data, 2033 drro->drr_bonuslen); 2034 } 2035 dmu_buf_rele(db, FTAG); 2036 } 2037 dmu_tx_commit(tx); 2038 2039 return (0); 2040 } 2041 2042 /* ARGSUSED */ 2043 static int 2044 receive_freeobjects(struct receive_writer_arg *rwa, 2045 struct drr_freeobjects *drrfo) 2046 { 2047 uint64_t obj; 2048 int next_err = 0; 2049 2050 if (drrfo->drr_firstobj + drrfo->drr_numobjs < drrfo->drr_firstobj) 2051 return (SET_ERROR(EINVAL)); 2052 2053 for (obj = drrfo->drr_firstobj; 2054 obj < drrfo->drr_firstobj + drrfo->drr_numobjs && next_err == 0; 2055 next_err = dmu_object_next(rwa->os, &obj, FALSE, 0)) { 2056 int err; 2057 2058 if (dmu_object_info(rwa->os, obj, NULL) != 0) 2059 continue; 2060 2061 err = dmu_free_long_object(rwa->os, obj); 2062 if (err != 0) 2063 return (err); 2064 } 2065 if (next_err != ESRCH) 2066 return (next_err); 2067 return (0); 2068 } 2069 2070 static int 2071 receive_write(struct receive_writer_arg *rwa, struct drr_write *drrw, 2072 arc_buf_t *abuf) 2073 { 2074 dmu_tx_t *tx; 2075 int err; 2076 2077 if (drrw->drr_offset + drrw->drr_length < drrw->drr_offset || 2078 !DMU_OT_IS_VALID(drrw->drr_type)) 2079 return (SET_ERROR(EINVAL)); 2080 2081 /* 2082 * For resuming to work, records must be in increasing order 2083 * by (object, offset). 
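 *
 * Editorial note (not part of the original source): the comparison below
 * is a lexicographic (object, offset) ordering check. For example, after
 * a record for (object 5, offset 0x20000) has been received, records for
 * (5, 0x20000) and (6, 0) are accepted, while (5, 0x10000) and (4,
 * anything) fail with EINVAL. Equal keys are allowed because a resumed
 * stream restarts with the last record that was successfully received
 * (see the note near the save_resume_state() call below).
 *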
2084 */
2085 if (drrw->drr_object < rwa->last_object ||
2086 (drrw->drr_object == rwa->last_object &&
2087 drrw->drr_offset < rwa->last_offset)) {
2088 return (SET_ERROR(EINVAL));
2089 }
2090 rwa->last_object = drrw->drr_object;
2091 rwa->last_offset = drrw->drr_offset;
2092
2093 if (dmu_object_info(rwa->os, drrw->drr_object, NULL) != 0)
2094 return (SET_ERROR(EINVAL));
2095
2096 tx = dmu_tx_create(rwa->os);
2097
2098 dmu_tx_hold_write(tx, drrw->drr_object,
2099 drrw->drr_offset, drrw->drr_length);
2100 err = dmu_tx_assign(tx, TXG_WAIT);
2101 if (err != 0) {
2102 dmu_tx_abort(tx);
2103 return (err);
2104 }
2105 if (rwa->byteswap) {
2106 dmu_object_byteswap_t byteswap =
2107 DMU_OT_BYTESWAP(drrw->drr_type);
2108 dmu_ot_byteswap[byteswap].ob_func(abuf->b_data,
2109 drrw->drr_length);
2110 }
2111
2112 dmu_buf_t *bonus;
2113 if (dmu_bonus_hold(rwa->os, drrw->drr_object, FTAG, &bonus) != 0)
2114 return (SET_ERROR(EINVAL));
2115 dmu_assign_arcbuf(bonus, drrw->drr_offset, abuf, tx);
2116
2117 /*
2118 * Note: If the receive fails, we want the resume stream to start
2119 * with the same record that we last successfully received (as opposed
2120 * to the next record), so that we can verify that we are
2121 * resuming from the correct location.
2122 */
2123 save_resume_state(rwa, drrw->drr_object, drrw->drr_offset, tx);
2124 dmu_tx_commit(tx);
2125 dmu_buf_rele(bonus, FTAG);
2126
2127 return (0);
2128 }
2129
2130 /*
2131 * Handle a DRR_WRITE_BYREF record. This record is used in dedup'ed
2132 * streams to refer to a copy of the data that is already on the
2133 * system because it came in earlier in the stream. This function
2134 * finds the earlier copy of the data, and uses that copy instead of
2135 * data from the stream to fulfill this write.
2136 */
2137 static int
2138 receive_write_byref(struct receive_writer_arg *rwa,
2139 struct drr_write_byref *drrwbr)
2140 {
2141 dmu_tx_t *tx;
2142 int err;
2143 guid_map_entry_t gmesrch;
2144 guid_map_entry_t *gmep;
2145 avl_index_t where;
2146 objset_t *ref_os = NULL;
2147 dmu_buf_t *dbp;
2148
2149 if (drrwbr->drr_offset + drrwbr->drr_length < drrwbr->drr_offset)
2150 return (SET_ERROR(EINVAL));
2151
2152 /*
2153 * If the GUID of the referenced dataset is different from the
2154 * GUID of the target dataset, find the referenced dataset.
2155 */
2156 if (drrwbr->drr_toguid != drrwbr->drr_refguid) {
2157 gmesrch.guid = drrwbr->drr_refguid;
2158 if ((gmep = avl_find(rwa->guid_to_ds_map, &gmesrch,
2159 &where)) == NULL) {
2160 return (SET_ERROR(EINVAL));
2161 }
2162 if (dmu_objset_from_ds(gmep->gme_ds, &ref_os))
2163 return (SET_ERROR(EINVAL));
2164 } else {
2165 ref_os = rwa->os;
2166 }
2167
2168 err = dmu_buf_hold(ref_os, drrwbr->drr_refobject,
2169 drrwbr->drr_refoffset, FTAG, &dbp, DMU_READ_PREFETCH);
2170 if (err != 0)
2171 return (err);
2172
2173 tx = dmu_tx_create(rwa->os);
2174
2175 dmu_tx_hold_write(tx, drrwbr->drr_object,
2176 drrwbr->drr_offset, drrwbr->drr_length);
2177 err = dmu_tx_assign(tx, TXG_WAIT);
2178 if (err != 0) {
2179 dmu_tx_abort(tx);
2180 return (err);
2181 }
2182 dmu_write(rwa->os, drrwbr->drr_object,
2183 drrwbr->drr_offset, drrwbr->drr_length, dbp->db_data, tx);
2184 dmu_buf_rele(dbp, FTAG);
2185
2186 /* See comment in receive_write. */
2187 save_resume_state(rwa, drrwbr->drr_object, drrwbr->drr_offset, tx);
2188 dmu_tx_commit(tx);
2189 return (0);
2190 }
2191
2192 static int
2193 receive_write_embedded(struct receive_writer_arg *rwa,
2194 struct drr_write_embedded *drrwe, void *data)
2195 {
2196 dmu_tx_t *tx;
2197 int err;
2198
2199 if (drrwe->drr_offset + drrwe->drr_length < drrwe->drr_offset)
2200 return (SET_ERROR(EINVAL));
2201
2202 if (drrwe->drr_psize > BPE_PAYLOAD_SIZE)
2203 return (SET_ERROR(EINVAL));
2204
2205 if (drrwe->drr_etype >= NUM_BP_EMBEDDED_TYPES)
2206 return (SET_ERROR(EINVAL));
2207 if (drrwe->drr_compression >= ZIO_COMPRESS_FUNCTIONS)
2208 return (SET_ERROR(EINVAL));
2209
2210 tx = dmu_tx_create(rwa->os);
2211
2212 dmu_tx_hold_write(tx, drrwe->drr_object,
2213 drrwe->drr_offset, drrwe->drr_length);
2214 err = dmu_tx_assign(tx, TXG_WAIT);
2215 if (err != 0) {
2216 dmu_tx_abort(tx);
2217 return (err);
2218 }
2219
2220 dmu_write_embedded(rwa->os, drrwe->drr_object,
2221 drrwe->drr_offset, data, drrwe->drr_etype,
2222 drrwe->drr_compression, drrwe->drr_lsize, drrwe->drr_psize,
2223 rwa->byteswap ^ ZFS_HOST_BYTEORDER, tx);
2224
2225 /* See comment in receive_write. */
2226 save_resume_state(rwa, drrwe->drr_object, drrwe->drr_offset, tx);
2227 dmu_tx_commit(tx);
2228 return (0);
2229 }
2230
2231 static int
2232 receive_spill(struct receive_writer_arg *rwa, struct drr_spill *drrs,
2233 void *data)
2234 {
2235 dmu_tx_t *tx;
2236 dmu_buf_t *db, *db_spill;
2237 int err;
2238
2239 if (drrs->drr_length < SPA_MINBLOCKSIZE ||
2240 drrs->drr_length > spa_maxblocksize(dmu_objset_spa(rwa->os)))
2241 return (SET_ERROR(EINVAL));
2242
2243 if (dmu_object_info(rwa->os, drrs->drr_object, NULL) != 0)
2244 return (SET_ERROR(EINVAL));
2245
2246 VERIFY0(dmu_bonus_hold(rwa->os, drrs->drr_object, FTAG, &db));
2247 if ((err = dmu_spill_hold_by_bonus(db, FTAG, &db_spill)) != 0) {
2248 dmu_buf_rele(db, FTAG);
2249 return (err);
2250 }
2251
2252 tx = dmu_tx_create(rwa->os);
2253
2254 dmu_tx_hold_spill(tx, db->db_object);
2255
2256 err = dmu_tx_assign(tx, TXG_WAIT);
2257 if (err != 0) {
2258 dmu_buf_rele(db, FTAG);
2259 dmu_buf_rele(db_spill, FTAG);
2260 dmu_tx_abort(tx);
2261 return (err);
2262 }
2263 dmu_buf_will_dirty(db_spill, tx);
2264
2265 if (db_spill->db_size < drrs->drr_length)
2266 VERIFY0(dbuf_spill_set_blksz(db_spill,
2267 drrs->drr_length, tx));
2268 bcopy(data, db_spill->db_data, drrs->drr_length);
2269
2270 dmu_buf_rele(db, FTAG);
2271 dmu_buf_rele(db_spill, FTAG);
2272
2273 dmu_tx_commit(tx);
2274 return (0);
2275 }
2276
2277 /* ARGSUSED */
2278 static int
2279 receive_free(struct receive_writer_arg *rwa, struct drr_free *drrf)
2280 {
2281 int err;
2282
2283 if (drrf->drr_length != -1ULL &&
2284 drrf->drr_offset + drrf->drr_length < drrf->drr_offset)
2285 return (SET_ERROR(EINVAL));
2286
2287 if (dmu_object_info(rwa->os, drrf->drr_object, NULL) != 0)
2288 return (SET_ERROR(EINVAL));
2289
2290 err = dmu_free_long_range(rwa->os, drrf->drr_object,
2291 drrf->drr_offset, drrf->drr_length);
2292
2293 return (err);
2294 }
2295
2296 /* used to destroy the drc_ds on error */
2297 static void
2298 dmu_recv_cleanup_ds(dmu_recv_cookie_t *drc)
2299 {
2300 if (drc->drc_resumable) {
2301 /* wait for our resume state to be written to disk */
2302 txg_wait_synced(drc->drc_ds->ds_dir->dd_pool, 0);
2303 dsl_dataset_disown(drc->drc_ds, dmu_recv_tag);
2304 } else {
2305 char name[ZFS_MAX_DATASET_NAME_LEN];
2306 dsl_dataset_name(drc->drc_ds, name);
2307 dsl_dataset_disown(drc->drc_ds, dmu_recv_tag);
2308 (void) dsl_destroy_head(name);
2309 }
2310 }
2311
2312 static void
2313 receive_cksum(struct receive_arg *ra, int len, void *buf) 2314 { 2315 if (ra->byteswap) { 2316 fletcher_4_incremental_byteswap(buf, len, &ra->cksum); 2317 } else { 2318 fletcher_4_incremental_native(buf, len, &ra->cksum); 2319 } 2320 } 2321 2322 /* 2323 * Read the payload into a buffer of size len, and update the current record's 2324 * payload field. 2325 * Allocate ra->next_rrd and read the next record's header into 2326 * ra->next_rrd->header. 2327 * Verify checksum of payload and next record. 2328 */ 2329 static int 2330 receive_read_payload_and_next_header(struct receive_arg *ra, int len, void *buf) 2331 { 2332 int err; 2333 2334 if (len != 0) { 2335 ASSERT3U(len, <=, SPA_MAXBLOCKSIZE); 2336 err = receive_read(ra, len, buf); 2337 if (err != 0) 2338 return (err); 2339 receive_cksum(ra, len, buf); 2340 2341 /* note: rrd is NULL when reading the begin record's payload */ 2342 if (ra->rrd != NULL) { 2343 ra->rrd->payload = buf; 2344 ra->rrd->payload_size = len; 2345 ra->rrd->bytes_read = ra->bytes_read; 2346 } 2347 } 2348 2349 ra->prev_cksum = ra->cksum; 2350 2351 ra->next_rrd = kmem_zalloc(sizeof (*ra->next_rrd), KM_SLEEP); 2352 err = receive_read(ra, sizeof (ra->next_rrd->header), 2353 &ra->next_rrd->header); 2354 ra->next_rrd->bytes_read = ra->bytes_read; 2355 if (err != 0) { 2356 kmem_free(ra->next_rrd, sizeof (*ra->next_rrd)); 2357 ra->next_rrd = NULL; 2358 return (err); 2359 } 2360 if (ra->next_rrd->header.drr_type == DRR_BEGIN) { 2361 kmem_free(ra->next_rrd, sizeof (*ra->next_rrd)); 2362 ra->next_rrd = NULL; 2363 return (SET_ERROR(EINVAL)); 2364 } 2365 2366 /* 2367 * Note: checksum is of everything up to but not including the 2368 * checksum itself. 2369 */ 2370 ASSERT3U(offsetof(dmu_replay_record_t, drr_u.drr_checksum.drr_checksum), 2371 ==, sizeof (dmu_replay_record_t) - sizeof (zio_cksum_t)); 2372 receive_cksum(ra, 2373 offsetof(dmu_replay_record_t, drr_u.drr_checksum.drr_checksum), 2374 &ra->next_rrd->header); 2375 2376 zio_cksum_t cksum_orig = 2377 ra->next_rrd->header.drr_u.drr_checksum.drr_checksum; 2378 zio_cksum_t *cksump = 2379 &ra->next_rrd->header.drr_u.drr_checksum.drr_checksum; 2380 2381 if (ra->byteswap) 2382 byteswap_record(&ra->next_rrd->header); 2383 2384 if ((!ZIO_CHECKSUM_IS_ZERO(cksump)) && 2385 !ZIO_CHECKSUM_EQUAL(ra->cksum, *cksump)) { 2386 kmem_free(ra->next_rrd, sizeof (*ra->next_rrd)); 2387 ra->next_rrd = NULL; 2388 return (SET_ERROR(ECKSUM)); 2389 } 2390 2391 receive_cksum(ra, sizeof (cksum_orig), &cksum_orig); 2392 2393 return (0); 2394 } 2395 2396 static void 2397 objlist_create(struct objlist *list) 2398 { 2399 list_create(&list->list, sizeof (struct receive_objnode), 2400 offsetof(struct receive_objnode, node)); 2401 list->last_lookup = 0; 2402 } 2403 2404 static void 2405 objlist_destroy(struct objlist *list) 2406 { 2407 for (struct receive_objnode *n = list_remove_head(&list->list); 2408 n != NULL; n = list_remove_head(&list->list)) { 2409 kmem_free(n, sizeof (*n)); 2410 } 2411 list_destroy(&list->list); 2412 } 2413 2414 /* 2415 * This function looks through the objlist to see if the specified object number 2416 * is contained in the objlist. In the process, it will remove all object 2417 * numbers in the list that are smaller than the specified object number. Thus, 2418 * any lookup of an object number smaller than a previously looked up object 2419 * number will always return false; therefore, all lookups should be done in 2420 * ascending order. 
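 *
 * Editorial note (not part of the original source): a minimal usage
 * sketch under that ascending-order contract; the object numbers are
 * made up:
 *
 *	objlist_insert(&list, 10);
 *	objlist_insert(&list, 32);
 *	objlist_exists(&list, 9);	returns B_FALSE; 10 and 32 remain
 *	objlist_exists(&list, 10);	returns B_TRUE; 10 and 32 remain
 *	objlist_exists(&list, 32);	returns B_TRUE after trimming 10
 *	objlist_exists(&list, 10);	would trip the ASSERT (10 < 32)
 *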
2421 */
2422 static boolean_t
2423 objlist_exists(struct objlist *list, uint64_t object)
2424 {
2425 struct receive_objnode *node = list_head(&list->list);
2426 ASSERT3U(object, >=, list->last_lookup);
2427 list->last_lookup = object;
2428 while (node != NULL && node->object < object) {
2429 VERIFY3P(node, ==, list_remove_head(&list->list));
2430 kmem_free(node, sizeof (*node));
2431 node = list_head(&list->list);
2432 }
2433 return (node != NULL && node->object == object);
2434 }
2435
2436 /*
2437 * The objlist is a list of object numbers stored in ascending order. However,
2438 * the insertion of new object numbers does not seek out the correct location;
2439 * it simply appends the new number to the tail of the list. Thus, callers
2440 * must take care to insert new object numbers only in ascending
2441 * order.
2442 */
2443 static void
2444 objlist_insert(struct objlist *list, uint64_t object)
2445 {
2446 struct receive_objnode *node = kmem_zalloc(sizeof (*node), KM_SLEEP);
2447 node->object = object;
2448 #ifdef ZFS_DEBUG
2449 struct receive_objnode *last_object = list_tail(&list->list);
2450 uint64_t last_objnum = (last_object != NULL ? last_object->object : 0);
2451 ASSERT3U(node->object, >, last_objnum);
2452 #endif
2453 list_insert_tail(&list->list, node);
2454 }
2455
2456 /*
2457 * Issue the prefetch reads for any necessary indirect blocks.
2458 *
2459 * We use the object ignore list to tell us whether or not to issue prefetches
2460 * for a given object. We do this for both correctness (in case the blocksize
2461 * of an object has changed) and performance (if the object doesn't exist, don't
2462 * needlessly try to issue prefetches). We also trim the list as we go through
2463 * the stream to prevent it from growing to an unbounded size.
2464 *
2465 * The object numbers within will always be in sorted order, and any write
2466 * records we see will also be in sorted order, but they're not sorted with
2467 * respect to each other (i.e. we can get several object records before
2468 * receiving each object's write records). As a result, once we've reached a
2469 * given object number, we can safely remove any reference to lower object
2470 * numbers in the ignore list. In practice, we receive up to 32 object records
2471 * before receiving write records, so the list can have up to 32 nodes in it.
2472 */
2473 /* ARGSUSED */
2474 static void
2475 receive_read_prefetch(struct receive_arg *ra,
2476 uint64_t object, uint64_t offset, uint64_t length)
2477 {
2478 if (!objlist_exists(&ra->ignore_objlist, object)) {
2479 dmu_prefetch(ra->os, object, 1, offset, length,
2480 ZIO_PRIORITY_SYNC_READ);
2481 }
2482 }
2483
2484 /*
2485 * Read records off the stream, issuing any necessary prefetches.
2486 */
2487 static int
2488 receive_read_record(struct receive_arg *ra)
2489 {
2490 int err;
2491
2492 switch (ra->rrd->header.drr_type) {
2493 case DRR_OBJECT:
2494 {
2495 struct drr_object *drro = &ra->rrd->header.drr_u.drr_object;
2496 uint32_t size = P2ROUNDUP(drro->drr_bonuslen, 8);
2497 void *buf = kmem_zalloc(size, KM_SLEEP);
2498 dmu_object_info_t doi;
2499 err = receive_read_payload_and_next_header(ra, size, buf);
2500 if (err != 0) {
2501 kmem_free(buf, size);
2502 return (err);
2503 }
2504 err = dmu_object_info(ra->os, drro->drr_object, &doi);
2505 /*
2506 * See receive_read_prefetch for an explanation why we're
2507 * storing this object in the ignore_objlist.
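 *
 * Editorial note (not part of the original source): the blocksize case
 * is a correctness issue because dmu_prefetch() computes which indirect
 * blocks cover (offset, length) using the object's *current* block
 * size. If this stream is about to reclaim the object with a different
 * block size, prefetches issued under the old geometry would target the
 * wrong ranges, so the object is put on the ignore list instead.
 *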
2508 */ 2509 if (err == ENOENT || 2510 (err == 0 && doi.doi_data_block_size != drro->drr_blksz)) { 2511 objlist_insert(&ra->ignore_objlist, drro->drr_object); 2512 err = 0; 2513 } 2514 return (err); 2515 } 2516 case DRR_FREEOBJECTS: 2517 { 2518 err = receive_read_payload_and_next_header(ra, 0, NULL); 2519 return (err); 2520 } 2521 case DRR_WRITE: 2522 { 2523 struct drr_write *drrw = &ra->rrd->header.drr_u.drr_write; 2524 arc_buf_t *abuf = arc_loan_buf(dmu_objset_spa(ra->os), 2525 drrw->drr_length); 2526 2527 err = receive_read_payload_and_next_header(ra, 2528 drrw->drr_length, abuf->b_data); 2529 if (err != 0) { 2530 dmu_return_arcbuf(abuf); 2531 return (err); 2532 } 2533 ra->rrd->write_buf = abuf; 2534 receive_read_prefetch(ra, drrw->drr_object, drrw->drr_offset, 2535 drrw->drr_length); 2536 return (err); 2537 } 2538 case DRR_WRITE_BYREF: 2539 { 2540 struct drr_write_byref *drrwb = 2541 &ra->rrd->header.drr_u.drr_write_byref; 2542 err = receive_read_payload_and_next_header(ra, 0, NULL); 2543 receive_read_prefetch(ra, drrwb->drr_object, drrwb->drr_offset, 2544 drrwb->drr_length); 2545 return (err); 2546 } 2547 case DRR_WRITE_EMBEDDED: 2548 { 2549 struct drr_write_embedded *drrwe = 2550 &ra->rrd->header.drr_u.drr_write_embedded; 2551 uint32_t size = P2ROUNDUP(drrwe->drr_psize, 8); 2552 void *buf = kmem_zalloc(size, KM_SLEEP); 2553 2554 err = receive_read_payload_and_next_header(ra, size, buf); 2555 if (err != 0) { 2556 kmem_free(buf, size); 2557 return (err); 2558 } 2559 2560 receive_read_prefetch(ra, drrwe->drr_object, drrwe->drr_offset, 2561 drrwe->drr_length); 2562 return (err); 2563 } 2564 case DRR_FREE: 2565 { 2566 /* 2567 * It might be beneficial to prefetch indirect blocks here, but 2568 * we don't really have the data to decide for sure. 2569 */ 2570 err = receive_read_payload_and_next_header(ra, 0, NULL); 2571 return (err); 2572 } 2573 case DRR_END: 2574 { 2575 struct drr_end *drre = &ra->rrd->header.drr_u.drr_end; 2576 if (!ZIO_CHECKSUM_EQUAL(ra->prev_cksum, drre->drr_checksum)) 2577 return (SET_ERROR(ECKSUM)); 2578 return (0); 2579 } 2580 case DRR_SPILL: 2581 { 2582 struct drr_spill *drrs = &ra->rrd->header.drr_u.drr_spill; 2583 void *buf = kmem_zalloc(drrs->drr_length, KM_SLEEP); 2584 err = receive_read_payload_and_next_header(ra, drrs->drr_length, 2585 buf); 2586 if (err != 0) 2587 kmem_free(buf, drrs->drr_length); 2588 return (err); 2589 } 2590 default: 2591 return (SET_ERROR(EINVAL)); 2592 } 2593 } 2594 2595 /* 2596 * Commit the records to the pool. 2597 */ 2598 static int 2599 receive_process_record(struct receive_writer_arg *rwa, 2600 struct receive_record_arg *rrd) 2601 { 2602 int err; 2603 2604 /* Processing in order, therefore bytes_read should be increasing. 
*/
2605 ASSERT3U(rrd->bytes_read, >=, rwa->bytes_read);
2606 rwa->bytes_read = rrd->bytes_read;
2607
2608 switch (rrd->header.drr_type) {
2609 case DRR_OBJECT:
2610 {
2611 struct drr_object *drro = &rrd->header.drr_u.drr_object;
2612 err = receive_object(rwa, drro, rrd->payload);
2613 kmem_free(rrd->payload, rrd->payload_size);
2614 rrd->payload = NULL;
2615 return (err);
2616 }
2617 case DRR_FREEOBJECTS:
2618 {
2619 struct drr_freeobjects *drrfo =
2620 &rrd->header.drr_u.drr_freeobjects;
2621 return (receive_freeobjects(rwa, drrfo));
2622 }
2623 case DRR_WRITE:
2624 {
2625 struct drr_write *drrw = &rrd->header.drr_u.drr_write;
2626 err = receive_write(rwa, drrw, rrd->write_buf);
2627 /* if receive_write() is successful, it consumes the arc_buf */
2628 if (err != 0)
2629 dmu_return_arcbuf(rrd->write_buf);
2630 rrd->write_buf = NULL;
2631 rrd->payload = NULL;
2632 return (err);
2633 }
2634 case DRR_WRITE_BYREF:
2635 {
2636 struct drr_write_byref *drrwbr =
2637 &rrd->header.drr_u.drr_write_byref;
2638 return (receive_write_byref(rwa, drrwbr));
2639 }
2640 case DRR_WRITE_EMBEDDED:
2641 {
2642 struct drr_write_embedded *drrwe =
2643 &rrd->header.drr_u.drr_write_embedded;
2644 err = receive_write_embedded(rwa, drrwe, rrd->payload);
2645 kmem_free(rrd->payload, rrd->payload_size);
2646 rrd->payload = NULL;
2647 return (err);
2648 }
2649 case DRR_FREE:
2650 {
2651 struct drr_free *drrf = &rrd->header.drr_u.drr_free;
2652 return (receive_free(rwa, drrf));
2653 }
2654 case DRR_SPILL:
2655 {
2656 struct drr_spill *drrs = &rrd->header.drr_u.drr_spill;
2657 err = receive_spill(rwa, drrs, rrd->payload);
2658 kmem_free(rrd->payload, rrd->payload_size);
2659 rrd->payload = NULL;
2660 return (err);
2661 }
2662 default:
2663 return (SET_ERROR(EINVAL));
2664 }
2665 }
2666
2667 /*
2668 * dmu_recv_stream's worker thread; pull records off the queue, and then call
2669 * receive_process_record. When we're done, signal the main thread and exit.
2670 */
2671 static void
2672 receive_writer_thread(void *arg)
2673 {
2674 struct receive_writer_arg *rwa = arg;
2675 struct receive_record_arg *rrd;
2676 for (rrd = bqueue_dequeue(&rwa->q); !rrd->eos_marker;
2677 rrd = bqueue_dequeue(&rwa->q)) {
2678 /*
2679 * If there's an error, the main thread will stop putting things
2680 * on the queue, but we need to clear everything in it before we
2681 * can exit.
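 *
 * Editorial note (not part of the original source): a sketch of that
 * drain. Once rwa->err goes nonzero, each later record is only
 * unloaded, never applied, until the eos marker arrives:
 *
 *	record N fails		rwa->err = (nonzero)
 *	record N+1 (write)	dmu_return_arcbuf(write_buf)
 *	record N+2 (object)	kmem_free(payload, payload_size)
 *	eos marker		leave loop, cv_signal() the main thread
 *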
2682 */ 2683 if (rwa->err == 0) { 2684 rwa->err = receive_process_record(rwa, rrd); 2685 } else if (rrd->write_buf != NULL) { 2686 dmu_return_arcbuf(rrd->write_buf); 2687 rrd->write_buf = NULL; 2688 rrd->payload = NULL; 2689 } else if (rrd->payload != NULL) { 2690 kmem_free(rrd->payload, rrd->payload_size); 2691 rrd->payload = NULL; 2692 } 2693 kmem_free(rrd, sizeof (*rrd)); 2694 } 2695 kmem_free(rrd, sizeof (*rrd)); 2696 mutex_enter(&rwa->mutex); 2697 rwa->done = B_TRUE; 2698 cv_signal(&rwa->cv); 2699 mutex_exit(&rwa->mutex); 2700 } 2701 2702 static int 2703 resume_check(struct receive_arg *ra, nvlist_t *begin_nvl) 2704 { 2705 uint64_t val; 2706 objset_t *mos = dmu_objset_pool(ra->os)->dp_meta_objset; 2707 uint64_t dsobj = dmu_objset_id(ra->os); 2708 uint64_t resume_obj, resume_off; 2709 2710 if (nvlist_lookup_uint64(begin_nvl, 2711 "resume_object", &resume_obj) != 0 || 2712 nvlist_lookup_uint64(begin_nvl, 2713 "resume_offset", &resume_off) != 0) { 2714 return (SET_ERROR(EINVAL)); 2715 } 2716 VERIFY0(zap_lookup(mos, dsobj, 2717 DS_FIELD_RESUME_OBJECT, sizeof (val), 1, &val)); 2718 if (resume_obj != val) 2719 return (SET_ERROR(EINVAL)); 2720 VERIFY0(zap_lookup(mos, dsobj, 2721 DS_FIELD_RESUME_OFFSET, sizeof (val), 1, &val)); 2722 if (resume_off != val) 2723 return (SET_ERROR(EINVAL)); 2724 2725 return (0); 2726 } 2727 2728 /* 2729 * Read in the stream's records, one by one, and apply them to the pool. There 2730 * are two threads involved; the thread that calls this function will spin up a 2731 * worker thread, read the records off the stream one by one, and issue 2732 * prefetches for any necessary indirect blocks. It will then push the records 2733 * onto an internal blocking queue. The worker thread will pull the records off 2734 * the queue, and actually write the data into the DMU. This way, the worker 2735 * thread doesn't have to wait for reads to complete, since everything it needs 2736 * (the indirect blocks) will be prefetched. 2737 * 2738 * NB: callers *must* call dmu_recv_end() if this succeeds. 2739 */ 2740 int 2741 dmu_recv_stream(dmu_recv_cookie_t *drc, vnode_t *vp, offset_t *voffp, 2742 int cleanup_fd, uint64_t *action_handlep) 2743 { 2744 int err = 0; 2745 struct receive_arg ra = { 0 }; 2746 struct receive_writer_arg rwa = { 0 }; 2747 int featureflags; 2748 nvlist_t *begin_nvl = NULL; 2749 2750 ra.byteswap = drc->drc_byteswap; 2751 ra.cksum = drc->drc_cksum; 2752 ra.vp = vp; 2753 ra.voff = *voffp; 2754 2755 if (dsl_dataset_is_zapified(drc->drc_ds)) { 2756 (void) zap_lookup(drc->drc_ds->ds_dir->dd_pool->dp_meta_objset, 2757 drc->drc_ds->ds_object, DS_FIELD_RESUME_BYTES, 2758 sizeof (ra.bytes_read), 1, &ra.bytes_read); 2759 } 2760 2761 objlist_create(&ra.ignore_objlist); 2762 2763 /* these were verified in dmu_recv_begin */ 2764 ASSERT3U(DMU_GET_STREAM_HDRTYPE(drc->drc_drrb->drr_versioninfo), ==, 2765 DMU_SUBSTREAM); 2766 ASSERT3U(drc->drc_drrb->drr_type, <, DMU_OST_NUMTYPES); 2767 2768 /* 2769 * Open the objset we are modifying. 
2770 */
2771 VERIFY0(dmu_objset_from_ds(drc->drc_ds, &ra.os));
2772
2773 ASSERT(dsl_dataset_phys(drc->drc_ds)->ds_flags & DS_FLAG_INCONSISTENT);
2774
2775 featureflags = DMU_GET_FEATUREFLAGS(drc->drc_drrb->drr_versioninfo);
2776
2777 /* if this stream is dedup'ed, set up the avl tree for guid mapping */
2778 if (featureflags & DMU_BACKUP_FEATURE_DEDUP) {
2779 minor_t minor;
2780
2781 if (cleanup_fd == -1) {
2782 err = SET_ERROR(EBADF);
2783 goto out;
2784 }
2785 err = zfs_onexit_fd_hold(cleanup_fd, &minor);
2786 if (err != 0) {
2787 cleanup_fd = -1;
2788 goto out;
2789 }
2790
2791 if (*action_handlep == 0) {
2792 rwa.guid_to_ds_map =
2793 kmem_alloc(sizeof (avl_tree_t), KM_SLEEP);
2794 avl_create(rwa.guid_to_ds_map, guid_compare,
2795 sizeof (guid_map_entry_t),
2796 offsetof(guid_map_entry_t, avlnode));
2797 err = zfs_onexit_add_cb(minor,
2798 free_guid_map_onexit, rwa.guid_to_ds_map,
2799 action_handlep);
2800 if (err != 0)
2801 goto out;
2802 } else {
2803 err = zfs_onexit_cb_data(minor, *action_handlep,
2804 (void **)&rwa.guid_to_ds_map);
2805 if (err != 0)
2806 goto out;
2807 }
2808
2809 drc->drc_guid_to_ds_map = rwa.guid_to_ds_map;
2810 }
2811
2812 uint32_t payloadlen = drc->drc_drr_begin->drr_payloadlen;
2813 void *payload = NULL;
2814 if (payloadlen != 0)
2815 payload = kmem_alloc(payloadlen, KM_SLEEP);
2816
2817 err = receive_read_payload_and_next_header(&ra, payloadlen, payload);
2818 if (err != 0) {
2819 if (payloadlen != 0)
2820 kmem_free(payload, payloadlen);
2821 goto out;
2822 }
2823 if (payloadlen != 0) {
2824 err = nvlist_unpack(payload, payloadlen, &begin_nvl, KM_SLEEP);
2825 kmem_free(payload, payloadlen);
2826 if (err != 0)
2827 goto out;
2828 }
2829
2830 if (featureflags & DMU_BACKUP_FEATURE_RESUMING) {
2831 err = resume_check(&ra, begin_nvl);
2832 if (err != 0)
2833 goto out;
2834 }
2835
2836 (void) bqueue_init(&rwa.q, zfs_recv_queue_length,
2837 offsetof(struct receive_record_arg, node));
2838 cv_init(&rwa.cv, NULL, CV_DEFAULT, NULL);
2839 mutex_init(&rwa.mutex, NULL, MUTEX_DEFAULT, NULL);
2840 rwa.os = ra.os;
2841 rwa.byteswap = drc->drc_byteswap;
2842 rwa.resumable = drc->drc_resumable;
2843
2844 (void) thread_create(NULL, 0, receive_writer_thread, &rwa, 0, curproc,
2845 TS_RUN, minclsyspri);
2846 /*
2847 * We're reading rwa.err without locks, which is safe since we are the
2848 * only reader, and the worker thread is the only writer. It's ok if we
2849 * miss a write for an iteration or two of the loop, since the writer
2850 * thread will keep freeing records we send it until we send it an eos
2851 * marker.
2852 *
2853 * We can leave this loop in three ways: First, if rwa.err is
2854 * non-zero. In that case, the writer thread will free the rrd we just
2855 * pushed. Second, if we're interrupted; in that case, either it's the
2856 * first loop and ra.rrd was never allocated, or it's later, and ra.rrd
2857 * has been handed off to the writer thread who will free it. Finally,
2858 * if receive_read_record fails or we're at the end of the stream, then
2859 * we free ra.rrd and exit.
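 *
 * Editorial note (not part of the original source): the reader/writer
 * handoff in the loop below, sketched with all error handling elided:
 *
 *	main (reader) thread			writer thread
 *	--------------------			-------------
 *	receive_read_record(&ra);		rrd = bqueue_dequeue(&rwa->q);
 *	bqueue_enqueue(&rwa.q, ra.rrd, sz);	receive_process_record(rwa, rrd);
 *	... repeat until DRR_END ...		... repeat until eos ...
 *	enqueue eos marker			set rwa->done, cv_signal()
 *
 * The bqueue also bounds memory: enqueue blocks once roughly
 * zfs_recv_queue_length bytes of records are in flight, so a slow
 * writer throttles the reader.
 *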
2860 */ 2861 while (rwa.err == 0) { 2862 if (issig(JUSTLOOKING) && issig(FORREAL)) { 2863 err = SET_ERROR(EINTR); 2864 break; 2865 } 2866 2867 ASSERT3P(ra.rrd, ==, NULL); 2868 ra.rrd = ra.next_rrd; 2869 ra.next_rrd = NULL; 2870 /* Allocates and loads header into ra.next_rrd */ 2871 err = receive_read_record(&ra); 2872 2873 if (ra.rrd->header.drr_type == DRR_END || err != 0) { 2874 kmem_free(ra.rrd, sizeof (*ra.rrd)); 2875 ra.rrd = NULL; 2876 break; 2877 } 2878 2879 bqueue_enqueue(&rwa.q, ra.rrd, 2880 sizeof (struct receive_record_arg) + ra.rrd->payload_size); 2881 ra.rrd = NULL; 2882 } 2883 if (ra.next_rrd == NULL) 2884 ra.next_rrd = kmem_zalloc(sizeof (*ra.next_rrd), KM_SLEEP); 2885 ra.next_rrd->eos_marker = B_TRUE; 2886 bqueue_enqueue(&rwa.q, ra.next_rrd, 1); 2887 2888 mutex_enter(&rwa.mutex); 2889 while (!rwa.done) { 2890 cv_wait(&rwa.cv, &rwa.mutex); 2891 } 2892 mutex_exit(&rwa.mutex); 2893 2894 cv_destroy(&rwa.cv); 2895 mutex_destroy(&rwa.mutex); 2896 bqueue_destroy(&rwa.q); 2897 if (err == 0) 2898 err = rwa.err; 2899 2900 out: 2901 nvlist_free(begin_nvl); 2902 if ((featureflags & DMU_BACKUP_FEATURE_DEDUP) && (cleanup_fd != -1)) 2903 zfs_onexit_fd_rele(cleanup_fd); 2904 2905 if (err != 0) { 2906 /* 2907 * Clean up references. If receive is not resumable, 2908 * destroy what we created, so we don't leave it in 2909 * the inconsistent state. 2910 */ 2911 dmu_recv_cleanup_ds(drc); 2912 } 2913 2914 *voffp = ra.voff; 2915 objlist_destroy(&ra.ignore_objlist); 2916 return (err); 2917 } 2918 2919 static int 2920 dmu_recv_end_check(void *arg, dmu_tx_t *tx) 2921 { 2922 dmu_recv_cookie_t *drc = arg; 2923 dsl_pool_t *dp = dmu_tx_pool(tx); 2924 int error; 2925 2926 ASSERT3P(drc->drc_ds->ds_owner, ==, dmu_recv_tag); 2927 2928 if (!drc->drc_newfs) { 2929 dsl_dataset_t *origin_head; 2930 2931 error = dsl_dataset_hold(dp, drc->drc_tofs, FTAG, &origin_head); 2932 if (error != 0) 2933 return (error); 2934 if (drc->drc_force) { 2935 /* 2936 * We will destroy any snapshots in tofs (i.e. before 2937 * origin_head) that are after the origin (which is 2938 * the snap before drc_ds, because drc_ds can not 2939 * have any snaps of its own). 
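 *
 * Editorial note (not part of the original source): an illustration
 * with made-up snapshot names. If tofs has snapshots A <- B <- C
 * (oldest to newest) and the stream's origin is A, then drc_ds is a
 * clone of A and a forced receive must destroy C and B. The loop below
 * walks origin_head's ds_prev_snap_obj chain newest-first, checking C
 * and then B, and stops when it reaches drc_ds's own previous
 * snapshot, A. The matching loop in dmu_recv_end_sync() performs the
 * actual destruction.
 *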
2940 */ 2941 uint64_t obj; 2942 2943 obj = dsl_dataset_phys(origin_head)->ds_prev_snap_obj; 2944 while (obj != 2945 dsl_dataset_phys(drc->drc_ds)->ds_prev_snap_obj) { 2946 dsl_dataset_t *snap; 2947 error = dsl_dataset_hold_obj(dp, obj, FTAG, 2948 &snap); 2949 if (error != 0) 2950 break; 2951 if (snap->ds_dir != origin_head->ds_dir) 2952 error = SET_ERROR(EINVAL); 2953 if (error == 0) { 2954 error = dsl_destroy_snapshot_check_impl( 2955 snap, B_FALSE); 2956 } 2957 obj = dsl_dataset_phys(snap)->ds_prev_snap_obj; 2958 dsl_dataset_rele(snap, FTAG); 2959 if (error != 0) 2960 break; 2961 } 2962 if (error != 0) { 2963 dsl_dataset_rele(origin_head, FTAG); 2964 return (error); 2965 } 2966 } 2967 error = dsl_dataset_clone_swap_check_impl(drc->drc_ds, 2968 origin_head, drc->drc_force, drc->drc_owner, tx); 2969 if (error != 0) { 2970 dsl_dataset_rele(origin_head, FTAG); 2971 return (error); 2972 } 2973 error = dsl_dataset_snapshot_check_impl(origin_head, 2974 drc->drc_tosnap, tx, B_TRUE, 1, drc->drc_cred); 2975 dsl_dataset_rele(origin_head, FTAG); 2976 if (error != 0) 2977 return (error); 2978 2979 error = dsl_destroy_head_check_impl(drc->drc_ds, 1); 2980 } else { 2981 error = dsl_dataset_snapshot_check_impl(drc->drc_ds, 2982 drc->drc_tosnap, tx, B_TRUE, 1, drc->drc_cred); 2983 } 2984 return (error); 2985 } 2986 2987 static void 2988 dmu_recv_end_sync(void *arg, dmu_tx_t *tx) 2989 { 2990 dmu_recv_cookie_t *drc = arg; 2991 dsl_pool_t *dp = dmu_tx_pool(tx); 2992 2993 spa_history_log_internal_ds(drc->drc_ds, "finish receiving", 2994 tx, "snap=%s", drc->drc_tosnap); 2995 2996 if (!drc->drc_newfs) { 2997 dsl_dataset_t *origin_head; 2998 2999 VERIFY0(dsl_dataset_hold(dp, drc->drc_tofs, FTAG, 3000 &origin_head)); 3001 3002 if (drc->drc_force) { 3003 /* 3004 * Destroy any snapshots of drc_tofs (origin_head) 3005 * after the origin (the snap before drc_ds). 
3006 */ 3007 uint64_t obj; 3008 3009 obj = dsl_dataset_phys(origin_head)->ds_prev_snap_obj; 3010 while (obj != 3011 dsl_dataset_phys(drc->drc_ds)->ds_prev_snap_obj) { 3012 dsl_dataset_t *snap; 3013 VERIFY0(dsl_dataset_hold_obj(dp, obj, FTAG, 3014 &snap)); 3015 ASSERT3P(snap->ds_dir, ==, origin_head->ds_dir); 3016 obj = dsl_dataset_phys(snap)->ds_prev_snap_obj; 3017 dsl_destroy_snapshot_sync_impl(snap, 3018 B_FALSE, tx); 3019 dsl_dataset_rele(snap, FTAG); 3020 } 3021 } 3022 VERIFY3P(drc->drc_ds->ds_prev, ==, 3023 origin_head->ds_prev); 3024 3025 dsl_dataset_clone_swap_sync_impl(drc->drc_ds, 3026 origin_head, tx); 3027 dsl_dataset_snapshot_sync_impl(origin_head, 3028 drc->drc_tosnap, tx); 3029 3030 /* set snapshot's creation time and guid */ 3031 dmu_buf_will_dirty(origin_head->ds_prev->ds_dbuf, tx); 3032 dsl_dataset_phys(origin_head->ds_prev)->ds_creation_time = 3033 drc->drc_drrb->drr_creation_time; 3034 dsl_dataset_phys(origin_head->ds_prev)->ds_guid = 3035 drc->drc_drrb->drr_toguid; 3036 dsl_dataset_phys(origin_head->ds_prev)->ds_flags &= 3037 ~DS_FLAG_INCONSISTENT; 3038 3039 dmu_buf_will_dirty(origin_head->ds_dbuf, tx); 3040 dsl_dataset_phys(origin_head)->ds_flags &= 3041 ~DS_FLAG_INCONSISTENT; 3042 3043 dsl_dataset_rele(origin_head, FTAG); 3044 dsl_destroy_head_sync_impl(drc->drc_ds, tx); 3045 3046 if (drc->drc_owner != NULL) 3047 VERIFY3P(origin_head->ds_owner, ==, drc->drc_owner); 3048 } else { 3049 dsl_dataset_t *ds = drc->drc_ds; 3050 3051 dsl_dataset_snapshot_sync_impl(ds, drc->drc_tosnap, tx); 3052 3053 /* set snapshot's creation time and guid */ 3054 dmu_buf_will_dirty(ds->ds_prev->ds_dbuf, tx); 3055 dsl_dataset_phys(ds->ds_prev)->ds_creation_time = 3056 drc->drc_drrb->drr_creation_time; 3057 dsl_dataset_phys(ds->ds_prev)->ds_guid = 3058 drc->drc_drrb->drr_toguid; 3059 dsl_dataset_phys(ds->ds_prev)->ds_flags &= 3060 ~DS_FLAG_INCONSISTENT; 3061 3062 dmu_buf_will_dirty(ds->ds_dbuf, tx); 3063 dsl_dataset_phys(ds)->ds_flags &= ~DS_FLAG_INCONSISTENT; 3064 if (dsl_dataset_has_resume_receive_state(ds)) { 3065 (void) zap_remove(dp->dp_meta_objset, ds->ds_object, 3066 DS_FIELD_RESUME_FROMGUID, tx); 3067 (void) zap_remove(dp->dp_meta_objset, ds->ds_object, 3068 DS_FIELD_RESUME_OBJECT, tx); 3069 (void) zap_remove(dp->dp_meta_objset, ds->ds_object, 3070 DS_FIELD_RESUME_OFFSET, tx); 3071 (void) zap_remove(dp->dp_meta_objset, ds->ds_object, 3072 DS_FIELD_RESUME_BYTES, tx); 3073 (void) zap_remove(dp->dp_meta_objset, ds->ds_object, 3074 DS_FIELD_RESUME_TOGUID, tx); 3075 (void) zap_remove(dp->dp_meta_objset, ds->ds_object, 3076 DS_FIELD_RESUME_TONAME, tx); 3077 } 3078 } 3079 drc->drc_newsnapobj = dsl_dataset_phys(drc->drc_ds)->ds_prev_snap_obj; 3080 /* 3081 * Release the hold from dmu_recv_begin. This must be done before 3082 * we return to open context, so that when we free the dataset's dnode, 3083 * we can evict its bonus buffer. 
3084 */ 3085 dsl_dataset_disown(drc->drc_ds, dmu_recv_tag); 3086 drc->drc_ds = NULL; 3087 } 3088 3089 static int 3090 add_ds_to_guidmap(const char *name, avl_tree_t *guid_map, uint64_t snapobj) 3091 { 3092 dsl_pool_t *dp; 3093 dsl_dataset_t *snapds; 3094 guid_map_entry_t *gmep; 3095 int err; 3096 3097 ASSERT(guid_map != NULL); 3098 3099 err = dsl_pool_hold(name, FTAG, &dp); 3100 if (err != 0) 3101 return (err); 3102 gmep = kmem_alloc(sizeof (*gmep), KM_SLEEP); 3103 err = dsl_dataset_hold_obj(dp, snapobj, gmep, &snapds); 3104 if (err == 0) { 3105 gmep->guid = dsl_dataset_phys(snapds)->ds_guid; 3106 gmep->gme_ds = snapds; 3107 avl_add(guid_map, gmep); 3108 dsl_dataset_long_hold(snapds, gmep); 3109 } else { 3110 kmem_free(gmep, sizeof (*gmep)); 3111 } 3112 3113 dsl_pool_rele(dp, FTAG); 3114 return (err); 3115 } 3116 3117 static int dmu_recv_end_modified_blocks = 3; 3118 3119 static int 3120 dmu_recv_existing_end(dmu_recv_cookie_t *drc) 3121 { 3122 int error; 3123 3124 #ifdef _KERNEL 3125 /* 3126 * We will be destroying the ds; make sure its origin is unmounted if 3127 * necessary. 3128 */ 3129 char name[ZFS_MAX_DATASET_NAME_LEN]; 3130 dsl_dataset_name(drc->drc_ds, name); 3131 zfs_destroy_unmount_origin(name); 3132 #endif 3133 3134 error = dsl_sync_task(drc->drc_tofs, 3135 dmu_recv_end_check, dmu_recv_end_sync, drc, 3136 dmu_recv_end_modified_blocks, ZFS_SPACE_CHECK_NORMAL); 3137 3138 if (error != 0) 3139 dmu_recv_cleanup_ds(drc); 3140 return (error); 3141 } 3142 3143 static int 3144 dmu_recv_new_end(dmu_recv_cookie_t *drc) 3145 { 3146 int error; 3147 3148 error = dsl_sync_task(drc->drc_tofs, 3149 dmu_recv_end_check, dmu_recv_end_sync, drc, 3150 dmu_recv_end_modified_blocks, ZFS_SPACE_CHECK_NORMAL); 3151 3152 if (error != 0) { 3153 dmu_recv_cleanup_ds(drc); 3154 } else if (drc->drc_guid_to_ds_map != NULL) { 3155 (void) add_ds_to_guidmap(drc->drc_tofs, 3156 drc->drc_guid_to_ds_map, 3157 drc->drc_newsnapobj); 3158 } 3159 return (error); 3160 } 3161 3162 int 3163 dmu_recv_end(dmu_recv_cookie_t *drc, void *owner) 3164 { 3165 drc->drc_owner = owner; 3166 3167 if (drc->drc_newfs) 3168 return (dmu_recv_new_end(drc)); 3169 else 3170 return (dmu_recv_existing_end(drc)); 3171 } 3172 3173 /* 3174 * Return TRUE if this objset is currently being received into. 3175 */ 3176 boolean_t 3177 dmu_objset_is_receiving(objset_t *os) 3178 { 3179 return (os->os_dsl_dataset != NULL && 3180 os->os_dsl_dataset->ds_owner == dmu_recv_tag); 3181 }