/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2013 by Delphix. All rights reserved.
 * Copyright (c) 2012, Joyent, Inc. All rights reserved.
 */

#include <sys/dmu.h>
#include <sys/dmu_impl.h>
#include <sys/dmu_tx.h>
#include <sys/dbuf.h>
#include <sys/dnode.h>
#include <sys/zfs_context.h>
#include <sys/dmu_objset.h>
#include <sys/dmu_traverse.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_synctask.h>
#include <sys/zfs_ioctl.h>
#include <sys/zap.h>
#include <sys/zio_checksum.h>
#include <sys/zfs_znode.h>
#include <zfs_fletcher.h>
#include <sys/avl.h>
#include <sys/ddt.h>
#include <sys/zfs_onexit.h>
#include <sys/dmu_send.h>
#include <sys/dsl_destroy.h>

/* Set this tunable to TRUE to replace corrupt data with 0x2f5baddb10c */
int zfs_send_corrupt_data = B_FALSE;

static char *dmu_recv_tag = "dmu_recv_tag";
static const char *recv_clone_name = "%recv";

static int
dump_bytes(dmu_sendarg_t *dsp, void *buf, int len)
{
        dsl_dataset_t *ds = dsp->dsa_os->os_dsl_dataset;
        ssize_t resid; /* have to get resid to get detailed errno */
        ASSERT0(len % 8);

        fletcher_4_incremental_native(buf, len, &dsp->dsa_zc);
        dsp->dsa_err = vn_rdwr(UIO_WRITE, dsp->dsa_vp,
            (caddr_t)buf, len,
            0, UIO_SYSSPACE, FAPPEND, RLIM64_INFINITY, CRED(), &resid);

        mutex_enter(&ds->ds_sendstream_lock);
        *dsp->dsa_off += len;
        mutex_exit(&ds->ds_sendstream_lock);

        return (dsp->dsa_err);
}

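/*
 * Queue up a DRR_FREE record for the given range, merging it with the
 * pending free record when the two are adjacent in the same object.
 * A length of -1ULL means "free to the end of the object".
 */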
static int
dump_free(dmu_sendarg_t *dsp, uint64_t object, uint64_t offset,
    uint64_t length)
{
        struct drr_free *drrf = &(dsp->dsa_drr->drr_u.drr_free);

        /*
         * When we receive a free record, dbuf_free_range() assumes
         * that the receiving system doesn't have any dbufs in the range
         * being freed. This is always true because there is a one-record
         * constraint: we only send one WRITE record for any given
         * object+offset. We know that the one-record constraint is
         * true because we always send data in increasing order by
         * object,offset.
         *
         * If the increasing-order constraint ever changes, we should find
         * another way to assert that the one-record constraint is still
         * satisfied.
         */
        ASSERT(object > dsp->dsa_last_data_object ||
            (object == dsp->dsa_last_data_object &&
            offset > dsp->dsa_last_data_offset));

        /*
         * If we are doing a non-incremental send, then there can't
         * be any data in the dataset we're receiving into. Therefore
         * a free record would simply be a no-op. Save space by not
         * sending it to begin with.
         */
        if (!dsp->dsa_incremental)
                return (0);

        if (length != -1ULL && offset + length < offset)
                length = -1ULL;

        /*
         * If there is a pending op, but it's not PENDING_FREE, push it out,
         * since free block aggregation can only be done for blocks of the
         * same type (i.e., DRR_FREE records can only be aggregated with
         * other DRR_FREE records; DRR_FREEOBJECTS records can only be
         * aggregated with other DRR_FREEOBJECTS records).
         */
        if (dsp->dsa_pending_op != PENDING_NONE &&
            dsp->dsa_pending_op != PENDING_FREE) {
                if (dump_bytes(dsp, dsp->dsa_drr,
                    sizeof (dmu_replay_record_t)) != 0)
                        return (SET_ERROR(EINTR));
                dsp->dsa_pending_op = PENDING_NONE;
        }

        if (dsp->dsa_pending_op == PENDING_FREE) {
                /*
                 * There should never be a PENDING_FREE if length is -1
                 * (because dump_dnode is the only place where this
                 * function is called with a -1, and only after flushing
                 * any pending record).
                 */
                ASSERT(length != -1ULL);
                /*
                 * Check to see whether this free block can be aggregated
                 * with the pending one.
                 */
                if (drrf->drr_object == object && drrf->drr_offset +
                    drrf->drr_length == offset) {
                        drrf->drr_length += length;
                        return (0);
                } else {
                        /* not a continuation.  Push out pending record */
                        if (dump_bytes(dsp, dsp->dsa_drr,
                            sizeof (dmu_replay_record_t)) != 0)
                                return (SET_ERROR(EINTR));
                        dsp->dsa_pending_op = PENDING_NONE;
                }
        }
        /* create a FREE record and make it pending */
        bzero(dsp->dsa_drr, sizeof (dmu_replay_record_t));
        dsp->dsa_drr->drr_type = DRR_FREE;
        drrf->drr_object = object;
        drrf->drr_offset = offset;
        drrf->drr_length = length;
        drrf->drr_toguid = dsp->dsa_toguid;
        if (length == -1ULL) {
                if (dump_bytes(dsp, dsp->dsa_drr,
                    sizeof (dmu_replay_record_t)) != 0)
                        return (SET_ERROR(EINTR));
        } else {
                dsp->dsa_pending_op = PENDING_FREE;
        }

        return (0);
}

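/*
 * Emit a DRR_WRITE record for one logical block, followed by the block's
 * payload. Any pending free aggregation is flushed first, since records
 * of different types cannot be combined.
 */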
static int
dump_data(dmu_sendarg_t *dsp, dmu_object_type_t type,
    uint64_t object, uint64_t offset, int blksz, const blkptr_t *bp, void *data)
{
        struct drr_write *drrw = &(dsp->dsa_drr->drr_u.drr_write);

        /*
         * We send data in increasing object, offset order.
         * See comment in dump_free() for details.
         */
        ASSERT(object > dsp->dsa_last_data_object ||
            (object == dsp->dsa_last_data_object &&
            offset > dsp->dsa_last_data_offset));
        dsp->dsa_last_data_object = object;
        dsp->dsa_last_data_offset = offset + blksz - 1;

        /*
         * If there is any kind of pending aggregation (currently either
         * a grouping of free objects or free blocks), push it out to
         * the stream, since aggregation can't be done across operations
         * of different types.
         */
        if (dsp->dsa_pending_op != PENDING_NONE) {
                if (dump_bytes(dsp, dsp->dsa_drr,
                    sizeof (dmu_replay_record_t)) != 0)
                        return (SET_ERROR(EINTR));
                dsp->dsa_pending_op = PENDING_NONE;
        }
        /* write a DATA record */
        bzero(dsp->dsa_drr, sizeof (dmu_replay_record_t));
        dsp->dsa_drr->drr_type = DRR_WRITE;
        drrw->drr_object = object;
        drrw->drr_type = type;
        drrw->drr_offset = offset;
        drrw->drr_length = blksz;
        drrw->drr_toguid = dsp->dsa_toguid;
        drrw->drr_checksumtype = BP_GET_CHECKSUM(bp);
        if (zio_checksum_table[drrw->drr_checksumtype].ci_dedup)
                drrw->drr_checksumflags |= DRR_CHECKSUM_DEDUP;
        DDK_SET_LSIZE(&drrw->drr_key, BP_GET_LSIZE(bp));
        DDK_SET_PSIZE(&drrw->drr_key, BP_GET_PSIZE(bp));
        DDK_SET_COMPRESS(&drrw->drr_key, BP_GET_COMPRESS(bp));
        drrw->drr_key.ddk_cksum = bp->blk_cksum;

        if (dump_bytes(dsp, dsp->dsa_drr, sizeof (dmu_replay_record_t)) != 0)
                return (SET_ERROR(EINTR));
        if (dump_bytes(dsp, data, blksz) != 0)
                return (SET_ERROR(EINTR));
        return (0);
}

static int
dump_spill(dmu_sendarg_t *dsp, uint64_t object, int blksz, void *data)
{
        struct drr_spill *drrs = &(dsp->dsa_drr->drr_u.drr_spill);

        if (dsp->dsa_pending_op != PENDING_NONE) {
                if (dump_bytes(dsp, dsp->dsa_drr,
                    sizeof (dmu_replay_record_t)) != 0)
                        return (SET_ERROR(EINTR));
                dsp->dsa_pending_op = PENDING_NONE;
        }

        /* write a SPILL record */
        bzero(dsp->dsa_drr, sizeof (dmu_replay_record_t));
        dsp->dsa_drr->drr_type = DRR_SPILL;
        drrs->drr_object = object;
        drrs->drr_length = blksz;
        drrs->drr_toguid = dsp->dsa_toguid;

        if (dump_bytes(dsp, dsp->dsa_drr, sizeof (dmu_replay_record_t)))
                return (SET_ERROR(EINTR));
        if (dump_bytes(dsp, data, blksz))
                return (SET_ERROR(EINTR));
        return (0);
}

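/*
 * Queue up a DRR_FREEOBJECTS record, extending the pending record when
 * the new range of object numbers directly follows it.
 */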
static int
dump_freeobjects(dmu_sendarg_t *dsp, uint64_t firstobj, uint64_t numobjs)
{
        struct drr_freeobjects *drrfo = &(dsp->dsa_drr->drr_u.drr_freeobjects);

        /* See comment in dump_free(). */
        if (!dsp->dsa_incremental)
                return (0);

        /*
         * If there is a pending op, but it's not PENDING_FREEOBJECTS,
         * push it out, since free block aggregation can only be done for
         * blocks of the same type (i.e., DRR_FREE records can only be
         * aggregated with other DRR_FREE records; DRR_FREEOBJECTS records
         * can only be aggregated with other DRR_FREEOBJECTS records).
         */
        if (dsp->dsa_pending_op != PENDING_NONE &&
            dsp->dsa_pending_op != PENDING_FREEOBJECTS) {
                if (dump_bytes(dsp, dsp->dsa_drr,
                    sizeof (dmu_replay_record_t)) != 0)
                        return (SET_ERROR(EINTR));
                dsp->dsa_pending_op = PENDING_NONE;
        }
        if (dsp->dsa_pending_op == PENDING_FREEOBJECTS) {
                /*
                 * See whether this free object array can be aggregated
                 * with the pending one.
                 */
                if (drrfo->drr_firstobj + drrfo->drr_numobjs == firstobj) {
                        drrfo->drr_numobjs += numobjs;
                        return (0);
                } else {
                        /* can't be aggregated.  Push out pending record */
                        if (dump_bytes(dsp, dsp->dsa_drr,
                            sizeof (dmu_replay_record_t)) != 0)
                                return (SET_ERROR(EINTR));
                        dsp->dsa_pending_op = PENDING_NONE;
                }
        }

        /* write a FREEOBJECTS record */
        bzero(dsp->dsa_drr, sizeof (dmu_replay_record_t));
        dsp->dsa_drr->drr_type = DRR_FREEOBJECTS;
        drrfo->drr_firstobj = firstobj;
        drrfo->drr_numobjs = numobjs;
        drrfo->drr_toguid = dsp->dsa_toguid;

        dsp->dsa_pending_op = PENDING_FREEOBJECTS;

        return (0);
}

static int
dump_dnode(dmu_sendarg_t *dsp, uint64_t object, dnode_phys_t *dnp)
{
        struct drr_object *drro = &(dsp->dsa_drr->drr_u.drr_object);

        if (dnp == NULL || dnp->dn_type == DMU_OT_NONE)
                return (dump_freeobjects(dsp, object, 1));

        if (dsp->dsa_pending_op != PENDING_NONE) {
                if (dump_bytes(dsp, dsp->dsa_drr,
                    sizeof (dmu_replay_record_t)) != 0)
                        return (SET_ERROR(EINTR));
                dsp->dsa_pending_op = PENDING_NONE;
        }

        /* write an OBJECT record */
        bzero(dsp->dsa_drr, sizeof (dmu_replay_record_t));
        dsp->dsa_drr->drr_type = DRR_OBJECT;
        drro->drr_object = object;
        drro->drr_type = dnp->dn_type;
        drro->drr_bonustype = dnp->dn_bonustype;
        drro->drr_blksz = dnp->dn_datablkszsec << SPA_MINBLOCKSHIFT;
        drro->drr_bonuslen = dnp->dn_bonuslen;
        drro->drr_checksumtype = dnp->dn_checksum;
        drro->drr_compress = dnp->dn_compress;
        drro->drr_toguid = dsp->dsa_toguid;

        if (dump_bytes(dsp, dsp->dsa_drr, sizeof (dmu_replay_record_t)) != 0)
                return (SET_ERROR(EINTR));

        if (dump_bytes(dsp, DN_BONUS(dnp), P2ROUNDUP(dnp->dn_bonuslen, 8)) != 0)
                return (SET_ERROR(EINTR));

        /* Free anything past the end of the file. */
        if (dump_free(dsp, object, (dnp->dn_maxblkid + 1) *
            (dnp->dn_datablkszsec << SPA_MINBLOCKSHIFT), -1ULL) != 0)
                return (SET_ERROR(EINTR));
        if (dsp->dsa_err != 0)
                return (SET_ERROR(EINTR));
        return (0);
}

#define BP_SPAN(dnp, level) \
        (((uint64_t)dnp->dn_datablkszsec) << (SPA_MINBLOCKSHIFT + \
        (level) * (dnp->dn_indblkshift - SPA_BLKPTRSHIFT)))

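/*
 * BP_SPAN computes how many bytes of the object a single block pointer
 * at the given indirection level covers.  As a worked example (values
 * illustrative): with 128K data blocks (dn_datablkszsec == 256,
 * SPA_MINBLOCKSHIFT == 9) and 16K indirect blocks (dn_indblkshift == 14,
 * SPA_BLKPTRSHIFT == 7), a level-0 bp spans 256 << 9 == 128K, and a
 * level-1 bp spans 256 << (9 + (14 - 7)) == 16M, i.e. the 128 blkptrs
 * in one indirect block times 128K each.
 */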
/* ARGSUSED */
static int
backup_cb(spa_t *spa, zilog_t *zilog, const blkptr_t *bp,
    const zbookmark_t *zb, const dnode_phys_t *dnp, void *arg)
{
        dmu_sendarg_t *dsp = arg;
        dmu_object_type_t type = bp ? BP_GET_TYPE(bp) : DMU_OT_NONE;
        int err = 0;

        if (issig(JUSTLOOKING) && issig(FORREAL))
                return (SET_ERROR(EINTR));

        if (zb->zb_object != DMU_META_DNODE_OBJECT &&
            DMU_OBJECT_IS_SPECIAL(zb->zb_object)) {
                return (0);
        } else if (bp == NULL && zb->zb_object == DMU_META_DNODE_OBJECT) {
                uint64_t span = BP_SPAN(dnp, zb->zb_level);
                uint64_t dnobj = (zb->zb_blkid * span) >> DNODE_SHIFT;
                err = dump_freeobjects(dsp, dnobj, span >> DNODE_SHIFT);
        } else if (bp == NULL) {
                uint64_t span = BP_SPAN(dnp, zb->zb_level);
                err = dump_free(dsp, zb->zb_object, zb->zb_blkid * span, span);
        } else if (zb->zb_level > 0 || type == DMU_OT_OBJSET) {
                return (0);
        } else if (type == DMU_OT_DNODE) {
                dnode_phys_t *blk;
                int i;
                int blksz = BP_GET_LSIZE(bp);
                uint32_t aflags = ARC_WAIT;
                arc_buf_t *abuf;

                if (arc_read(NULL, spa, bp, arc_getbuf_func, &abuf,
                    ZIO_PRIORITY_ASYNC_READ, ZIO_FLAG_CANFAIL,
                    &aflags, zb) != 0)
                        return (SET_ERROR(EIO));

                blk = abuf->b_data;
                for (i = 0; i < blksz >> DNODE_SHIFT; i++) {
                        uint64_t dnobj = (zb->zb_blkid <<
                            (DNODE_BLOCK_SHIFT - DNODE_SHIFT)) + i;
                        err = dump_dnode(dsp, dnobj, blk+i);
                        if (err != 0)
                                break;
                }
                (void) arc_buf_remove_ref(abuf, &abuf);
        } else if (type == DMU_OT_SA) {
                uint32_t aflags = ARC_WAIT;
                arc_buf_t *abuf;
                int blksz = BP_GET_LSIZE(bp);

                if (arc_read(NULL, spa, bp, arc_getbuf_func, &abuf,
                    ZIO_PRIORITY_ASYNC_READ, ZIO_FLAG_CANFAIL,
                    &aflags, zb) != 0)
                        return (SET_ERROR(EIO));

                err = dump_spill(dsp, zb->zb_object, blksz, abuf->b_data);
                (void) arc_buf_remove_ref(abuf, &abuf);
        } else { /* it's a level-0 block of a regular object */
                uint32_t aflags = ARC_WAIT;
                arc_buf_t *abuf;
                int blksz = BP_GET_LSIZE(bp);

                if (arc_read(NULL, spa, bp, arc_getbuf_func, &abuf,
                    ZIO_PRIORITY_ASYNC_READ, ZIO_FLAG_CANFAIL,
                    &aflags, zb) != 0) {
                        if (zfs_send_corrupt_data) {
                                /* Send a block filled with 0x"zfs badd bloc" */
                                abuf = arc_buf_alloc(spa, blksz, &abuf,
                                    ARC_BUFC_DATA);
                                uint64_t *ptr;
                                for (ptr = abuf->b_data;
                                    (char *)ptr < (char *)abuf->b_data + blksz;
                                    ptr++)
                                        *ptr = 0x2f5baddb10c;
                        } else {
                                return (SET_ERROR(EIO));
                        }
                }

                err = dump_data(dsp, type, zb->zb_object, zb->zb_blkid * blksz,
                    blksz, bp, abuf->b_data);
                (void) arc_buf_remove_ref(abuf, &abuf);
        }

        ASSERT(err == 0 || err == EINTR);
        return (err);
}

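/*
 * The stream emitted below is framed as a single DRR_BEGIN record,
 * followed by the records produced by traversing the dataset (OBJECT,
 * WRITE, FREE, FREEOBJECTS, SPILL), and terminated by a DRR_END record
 * carrying the running fletcher-4 checksum of everything before it.
 */
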
/*
 * Releases dp, ds, and fromds, using the specified tag.
 */
static int
dmu_send_impl(void *tag, dsl_pool_t *dp, dsl_dataset_t *ds,
    dsl_dataset_t *fromds, int outfd, vnode_t *vp, offset_t *off)
{
        objset_t *os;
        dmu_replay_record_t *drr;
        dmu_sendarg_t *dsp;
        int err;
        uint64_t fromtxg = 0;

        if (fromds != NULL && !dsl_dataset_is_before(ds, fromds)) {
                dsl_dataset_rele(fromds, tag);
                dsl_dataset_rele(ds, tag);
                dsl_pool_rele(dp, tag);
                return (SET_ERROR(EXDEV));
        }

        err = dmu_objset_from_ds(ds, &os);
        if (err != 0) {
                if (fromds != NULL)
                        dsl_dataset_rele(fromds, tag);
                dsl_dataset_rele(ds, tag);
                dsl_pool_rele(dp, tag);
                return (err);
        }

        drr = kmem_zalloc(sizeof (dmu_replay_record_t), KM_SLEEP);
        drr->drr_type = DRR_BEGIN;
        drr->drr_u.drr_begin.drr_magic = DMU_BACKUP_MAGIC;
        DMU_SET_STREAM_HDRTYPE(drr->drr_u.drr_begin.drr_versioninfo,
            DMU_SUBSTREAM);

#ifdef _KERNEL
        if (dmu_objset_type(os) == DMU_OST_ZFS) {
                uint64_t version;
                if (zfs_get_zplprop(os, ZFS_PROP_VERSION, &version) != 0) {
                        kmem_free(drr, sizeof (dmu_replay_record_t));
                        if (fromds != NULL)
                                dsl_dataset_rele(fromds, tag);
                        dsl_dataset_rele(ds, tag);
                        dsl_pool_rele(dp, tag);
                        return (SET_ERROR(EINVAL));
                }
                if (version >= ZPL_VERSION_SA) {
                        DMU_SET_FEATUREFLAGS(
                            drr->drr_u.drr_begin.drr_versioninfo,
                            DMU_BACKUP_FEATURE_SA_SPILL);
                }
        }
#endif

        drr->drr_u.drr_begin.drr_creation_time =
            ds->ds_phys->ds_creation_time;
        drr->drr_u.drr_begin.drr_type = dmu_objset_type(os);
        if (fromds != NULL && ds->ds_dir != fromds->ds_dir)
                drr->drr_u.drr_begin.drr_flags |= DRR_FLAG_CLONE;
        drr->drr_u.drr_begin.drr_toguid = ds->ds_phys->ds_guid;
        if (ds->ds_phys->ds_flags & DS_FLAG_CI_DATASET)
                drr->drr_u.drr_begin.drr_flags |= DRR_FLAG_CI_DATA;

        if (fromds != NULL)
                drr->drr_u.drr_begin.drr_fromguid = fromds->ds_phys->ds_guid;
        dsl_dataset_name(ds, drr->drr_u.drr_begin.drr_toname);

        if (fromds != NULL) {
                fromtxg = fromds->ds_phys->ds_creation_txg;
                dsl_dataset_rele(fromds, tag);
                fromds = NULL;
        }

        dsp = kmem_zalloc(sizeof (dmu_sendarg_t), KM_SLEEP);

        dsp->dsa_drr = drr;
        dsp->dsa_vp = vp;
        dsp->dsa_outfd = outfd;
        dsp->dsa_proc = curproc;
        dsp->dsa_os = os;
        dsp->dsa_off = off;
        dsp->dsa_toguid = ds->ds_phys->ds_guid;
        ZIO_SET_CHECKSUM(&dsp->dsa_zc, 0, 0, 0, 0);
        dsp->dsa_pending_op = PENDING_NONE;
        dsp->dsa_incremental = (fromtxg != 0);

        mutex_enter(&ds->ds_sendstream_lock);
        list_insert_head(&ds->ds_sendstreams, dsp);
        mutex_exit(&ds->ds_sendstream_lock);

        dsl_dataset_long_hold(ds, FTAG);
        dsl_pool_rele(dp, tag);

        if (dump_bytes(dsp, drr, sizeof (dmu_replay_record_t)) != 0) {
                err = dsp->dsa_err;
                goto out;
        }

        err = traverse_dataset(ds, fromtxg, TRAVERSE_PRE | TRAVERSE_PREFETCH,
            backup_cb, dsp);

        if (dsp->dsa_pending_op != PENDING_NONE)
                if (dump_bytes(dsp, drr, sizeof (dmu_replay_record_t)) != 0)
                        err = SET_ERROR(EINTR);

        if (err != 0) {
                if (err == EINTR && dsp->dsa_err != 0)
                        err = dsp->dsa_err;
                goto out;
        }

        bzero(drr, sizeof (dmu_replay_record_t));
        drr->drr_type = DRR_END;
        drr->drr_u.drr_end.drr_checksum = dsp->dsa_zc;
        drr->drr_u.drr_end.drr_toguid = dsp->dsa_toguid;

        if (dump_bytes(dsp, drr, sizeof (dmu_replay_record_t)) != 0) {
                err = dsp->dsa_err;
                goto out;
        }

out:
        mutex_enter(&ds->ds_sendstream_lock);
        list_remove(&ds->ds_sendstreams, dsp);
        mutex_exit(&ds->ds_sendstream_lock);

        kmem_free(drr, sizeof (dmu_replay_record_t));
        kmem_free(dsp, sizeof (dmu_sendarg_t));

        dsl_dataset_long_rele(ds, FTAG);
        dsl_dataset_rele(ds, tag);

        return (err);
}

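/*
 * dmu_send_obj() and dmu_send() are thin wrappers around dmu_send_impl():
 * the former names the snapshots by object number within a pool, the
 * latter by full "fs@snap" name.  Both take the pool and dataset holds
 * that dmu_send_impl() releases.
 */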
int
dmu_send_obj(const char *pool, uint64_t tosnap, uint64_t fromsnap,
    int outfd, vnode_t *vp, offset_t *off)
{
        dsl_pool_t *dp;
        dsl_dataset_t *ds;
        dsl_dataset_t *fromds = NULL;
        int err;

        err = dsl_pool_hold(pool, FTAG, &dp);
        if (err != 0)
                return (err);

        err = dsl_dataset_hold_obj(dp, tosnap, FTAG, &ds);
        if (err != 0) {
                dsl_pool_rele(dp, FTAG);
                return (err);
        }

        if (fromsnap != 0) {
                err = dsl_dataset_hold_obj(dp, fromsnap, FTAG, &fromds);
                if (err != 0) {
                        dsl_dataset_rele(ds, FTAG);
                        dsl_pool_rele(dp, FTAG);
                        return (err);
                }
        }

        return (dmu_send_impl(FTAG, dp, ds, fromds, outfd, vp, off));
}

int
dmu_send(const char *tosnap, const char *fromsnap,
    int outfd, vnode_t *vp, offset_t *off)
{
        dsl_pool_t *dp;
        dsl_dataset_t *ds;
        dsl_dataset_t *fromds = NULL;
        int err;

        if (strchr(tosnap, '@') == NULL)
                return (SET_ERROR(EINVAL));
        if (fromsnap != NULL && strchr(fromsnap, '@') == NULL)
                return (SET_ERROR(EINVAL));

        err = dsl_pool_hold(tosnap, FTAG, &dp);
        if (err != 0)
                return (err);

        err = dsl_dataset_hold(dp, tosnap, FTAG, &ds);
        if (err != 0) {
                dsl_pool_rele(dp, FTAG);
                return (err);
        }

        if (fromsnap != NULL) {
                err = dsl_dataset_hold(dp, fromsnap, FTAG, &fromds);
                if (err != 0) {
                        dsl_dataset_rele(ds, FTAG);
                        dsl_pool_rele(dp, FTAG);
                        return (err);
                }
        }
        return (dmu_send_impl(FTAG, dp, ds, fromds, outfd, vp, off));
}

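/*
 * Worked example of the size adjustment performed below (numbers
 * illustrative): with recordsize == 128K and sizeof (blkptr_t) == 128,
 * a 1G delta contains ~8192 blocks, so ~1M (8192 * 128) is subtracted
 * for indirect blocks, and 8192 * sizeof (dmu_replay_record_t) is added
 * back for the per-block record headers in the stream.
 */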
int
dmu_send_estimate(dsl_dataset_t *ds, dsl_dataset_t *fromds, uint64_t *sizep)
{
        dsl_pool_t *dp = ds->ds_dir->dd_pool;
        int err;
        uint64_t size;

        ASSERT(dsl_pool_config_held(dp));

        /* tosnap must be a snapshot */
        if (!dsl_dataset_is_snapshot(ds))
                return (SET_ERROR(EINVAL));

        /*
         * fromsnap must be an earlier snapshot from the same fs as tosnap,
         * or the origin's fs.
         */
        if (fromds != NULL && !dsl_dataset_is_before(ds, fromds))
                return (SET_ERROR(EXDEV));

        /* Get uncompressed size estimate of changed data. */
        if (fromds == NULL) {
                size = ds->ds_phys->ds_uncompressed_bytes;
        } else {
                uint64_t used, comp;
                err = dsl_dataset_space_written(fromds, ds,
                    &used, &comp, &size);
                if (err != 0)
                        return (err);
        }

        /*
         * Assume that space (both on-disk and in-stream) is dominated by
         * data. We will adjust for indirect blocks and the copies property,
         * but ignore per-object space used (eg, dnodes and DRR_OBJECT records).
         */

        /*
         * Subtract out approximate space used by indirect blocks.
         * Assume most space is used by data blocks (non-indirect, non-dnode).
         * Assume all blocks are recordsize.  Assume ditto blocks and
         * internal fragmentation counter out compression.
         *
         * Therefore, space used by indirect blocks is sizeof(blkptr_t) per
         * block, which we observe in practice.
         */
        uint64_t recordsize;
        err = dsl_prop_get_int_ds(ds, "recordsize", &recordsize);
        if (err != 0)
                return (err);
        size -= size / recordsize * sizeof (blkptr_t);

        /* Add in the space for the record associated with each block. */
        size += size / recordsize * sizeof (dmu_replay_record_t);

        *sizep = size;

        return (0);
}

typedef struct dmu_recv_begin_arg {
        const char *drba_origin;
        dmu_recv_cookie_t *drba_cookie;
        cred_t *drba_cred;
        uint64_t drba_snapobj;
} dmu_recv_begin_arg_t;

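/*
 * Sanity-check a receive into an existing filesystem: the temporary
 * %recv clone and the target snapshot name must not already exist, and
 * for an incremental stream some snapshot in this dir must match
 * fromguid (and, unless forced, nothing may have changed since it).
 */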
static int
recv_begin_check_existing_impl(dmu_recv_begin_arg_t *drba, dsl_dataset_t *ds,
    uint64_t fromguid)
{
        uint64_t val;
        int error;
        dsl_pool_t *dp = ds->ds_dir->dd_pool;

        /* temporary clone name must not exist */
        error = zap_lookup(dp->dp_meta_objset,
            ds->ds_dir->dd_phys->dd_child_dir_zapobj, recv_clone_name,
            8, 1, &val);
        if (error != ENOENT)
                return (error == 0 ? EBUSY : error);

        /* new snapshot name must not exist */
        error = zap_lookup(dp->dp_meta_objset,
            ds->ds_phys->ds_snapnames_zapobj, drba->drba_cookie->drc_tosnap,
            8, 1, &val);
        if (error != ENOENT)
                return (error == 0 ? EEXIST : error);

        if (fromguid != 0) {
                dsl_dataset_t *snap;
                uint64_t obj = ds->ds_phys->ds_prev_snap_obj;

                /* Find snapshot in this dir that matches fromguid. */
                while (obj != 0) {
                        error = dsl_dataset_hold_obj(dp, obj, FTAG,
                            &snap);
                        if (error != 0)
                                return (SET_ERROR(ENODEV));
                        if (snap->ds_dir != ds->ds_dir) {
                                dsl_dataset_rele(snap, FTAG);
                                return (SET_ERROR(ENODEV));
                        }
                        if (snap->ds_phys->ds_guid == fromguid)
                                break;
                        obj = snap->ds_phys->ds_prev_snap_obj;
                        dsl_dataset_rele(snap, FTAG);
                }
                if (obj == 0)
                        return (SET_ERROR(ENODEV));

                if (drba->drba_cookie->drc_force) {
                        drba->drba_snapobj = obj;
                } else {
                        /*
                         * If we are not forcing, there must be no
                         * changes since fromsnap.
                         */
                        if (dsl_dataset_modified_since_snap(ds, snap)) {
                                dsl_dataset_rele(snap, FTAG);
                                return (SET_ERROR(ETXTBSY));
                        }
                        drba->drba_snapobj = ds->ds_prev->ds_object;
                }

                dsl_dataset_rele(snap, FTAG);
        } else {
                /* if full, most recent snapshot must be $ORIGIN */
                if (ds->ds_phys->ds_prev_snap_txg >= TXG_INITIAL)
                        return (SET_ERROR(ENODEV));
                drba->drba_snapobj = ds->ds_phys->ds_prev_snap_obj;
        }

        return (0);
}

static int
dmu_recv_begin_check(void *arg, dmu_tx_t *tx)
{
        dmu_recv_begin_arg_t *drba = arg;
        dsl_pool_t *dp = dmu_tx_pool(tx);
        struct drr_begin *drrb = drba->drba_cookie->drc_drrb;
        uint64_t fromguid = drrb->drr_fromguid;
        int flags = drrb->drr_flags;
        int error;
        dsl_dataset_t *ds;
        const char *tofs = drba->drba_cookie->drc_tofs;

        /* already checked */
        ASSERT3U(drrb->drr_magic, ==, DMU_BACKUP_MAGIC);

        if (DMU_GET_STREAM_HDRTYPE(drrb->drr_versioninfo) ==
            DMU_COMPOUNDSTREAM ||
            drrb->drr_type >= DMU_OST_NUMTYPES ||
            ((flags & DRR_FLAG_CLONE) && drba->drba_origin == NULL))
                return (SET_ERROR(EINVAL));

        /* Verify pool version supports SA if SA_SPILL feature set */
        if ((DMU_GET_FEATUREFLAGS(drrb->drr_versioninfo) &
            DMU_BACKUP_FEATURE_SA_SPILL) &&
            spa_version(dp->dp_spa) < SPA_VERSION_SA) {
                return (SET_ERROR(ENOTSUP));
        }

        error = dsl_dataset_hold(dp, tofs, FTAG, &ds);
        if (error == 0) {
                /* target fs already exists; recv into temp clone */

                /* Can't recv a clone into an existing fs */
                if (flags & DRR_FLAG_CLONE) {
                        dsl_dataset_rele(ds, FTAG);
                        return (SET_ERROR(EINVAL));
                }

                error = recv_begin_check_existing_impl(drba, ds, fromguid);
                dsl_dataset_rele(ds, FTAG);
        } else if (error == ENOENT) {
                /* target fs does not exist; must be a full backup or clone */
                char buf[MAXNAMELEN];

                /*
                 * If it's a non-clone incremental, we are missing the
                 * target fs, so fail the recv.
                 */
                if (fromguid != 0 && !(flags & DRR_FLAG_CLONE))
                        return (SET_ERROR(ENOENT));

                /* Open the parent of tofs */
                ASSERT3U(strlen(tofs), <, MAXNAMELEN);
                (void) strlcpy(buf, tofs, strrchr(tofs, '/') - tofs + 1);
                error = dsl_dataset_hold(dp, buf, FTAG, &ds);
                if (error != 0)
                        return (error);

                if (drba->drba_origin != NULL) {
                        dsl_dataset_t *origin;
                        error = dsl_dataset_hold(dp, drba->drba_origin,
                            FTAG, &origin);
                        if (error != 0) {
                                dsl_dataset_rele(ds, FTAG);
                                return (error);
                        }
                        if (!dsl_dataset_is_snapshot(origin)) {
                                dsl_dataset_rele(origin, FTAG);
                                dsl_dataset_rele(ds, FTAG);
                                return (SET_ERROR(EINVAL));
                        }
                        if (origin->ds_phys->ds_guid != fromguid) {
                                dsl_dataset_rele(origin, FTAG);
                                dsl_dataset_rele(ds, FTAG);
                                return (SET_ERROR(ENODEV));
                        }
                        dsl_dataset_rele(origin, FTAG);
                }
                dsl_dataset_rele(ds, FTAG);
                error = 0;
        }
        return (error);
}

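/*
 * Sync counterpart of dmu_recv_begin_check(): creates the dataset we
 * will receive into (a temporary %recv clone when tofs already exists,
 * otherwise a brand-new head), owns it with dmu_recv_tag, and marks it
 * DS_FLAG_INCONSISTENT until the receive completes.
 */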
static void
dmu_recv_begin_sync(void *arg, dmu_tx_t *tx)
{
        dmu_recv_begin_arg_t *drba = arg;
        dsl_pool_t *dp = dmu_tx_pool(tx);
        struct drr_begin *drrb = drba->drba_cookie->drc_drrb;
        const char *tofs = drba->drba_cookie->drc_tofs;
        dsl_dataset_t *ds, *newds;
        uint64_t dsobj;
        int error;
        uint64_t crflags;

        crflags = (drrb->drr_flags & DRR_FLAG_CI_DATA) ?
            DS_FLAG_CI_DATASET : 0;

        error = dsl_dataset_hold(dp, tofs, FTAG, &ds);
        if (error == 0) {
                /* create temporary clone */
                dsl_dataset_t *snap = NULL;
                if (drba->drba_snapobj != 0) {
                        VERIFY0(dsl_dataset_hold_obj(dp,
                            drba->drba_snapobj, FTAG, &snap));
                }
                dsobj = dsl_dataset_create_sync(ds->ds_dir, recv_clone_name,
                    snap, crflags, drba->drba_cred, tx);
                dsl_dataset_rele(snap, FTAG);
                dsl_dataset_rele(ds, FTAG);
        } else {
                dsl_dir_t *dd;
                const char *tail;
                dsl_dataset_t *origin = NULL;

                VERIFY0(dsl_dir_hold(dp, tofs, FTAG, &dd, &tail));

                if (drba->drba_origin != NULL) {
                        VERIFY0(dsl_dataset_hold(dp, drba->drba_origin,
                            FTAG, &origin));
                }

                /* Create new dataset. */
                dsobj = dsl_dataset_create_sync(dd,
                    strrchr(tofs, '/') + 1,
                    origin, crflags, drba->drba_cred, tx);
                if (origin != NULL)
                        dsl_dataset_rele(origin, FTAG);
                dsl_dir_rele(dd, FTAG);
                drba->drba_cookie->drc_newfs = B_TRUE;
        }
        VERIFY0(dsl_dataset_own_obj(dp, dsobj, dmu_recv_tag, &newds));

        dmu_buf_will_dirty(newds->ds_dbuf, tx);
        newds->ds_phys->ds_flags |= DS_FLAG_INCONSISTENT;

        /*
         * If we actually created a non-clone, we need to create the
         * objset in our new dataset.
         */
        if (BP_IS_HOLE(dsl_dataset_get_blkptr(newds))) {
                (void) dmu_objset_create_impl(dp->dp_spa,
                    newds, dsl_dataset_get_blkptr(newds), drrb->drr_type, tx);
        }

        drba->drba_cookie->drc_ds = newds;

        spa_history_log_internal_ds(newds, "receive", tx, "");
}

/*
 * NB: callers *MUST* call dmu_recv_stream() if dmu_recv_begin()
 * succeeds; otherwise we will leak the holds on the datasets.
 */
int
dmu_recv_begin(char *tofs, char *tosnap, struct drr_begin *drrb,
    boolean_t force, char *origin, dmu_recv_cookie_t *drc)
{
        dmu_recv_begin_arg_t drba = { 0 };
        dmu_replay_record_t *drr;

        bzero(drc, sizeof (dmu_recv_cookie_t));
        drc->drc_drrb = drrb;
        drc->drc_tosnap = tosnap;
        drc->drc_tofs = tofs;
        drc->drc_force = force;

        if (drrb->drr_magic == BSWAP_64(DMU_BACKUP_MAGIC))
                drc->drc_byteswap = B_TRUE;
        else if (drrb->drr_magic != DMU_BACKUP_MAGIC)
                return (SET_ERROR(EINVAL));

        drr = kmem_zalloc(sizeof (dmu_replay_record_t), KM_SLEEP);
        drr->drr_type = DRR_BEGIN;
        drr->drr_u.drr_begin = *drc->drc_drrb;
        if (drc->drc_byteswap) {
                fletcher_4_incremental_byteswap(drr,
                    sizeof (dmu_replay_record_t), &drc->drc_cksum);
        } else {
                fletcher_4_incremental_native(drr,
                    sizeof (dmu_replay_record_t), &drc->drc_cksum);
        }
        kmem_free(drr, sizeof (dmu_replay_record_t));

        if (drc->drc_byteswap) {
                drrb->drr_magic = BSWAP_64(drrb->drr_magic);
                drrb->drr_versioninfo = BSWAP_64(drrb->drr_versioninfo);
                drrb->drr_creation_time = BSWAP_64(drrb->drr_creation_time);
                drrb->drr_type = BSWAP_32(drrb->drr_type);
                drrb->drr_toguid = BSWAP_64(drrb->drr_toguid);
                drrb->drr_fromguid = BSWAP_64(drrb->drr_fromguid);
        }

        drba.drba_origin = origin;
        drba.drba_cookie = drc;
        drba.drba_cred = CRED();

        return (dsl_sync_task(tofs, dmu_recv_begin_check, dmu_recv_begin_sync,
            &drba, 5));
}

struct restorearg {
        int err;
        boolean_t byteswap;
        vnode_t *vp;
        char *buf;
        uint64_t voff;
        int bufsize; /* amount of memory allocated for buf */
        zio_cksum_t cksum;
        avl_tree_t *guid_to_ds_map;
};

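/*
 * For dedup'ed streams, DRR_WRITE_BYREF records name their data by
 * (refguid, refobject, refoffset).  The guid map below translates a
 * refguid into a dataset that has already been received, so the data
 * can be copied locally instead of being resent.
 */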
typedef struct guid_map_entry {
        uint64_t guid;
        dsl_dataset_t *gme_ds;
        avl_node_t avlnode;
} guid_map_entry_t;

static int
guid_compare(const void *arg1, const void *arg2)
{
        const guid_map_entry_t *gmep1 = arg1;
        const guid_map_entry_t *gmep2 = arg2;

        if (gmep1->guid < gmep2->guid)
                return (-1);
        else if (gmep1->guid > gmep2->guid)
                return (1);
        return (0);
}

static void
free_guid_map_onexit(void *arg)
{
        avl_tree_t *ca = arg;
        void *cookie = NULL;
        guid_map_entry_t *gmep;

        while ((gmep = avl_destroy_nodes(ca, &cookie)) != NULL) {
                dsl_dataset_long_rele(gmep->gme_ds, gmep);
                dsl_dataset_rele(gmep->gme_ds, gmep);
                kmem_free(gmep, sizeof (guid_map_entry_t));
        }
        avl_destroy(ca);
        kmem_free(ca, sizeof (avl_tree_t));
}

static void *
restore_read(struct restorearg *ra, int len)
{
        void *rv;
        int done = 0;

        /* some things will require 8-byte alignment, so everything must */
        ASSERT0(len % 8);

        while (done < len) {
                ssize_t resid;

                ra->err = vn_rdwr(UIO_READ, ra->vp,
                    (caddr_t)ra->buf + done, len - done,
                    ra->voff, UIO_SYSSPACE, FAPPEND,
                    RLIM64_INFINITY, CRED(), &resid);

                if (resid == len - done)
                        ra->err = SET_ERROR(EINVAL);
                ra->voff += len - done - resid;
                done = len - resid;
                if (ra->err != 0)
                        return (NULL);
        }

        ASSERT3U(done, ==, len);
        rv = ra->buf;
        if (ra->byteswap)
                fletcher_4_incremental_byteswap(rv, len, &ra->cksum);
        else
                fletcher_4_incremental_native(rv, len, &ra->cksum);
        return (rv);
}

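/*
 * Byteswap a record header in place when the stream was written on a
 * host of the opposite endianness.  Only the header fields are swapped
 * here; record payloads are swapped later, per object type.
 */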
static void
backup_byteswap(dmu_replay_record_t *drr)
{
#define DO64(X) (drr->drr_u.X = BSWAP_64(drr->drr_u.X))
#define DO32(X) (drr->drr_u.X = BSWAP_32(drr->drr_u.X))
        drr->drr_type = BSWAP_32(drr->drr_type);
        drr->drr_payloadlen = BSWAP_32(drr->drr_payloadlen);
        switch (drr->drr_type) {
        case DRR_BEGIN:
                DO64(drr_begin.drr_magic);
                DO64(drr_begin.drr_versioninfo);
                DO64(drr_begin.drr_creation_time);
                DO32(drr_begin.drr_type);
                DO32(drr_begin.drr_flags);
                DO64(drr_begin.drr_toguid);
                DO64(drr_begin.drr_fromguid);
                break;
        case DRR_OBJECT:
                DO64(drr_object.drr_object);
                /* DO64(drr_object.drr_allocation_txg); */
                DO32(drr_object.drr_type);
                DO32(drr_object.drr_bonustype);
                DO32(drr_object.drr_blksz);
                DO32(drr_object.drr_bonuslen);
                DO64(drr_object.drr_toguid);
                break;
        case DRR_FREEOBJECTS:
                DO64(drr_freeobjects.drr_firstobj);
                DO64(drr_freeobjects.drr_numobjs);
                DO64(drr_freeobjects.drr_toguid);
                break;
        case DRR_WRITE:
                DO64(drr_write.drr_object);
                DO32(drr_write.drr_type);
                DO64(drr_write.drr_offset);
                DO64(drr_write.drr_length);
                DO64(drr_write.drr_toguid);
                DO64(drr_write.drr_key.ddk_cksum.zc_word[0]);
                DO64(drr_write.drr_key.ddk_cksum.zc_word[1]);
                DO64(drr_write.drr_key.ddk_cksum.zc_word[2]);
                DO64(drr_write.drr_key.ddk_cksum.zc_word[3]);
                DO64(drr_write.drr_key.ddk_prop);
                break;
        case DRR_WRITE_BYREF:
                DO64(drr_write_byref.drr_object);
                DO64(drr_write_byref.drr_offset);
                DO64(drr_write_byref.drr_length);
                DO64(drr_write_byref.drr_toguid);
                DO64(drr_write_byref.drr_refguid);
                DO64(drr_write_byref.drr_refobject);
                DO64(drr_write_byref.drr_refoffset);
                DO64(drr_write_byref.drr_key.ddk_cksum.zc_word[0]);
                DO64(drr_write_byref.drr_key.ddk_cksum.zc_word[1]);
                DO64(drr_write_byref.drr_key.ddk_cksum.zc_word[2]);
                DO64(drr_write_byref.drr_key.ddk_cksum.zc_word[3]);
                DO64(drr_write_byref.drr_key.ddk_prop);
                break;
        case DRR_FREE:
                DO64(drr_free.drr_object);
                DO64(drr_free.drr_offset);
                DO64(drr_free.drr_length);
                DO64(drr_free.drr_toguid);
                break;
        case DRR_SPILL:
                DO64(drr_spill.drr_object);
                DO64(drr_spill.drr_length);
                DO64(drr_spill.drr_toguid);
                break;
        case DRR_END:
                DO64(drr_end.drr_checksum.zc_word[0]);
                DO64(drr_end.drr_checksum.zc_word[1]);
                DO64(drr_end.drr_checksum.zc_word[2]);
                DO64(drr_end.drr_checksum.zc_word[3]);
                DO64(drr_end.drr_toguid);
                break;
        }
#undef DO64
#undef DO32
}

static int
restore_object(struct restorearg *ra, objset_t *os, struct drr_object *drro)
{
        int err;
        dmu_tx_t *tx;
        void *data = NULL;

        if (drro->drr_type == DMU_OT_NONE ||
            !DMU_OT_IS_VALID(drro->drr_type) ||
            !DMU_OT_IS_VALID(drro->drr_bonustype) ||
            drro->drr_checksumtype >= ZIO_CHECKSUM_FUNCTIONS ||
            drro->drr_compress >= ZIO_COMPRESS_FUNCTIONS ||
            P2PHASE(drro->drr_blksz, SPA_MINBLOCKSIZE) ||
            drro->drr_blksz < SPA_MINBLOCKSIZE ||
            drro->drr_blksz > SPA_MAXBLOCKSIZE ||
            drro->drr_bonuslen > DN_MAX_BONUSLEN) {
                return (SET_ERROR(EINVAL));
        }

        err = dmu_object_info(os, drro->drr_object, NULL);

        if (err != 0 && err != ENOENT)
                return (SET_ERROR(EINVAL));

        if (drro->drr_bonuslen) {
                data = restore_read(ra, P2ROUNDUP(drro->drr_bonuslen, 8));
                if (ra->err != 0)
                        return (ra->err);
        }

        if (err == ENOENT) {
                /* currently free, want to be allocated */
                tx = dmu_tx_create(os);
                dmu_tx_hold_bonus(tx, DMU_NEW_OBJECT);
                err = dmu_tx_assign(tx, TXG_WAIT);
                if (err != 0) {
                        dmu_tx_abort(tx);
                        return (err);
                }
                err = dmu_object_claim(os, drro->drr_object,
                    drro->drr_type, drro->drr_blksz,
                    drro->drr_bonustype, drro->drr_bonuslen, tx);
                dmu_tx_commit(tx);
        } else {
                /* currently allocated, want to be allocated */
                err = dmu_object_reclaim(os, drro->drr_object,
                    drro->drr_type, drro->drr_blksz,
                    drro->drr_bonustype, drro->drr_bonuslen);
        }
        if (err != 0) {
                return (SET_ERROR(EINVAL));
        }

        tx = dmu_tx_create(os);
        dmu_tx_hold_bonus(tx, drro->drr_object);
        err = dmu_tx_assign(tx, TXG_WAIT);
        if (err != 0) {
                dmu_tx_abort(tx);
                return (err);
        }

        dmu_object_set_checksum(os, drro->drr_object, drro->drr_checksumtype,
            tx);
        dmu_object_set_compress(os, drro->drr_object, drro->drr_compress, tx);

        if (data != NULL) {
                dmu_buf_t *db;

                VERIFY(0 == dmu_bonus_hold(os, drro->drr_object, FTAG, &db));
                dmu_buf_will_dirty(db, tx);

                ASSERT3U(db->db_size, >=, drro->drr_bonuslen);
                bcopy(data, db->db_data, drro->drr_bonuslen);
                if (ra->byteswap) {
                        dmu_object_byteswap_t byteswap =
                            DMU_OT_BYTESWAP(drro->drr_bonustype);
                        dmu_ot_byteswap[byteswap].ob_func(db->db_data,
                            drro->drr_bonuslen);
                }
                dmu_buf_rele(db, FTAG);
        }
        dmu_tx_commit(tx);
        return (0);
}

/* ARGSUSED */
static int
restore_freeobjects(struct restorearg *ra, objset_t *os,
    struct drr_freeobjects *drrfo)
{
        uint64_t obj;

        if (drrfo->drr_firstobj + drrfo->drr_numobjs < drrfo->drr_firstobj)
                return (SET_ERROR(EINVAL));

        for (obj = drrfo->drr_firstobj;
            obj < drrfo->drr_firstobj + drrfo->drr_numobjs;
            (void) dmu_object_next(os, &obj, FALSE, 0)) {
                int err;

                if (dmu_object_info(os, obj, NULL) != 0)
                        continue;

                err = dmu_free_long_object(os, obj);
                if (err != 0)
                        return (err);
        }
        return (0);
}

static int
restore_write(struct restorearg *ra, objset_t *os,
    struct drr_write *drrw)
{
        dmu_tx_t *tx;
        void *data;
        int err;

        if (drrw->drr_offset + drrw->drr_length < drrw->drr_offset ||
            !DMU_OT_IS_VALID(drrw->drr_type))
                return (SET_ERROR(EINVAL));

        data = restore_read(ra, drrw->drr_length);
        if (data == NULL)
                return (ra->err);

        if (dmu_object_info(os, drrw->drr_object, NULL) != 0)
                return (SET_ERROR(EINVAL));

        tx = dmu_tx_create(os);

        dmu_tx_hold_write(tx, drrw->drr_object,
            drrw->drr_offset, drrw->drr_length);
        err = dmu_tx_assign(tx, TXG_WAIT);
        if (err != 0) {
                dmu_tx_abort(tx);
                return (err);
        }
        if (ra->byteswap) {
                dmu_object_byteswap_t byteswap =
                    DMU_OT_BYTESWAP(drrw->drr_type);
                dmu_ot_byteswap[byteswap].ob_func(data, drrw->drr_length);
        }
        dmu_write(os, drrw->drr_object,
            drrw->drr_offset, drrw->drr_length, data, tx);
        dmu_tx_commit(tx);
        return (0);
}

/*
 * Handle a DRR_WRITE_BYREF record.  This record is used in dedup'ed
 * streams to refer to a copy of the data that is already on the
 * system because it came in earlier in the stream.  This function
 * finds the earlier copy of the data, and uses that copy instead of
 * data from the stream to fulfill this write.
 */
static int
restore_write_byref(struct restorearg *ra, objset_t *os,
    struct drr_write_byref *drrwbr)
{
        dmu_tx_t *tx;
        int err;
        guid_map_entry_t gmesrch;
        guid_map_entry_t *gmep;
        avl_index_t where;
        objset_t *ref_os = NULL;
        dmu_buf_t *dbp;

        if (drrwbr->drr_offset + drrwbr->drr_length < drrwbr->drr_offset)
                return (SET_ERROR(EINVAL));

        /*
         * If the GUID of the referenced dataset is different from the
         * GUID of the target dataset, find the referenced dataset.
         */
        if (drrwbr->drr_toguid != drrwbr->drr_refguid) {
                gmesrch.guid = drrwbr->drr_refguid;
                if ((gmep = avl_find(ra->guid_to_ds_map, &gmesrch,
                    &where)) == NULL) {
                        return (SET_ERROR(EINVAL));
                }
                if (dmu_objset_from_ds(gmep->gme_ds, &ref_os))
                        return (SET_ERROR(EINVAL));
        } else {
                ref_os = os;
        }

        if (err = dmu_buf_hold(ref_os, drrwbr->drr_refobject,
            drrwbr->drr_refoffset, FTAG, &dbp, DMU_READ_PREFETCH))
                return (err);

        tx = dmu_tx_create(os);

        dmu_tx_hold_write(tx, drrwbr->drr_object,
            drrwbr->drr_offset, drrwbr->drr_length);
        err = dmu_tx_assign(tx, TXG_WAIT);
        if (err != 0) {
                dmu_tx_abort(tx);
                return (err);
        }
        dmu_write(os, drrwbr->drr_object,
            drrwbr->drr_offset, drrwbr->drr_length, dbp->db_data, tx);
        dmu_buf_rele(dbp, FTAG);
        dmu_tx_commit(tx);
        return (0);
}

static int
restore_spill(struct restorearg *ra, objset_t *os, struct drr_spill *drrs)
{
        dmu_tx_t *tx;
        void *data;
        dmu_buf_t *db, *db_spill;
        int err;

        if (drrs->drr_length < SPA_MINBLOCKSIZE ||
            drrs->drr_length > SPA_MAXBLOCKSIZE)
                return (SET_ERROR(EINVAL));

        data = restore_read(ra, drrs->drr_length);
        if (data == NULL)
                return (ra->err);

        if (dmu_object_info(os, drrs->drr_object, NULL) != 0)
                return (SET_ERROR(EINVAL));

        VERIFY(0 == dmu_bonus_hold(os, drrs->drr_object, FTAG, &db));
        if ((err = dmu_spill_hold_by_bonus(db, FTAG, &db_spill)) != 0) {
                dmu_buf_rele(db, FTAG);
                return (err);
        }

        tx = dmu_tx_create(os);

        dmu_tx_hold_spill(tx, db->db_object);

        err = dmu_tx_assign(tx, TXG_WAIT);
        if (err != 0) {
                dmu_buf_rele(db, FTAG);
                dmu_buf_rele(db_spill, FTAG);
                dmu_tx_abort(tx);
                return (err);
        }
        dmu_buf_will_dirty(db_spill, tx);

        if (db_spill->db_size < drrs->drr_length)
                VERIFY(0 == dbuf_spill_set_blksz(db_spill,
                    drrs->drr_length, tx));
        bcopy(data, db_spill->db_data, drrs->drr_length);

        dmu_buf_rele(db, FTAG);
        dmu_buf_rele(db_spill, FTAG);

        dmu_tx_commit(tx);
        return (0);
}

/* ARGSUSED */
static int
restore_free(struct restorearg *ra, objset_t *os,
    struct drr_free *drrf)
{
        int err;

        if (drrf->drr_length != -1ULL &&
            drrf->drr_offset + drrf->drr_length < drrf->drr_offset)
                return (SET_ERROR(EINVAL));

        if (dmu_object_info(os, drrf->drr_object, NULL) != 0)
                return (SET_ERROR(EINVAL));

        err = dmu_free_long_range(os, drrf->drr_object,
            drrf->drr_offset, drrf->drr_length);
        return (err);
}

/* used to destroy the drc_ds on error */
static void
dmu_recv_cleanup_ds(dmu_recv_cookie_t *drc)
{
        char name[MAXNAMELEN];
        dsl_dataset_name(drc->drc_ds, name);
        dsl_dataset_disown(drc->drc_ds, dmu_recv_tag);
        (void) dsl_destroy_head(name);
}

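/*
 * A receive is driven as: dmu_recv_begin() (sync task that creates the
 * inconsistent target dataset), then dmu_recv_stream() (reads and
 * applies records until DRR_END), then dmu_recv_end() (sync task that
 * snapshots the result and clears the inconsistent flag).
 */
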
/*
 * NB: callers *must* call dmu_recv_end() if this succeeds.
 */
int
dmu_recv_stream(dmu_recv_cookie_t *drc, vnode_t *vp, offset_t *voffp,
    int cleanup_fd, uint64_t *action_handlep)
{
        struct restorearg ra = { 0 };
        dmu_replay_record_t *drr;
        objset_t *os;
        zio_cksum_t pcksum;
        int featureflags;

        ra.byteswap = drc->drc_byteswap;
        ra.cksum = drc->drc_cksum;
        ra.vp = vp;
        ra.voff = *voffp;
        ra.bufsize = 1<<20;
        ra.buf = kmem_alloc(ra.bufsize, KM_SLEEP);

        /* these were verified in dmu_recv_begin */
        ASSERT3U(DMU_GET_STREAM_HDRTYPE(drc->drc_drrb->drr_versioninfo), ==,
            DMU_SUBSTREAM);
        ASSERT3U(drc->drc_drrb->drr_type, <, DMU_OST_NUMTYPES);

        /*
         * Open the objset we are modifying.
         */
        VERIFY0(dmu_objset_from_ds(drc->drc_ds, &os));

        ASSERT(drc->drc_ds->ds_phys->ds_flags & DS_FLAG_INCONSISTENT);

        featureflags = DMU_GET_FEATUREFLAGS(drc->drc_drrb->drr_versioninfo);

        /* if this stream is dedup'ed, set up the avl tree for guid mapping */
        if (featureflags & DMU_BACKUP_FEATURE_DEDUP) {
                minor_t minor;

                if (cleanup_fd == -1) {
                        ra.err = SET_ERROR(EBADF);
                        goto out;
                }
                ra.err = zfs_onexit_fd_hold(cleanup_fd, &minor);
                if (ra.err != 0) {
                        cleanup_fd = -1;
                        goto out;
                }

                if (*action_handlep == 0) {
                        ra.guid_to_ds_map =
                            kmem_alloc(sizeof (avl_tree_t), KM_SLEEP);
                        avl_create(ra.guid_to_ds_map, guid_compare,
                            sizeof (guid_map_entry_t),
                            offsetof(guid_map_entry_t, avlnode));
                        ra.err = zfs_onexit_add_cb(minor,
                            free_guid_map_onexit, ra.guid_to_ds_map,
                            action_handlep);
                        if (ra.err != 0)
                                goto out;
                } else {
                        ra.err = zfs_onexit_cb_data(minor, *action_handlep,
                            (void **)&ra.guid_to_ds_map);
                        if (ra.err != 0)
                                goto out;
                }

                drc->drc_guid_to_ds_map = ra.guid_to_ds_map;
        }

        /*
         * Read records and process them.
         */
        pcksum = ra.cksum;
        while (ra.err == 0 &&
            NULL != (drr = restore_read(&ra, sizeof (*drr)))) {
                if (issig(JUSTLOOKING) && issig(FORREAL)) {
                        ra.err = SET_ERROR(EINTR);
                        goto out;
                }

                if (ra.byteswap)
                        backup_byteswap(drr);

                switch (drr->drr_type) {
                case DRR_OBJECT:
                {
                        /*
                         * We need to make a copy of the record header,
                         * because restore_{object,write} may need to
                         * restore_read(), which will invalidate drr.
                         */
                        struct drr_object drro = drr->drr_u.drr_object;
                        ra.err = restore_object(&ra, os, &drro);
                        break;
                }
                case DRR_FREEOBJECTS:
                {
                        struct drr_freeobjects drrfo =
                            drr->drr_u.drr_freeobjects;
                        ra.err = restore_freeobjects(&ra, os, &drrfo);
                        break;
                }
                case DRR_WRITE:
                {
                        struct drr_write drrw = drr->drr_u.drr_write;
                        ra.err = restore_write(&ra, os, &drrw);
                        break;
                }
                case DRR_WRITE_BYREF:
                {
                        struct drr_write_byref drrwbr =
                            drr->drr_u.drr_write_byref;
                        ra.err = restore_write_byref(&ra, os, &drrwbr);
                        break;
                }
                case DRR_FREE:
                {
                        struct drr_free drrf = drr->drr_u.drr_free;
                        ra.err = restore_free(&ra, os, &drrf);
                        break;
                }
                case DRR_END:
                {
                        struct drr_end drre = drr->drr_u.drr_end;
                        /*
                         * We compare against the *previous* checksum
                         * value, because the stored checksum is of
                         * everything before the DRR_END record.
                         */
                        if (!ZIO_CHECKSUM_EQUAL(drre.drr_checksum, pcksum))
                                ra.err = SET_ERROR(ECKSUM);
                        goto out;
                }
                case DRR_SPILL:
                {
                        struct drr_spill drrs = drr->drr_u.drr_spill;
                        ra.err = restore_spill(&ra, os, &drrs);
                        break;
                }
                default:
                        ra.err = SET_ERROR(EINVAL);
                        goto out;
                }
                pcksum = ra.cksum;
        }
        ASSERT(ra.err != 0);

out:
        if ((featureflags & DMU_BACKUP_FEATURE_DEDUP) && (cleanup_fd != -1))
                zfs_onexit_fd_rele(cleanup_fd);

        if (ra.err != 0) {
                /*
                 * destroy what we created, so we don't leave it in the
                 * inconsistent restoring state.
                 */
                dmu_recv_cleanup_ds(drc);
        }

        kmem_free(ra.buf, ra.bufsize);
        *voffp = ra.voff;
        return (ra.err);
}

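/*
 * dmu_recv_end_check()/dmu_recv_end_sync() form the sync task that
 * finishes a receive: for an existing filesystem the received %recv
 * clone is swapped in and snapshotted; for a new filesystem the
 * dataset is simply snapshotted.
 */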
static int
dmu_recv_end_check(void *arg, dmu_tx_t *tx)
{
        dmu_recv_cookie_t *drc = arg;
        dsl_pool_t *dp = dmu_tx_pool(tx);
        int error;

        ASSERT3P(drc->drc_ds->ds_owner, ==, dmu_recv_tag);

        if (!drc->drc_newfs) {
                dsl_dataset_t *origin_head;

                error = dsl_dataset_hold(dp, drc->drc_tofs, FTAG, &origin_head);
                if (error != 0)
                        return (error);
                if (drc->drc_force) {
                        /*
                         * We will destroy any snapshots in tofs (i.e. before
                         * origin_head) that are after the origin (which is
                         * the snap before drc_ds, because drc_ds can not
                         * have any snaps of its own).
                         */
                        uint64_t obj = origin_head->ds_phys->ds_prev_snap_obj;
                        while (obj != drc->drc_ds->ds_phys->ds_prev_snap_obj) {
                                dsl_dataset_t *snap;
                                error = dsl_dataset_hold_obj(dp, obj, FTAG,
                                    &snap);
                                if (error != 0)
                                        return (error);
                                if (snap->ds_dir != origin_head->ds_dir)
                                        error = SET_ERROR(EINVAL);
                                if (error == 0) {
                                        error = dsl_destroy_snapshot_check_impl(
                                            snap, B_FALSE);
                                }
                                obj = snap->ds_phys->ds_prev_snap_obj;
                                dsl_dataset_rele(snap, FTAG);
                                if (error != 0)
                                        return (error);
                        }
                }
                error = dsl_dataset_clone_swap_check_impl(drc->drc_ds,
                    origin_head, drc->drc_force, drc->drc_owner, tx);
                if (error != 0) {
                        dsl_dataset_rele(origin_head, FTAG);
                        return (error);
                }
                error = dsl_dataset_snapshot_check_impl(origin_head,
                    drc->drc_tosnap, tx, B_TRUE);
                dsl_dataset_rele(origin_head, FTAG);
                if (error != 0)
                        return (error);

                error = dsl_destroy_head_check_impl(drc->drc_ds, 1);
        } else {
                error = dsl_dataset_snapshot_check_impl(drc->drc_ds,
                    drc->drc_tosnap, tx, B_TRUE);
        }
        return (error);
}

static void
dmu_recv_end_sync(void *arg, dmu_tx_t *tx)
{
        dmu_recv_cookie_t *drc = arg;
        dsl_pool_t *dp = dmu_tx_pool(tx);

        spa_history_log_internal_ds(drc->drc_ds, "finish receiving",
            tx, "snap=%s", drc->drc_tosnap);

        if (!drc->drc_newfs) {
                dsl_dataset_t *origin_head;

                VERIFY0(dsl_dataset_hold(dp, drc->drc_tofs, FTAG,
                    &origin_head));

                if (drc->drc_force) {
                        /*
                         * Destroy any snapshots of drc_tofs (origin_head)
                         * after the origin (the snap before drc_ds).
                         */
                        uint64_t obj = origin_head->ds_phys->ds_prev_snap_obj;
                        while (obj != drc->drc_ds->ds_phys->ds_prev_snap_obj) {
                                dsl_dataset_t *snap;
                                VERIFY0(dsl_dataset_hold_obj(dp, obj, FTAG,
                                    &snap));
                                ASSERT3P(snap->ds_dir, ==, origin_head->ds_dir);
                                obj = snap->ds_phys->ds_prev_snap_obj;
                                dsl_destroy_snapshot_sync_impl(snap,
                                    B_FALSE, tx);
                                dsl_dataset_rele(snap, FTAG);
                        }
                }
                VERIFY3P(drc->drc_ds->ds_prev, ==,
                    origin_head->ds_prev);

                dsl_dataset_clone_swap_sync_impl(drc->drc_ds,
                    origin_head, tx);
                dsl_dataset_snapshot_sync_impl(origin_head,
                    drc->drc_tosnap, tx);

                /* set snapshot's creation time and guid */
                dmu_buf_will_dirty(origin_head->ds_prev->ds_dbuf, tx);
                origin_head->ds_prev->ds_phys->ds_creation_time =
                    drc->drc_drrb->drr_creation_time;
                origin_head->ds_prev->ds_phys->ds_guid =
                    drc->drc_drrb->drr_toguid;
                origin_head->ds_prev->ds_phys->ds_flags &=
                    ~DS_FLAG_INCONSISTENT;

                dmu_buf_will_dirty(origin_head->ds_dbuf, tx);
                origin_head->ds_phys->ds_flags &= ~DS_FLAG_INCONSISTENT;

                dsl_dataset_rele(origin_head, FTAG);
                dsl_destroy_head_sync_impl(drc->drc_ds, tx);

                if (drc->drc_owner != NULL)
                        VERIFY3P(origin_head->ds_owner, ==, drc->drc_owner);
        } else {
                dsl_dataset_t *ds = drc->drc_ds;

                dsl_dataset_snapshot_sync_impl(ds, drc->drc_tosnap, tx);

                /* set snapshot's creation time and guid */
                dmu_buf_will_dirty(ds->ds_prev->ds_dbuf, tx);
                ds->ds_prev->ds_phys->ds_creation_time =
                    drc->drc_drrb->drr_creation_time;
                ds->ds_prev->ds_phys->ds_guid = drc->drc_drrb->drr_toguid;
                ds->ds_prev->ds_phys->ds_flags &= ~DS_FLAG_INCONSISTENT;

                dmu_buf_will_dirty(ds->ds_dbuf, tx);
                ds->ds_phys->ds_flags &= ~DS_FLAG_INCONSISTENT;
        }
        drc->drc_newsnapobj = drc->drc_ds->ds_phys->ds_prev_snap_obj;
        /*
         * Release the hold from dmu_recv_begin. This must be done before
         * we return to open context, so that when we free the dataset's dnode,
         * we can evict its bonus buffer.
         */
        dsl_dataset_disown(drc->drc_ds, dmu_recv_tag);
        drc->drc_ds = NULL;
}

static int
add_ds_to_guidmap(const char *name, avl_tree_t *guid_map, uint64_t snapobj)
{
        dsl_pool_t *dp;
        dsl_dataset_t *snapds;
        guid_map_entry_t *gmep;
        int err;

        ASSERT(guid_map != NULL);

        err = dsl_pool_hold(name, FTAG, &dp);
        if (err != 0)
                return (err);
        gmep = kmem_alloc(sizeof (*gmep), KM_SLEEP);
        err = dsl_dataset_hold_obj(dp, snapobj, gmep, &snapds);
        if (err == 0) {
                gmep->guid = snapds->ds_phys->ds_guid;
                gmep->gme_ds = snapds;
                avl_add(guid_map, gmep);
                dsl_dataset_long_hold(snapds, gmep);
        } else {
                kmem_free(gmep, sizeof (*gmep));
        }

        dsl_pool_rele(dp, FTAG);
        return (err);
}

static int dmu_recv_end_modified_blocks = 3;

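/*
 * The two tails of dmu_recv_end(): dmu_recv_existing_end() swaps the
 * %recv clone into place over the existing head (unmounting the origin
 * first if necessary), while dmu_recv_new_end() just snapshots the new
 * dataset and records it in the dedup guid map if one is in use.
 */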
static int
dmu_recv_existing_end(dmu_recv_cookie_t *drc)
{
        int error;
        char name[MAXNAMELEN];

#ifdef _KERNEL
        /*
         * We will be destroying the ds; make sure its origin is unmounted if
         * necessary.
         */
        dsl_dataset_name(drc->drc_ds, name);
        zfs_destroy_unmount_origin(name);
#endif

        error = dsl_sync_task(drc->drc_tofs,
            dmu_recv_end_check, dmu_recv_end_sync, drc,
            dmu_recv_end_modified_blocks);

        if (error != 0)
                dmu_recv_cleanup_ds(drc);
        return (error);
}

static int
dmu_recv_new_end(dmu_recv_cookie_t *drc)
{
        int error;

        error = dsl_sync_task(drc->drc_tofs,
            dmu_recv_end_check, dmu_recv_end_sync, drc,
            dmu_recv_end_modified_blocks);

        if (error != 0) {
                dmu_recv_cleanup_ds(drc);
        } else if (drc->drc_guid_to_ds_map != NULL) {
                (void) add_ds_to_guidmap(drc->drc_tofs,
                    drc->drc_guid_to_ds_map,
                    drc->drc_newsnapobj);
        }
        return (error);
}

int
dmu_recv_end(dmu_recv_cookie_t *drc, void *owner)
{
        drc->drc_owner = owner;

        if (drc->drc_newfs)
                return (dmu_recv_new_end(drc));
        else
                return (dmu_recv_existing_end(drc));
}

/*
 * Return TRUE if this objset is currently being received into.
 */
boolean_t
dmu_objset_is_receiving(objset_t *os)
{
        return (os->os_dsl_dataset != NULL &&
            os->os_dsl_dataset->ds_owner == dmu_recv_tag);
}