370 ARC_BUFC_DATA);
371 uint64_t *ptr;
372 for (ptr = abuf->b_data;
373 (char *)ptr < (char *)abuf->b_data + blksz;
374 ptr++)
375 *ptr = 0x2f5baddb10c;
376 } else {
377 return (EIO);
378 }
379 }
380
381 err = dump_data(dsp, type, zb->zb_object, zb->zb_blkid * blksz,
382 blksz, bp, abuf->b_data);
383 (void) arc_buf_remove_ref(abuf, &abuf);
384 }
385
386 ASSERT(err == 0 || err == EINTR);
387 return (err);
388 }
389
/*
 * Generate a send stream of the snapshot 'tosnap' and write it through
 * 'vp'/'outfd', advancing '*off' as bytes are written.  If 'fromsnap' is
 * non-NULL the stream is incremental from that (earlier) snapshot of the
 * same filesystem; if 'fromorigin' is set and 'tosnap' belongs to a clone,
 * the stream is incremental from the clone's origin snapshot instead.
 * Returns 0 on success or an errno value.
 */
int
dmu_send(objset_t *tosnap, objset_t *fromsnap, boolean_t fromorigin,
    int outfd, vnode_t *vp, offset_t *off)
{
	dsl_dataset_t *ds = tosnap->os_dsl_dataset;
	dsl_dataset_t *fromds = fromsnap ? fromsnap->os_dsl_dataset : NULL;
	dmu_replay_record_t *drr;
	dmu_sendarg_t *dsp;
	int err;
	uint64_t fromtxg = 0;

	/* tosnap must be a snapshot */
	if (ds->ds_phys->ds_next_snap_obj == 0)
		return (EINVAL);

	/* fromsnap must be an earlier snapshot from the same fs as tosnap */
	if (fromds && (ds->ds_dir != fromds->ds_dir ||
	    fromds->ds_phys->ds_creation_txg >= ds->ds_phys->ds_creation_txg))
		return (EXDEV);

	if (fromorigin) {
		dsl_pool_t *dp = ds->ds_dir->dd_pool;

		/* An explicit fromsnap and fromorigin are mutually exclusive. */
		if (fromsnap)
			return (EINVAL);

		if (dsl_dir_is_clone(ds->ds_dir)) {
			/* Hold the clone's origin snapshot as the "from". */
			rw_enter(&dp->dp_config_rwlock, RW_READER);
			err = dsl_dataset_hold_obj(dp,
			    ds->ds_dir->dd_phys->dd_origin_obj, FTAG, &fromds);
			rw_exit(&dp->dp_config_rwlock);
			if (err)
				return (err);
		} else {
			/* Not a clone: no origin exists, send a full stream. */
			fromorigin = B_FALSE;
		}
	}


	/* Build the DRR_BEGIN record that heads the stream. */
	drr = kmem_zalloc(sizeof (dmu_replay_record_t), KM_SLEEP);
	drr->drr_type = DRR_BEGIN;
	drr->drr_u.drr_begin.drr_magic = DMU_BACKUP_MAGIC;
	DMU_SET_STREAM_HDRTYPE(drr->drr_u.drr_begin.drr_versioninfo,
	    DMU_SUBSTREAM);

#ifdef _KERNEL
	/* Advertise the SA_SPILL feature when the ZPL version requires it. */
	if (dmu_objset_type(tosnap) == DMU_OST_ZFS) {
		uint64_t version;
		if (zfs_get_zplprop(tosnap, ZFS_PROP_VERSION, &version) != 0) {
			kmem_free(drr, sizeof (dmu_replay_record_t));
			return (EINVAL);
		}
		if (version == ZPL_VERSION_SA) {
			DMU_SET_FEATUREFLAGS(
			    drr->drr_u.drr_begin.drr_versioninfo,
			    DMU_BACKUP_FEATURE_SA_SPILL);
		}
	}
#endif

	drr->drr_u.drr_begin.drr_creation_time =
	    ds->ds_phys->ds_creation_time;
	drr->drr_u.drr_begin.drr_type = tosnap->os_phys->os_type;
	if (fromorigin)
		drr->drr_u.drr_begin.drr_flags |= DRR_FLAG_CLONE;
	drr->drr_u.drr_begin.drr_toguid = ds->ds_phys->ds_guid;
	if (ds->ds_phys->ds_flags & DS_FLAG_CI_DATASET)
		drr->drr_u.drr_begin.drr_flags |= DRR_FLAG_CI_DATA;

	if (fromds)
		drr->drr_u.drr_begin.drr_fromguid = fromds->ds_phys->ds_guid;
	dsl_dataset_name(ds, drr->drr_u.drr_begin.drr_toname);

	if (fromds)
		fromtxg = fromds->ds_phys->ds_creation_txg;
	/* fromds was held above only for the fromorigin case; drop it now. */
	if (fromorigin)
		dsl_dataset_rele(fromds, FTAG);

	/* Per-stream state shared with the dump_* callbacks. */
	dsp = kmem_zalloc(sizeof (dmu_sendarg_t), KM_SLEEP);

	dsp->dsa_drr = drr;
	dsp->dsa_vp = vp;
	dsp->dsa_outfd = outfd;
	dsp->dsa_proc = curproc;
	dsp->dsa_os = tosnap;
	dsp->dsa_off = off;
	dsp->dsa_toguid = ds->ds_phys->ds_guid;
	ZIO_SET_CHECKSUM(&dsp->dsa_zc, 0, 0, 0, 0);
	dsp->dsa_pending_op = PENDING_NONE;

	/* Register this stream so others can see an active send. */
	mutex_enter(&ds->ds_sendstream_lock);
	list_insert_head(&ds->ds_sendstreams, dsp);
	mutex_exit(&ds->ds_sendstream_lock);

	if (dump_bytes(dsp, drr, sizeof (dmu_replay_record_t)) != 0) {
		err = dsp->dsa_err;
		goto out;
	/*
	 * NOTE(review): the dataset-traversal body and DRR_END setup appear
	 * elided in this chunk; confirm against the full source.
	 */
	drr->drr_u.drr_end.drr_checksum = dsp->dsa_zc;
	drr->drr_u.drr_end.drr_toguid = dsp->dsa_toguid;

	if (dump_bytes(dsp, drr, sizeof (dmu_replay_record_t)) != 0) {
		err = dsp->dsa_err;
		goto out;
	}

out:
	/* Unregister the stream and free all per-send state. */
	mutex_enter(&ds->ds_sendstream_lock);
	list_remove(&ds->ds_sendstreams, dsp);
	mutex_exit(&ds->ds_sendstream_lock);

	kmem_free(drr, sizeof (dmu_replay_record_t));
	kmem_free(dsp, sizeof (dmu_sendarg_t));

	return (err);
}
522
/*
 * Estimate, in '*sizep', the size of the stream that dmu_send() would
 * produce for the same (tosnap, fromsnap, fromorigin) arguments.  The
 * estimate starts from the uncompressed size of the data that changed
 * and is then adjusted for indirect-block overhead (below).
 * Returns 0 on success or an errno value.
 */
int
dmu_send_estimate(objset_t *tosnap, objset_t *fromsnap, boolean_t fromorigin,
    uint64_t *sizep)
{
	dsl_dataset_t *ds = tosnap->os_dsl_dataset;
	dsl_dataset_t *fromds = fromsnap ? fromsnap->os_dsl_dataset : NULL;
	dsl_pool_t *dp = ds->ds_dir->dd_pool;
	int err;
	uint64_t size;

	/* tosnap must be a snapshot */
	if (ds->ds_phys->ds_next_snap_obj == 0)
		return (EINVAL);

	/* fromsnap must be an earlier snapshot from the same fs as tosnap */
	if (fromds && (ds->ds_dir != fromds->ds_dir ||
	    fromds->ds_phys->ds_creation_txg >= ds->ds_phys->ds_creation_txg))
		return (EXDEV);

	if (fromorigin) {
		/* An explicit fromsnap and fromorigin are mutually exclusive. */
		if (fromsnap)
			return (EINVAL);

		if (dsl_dir_is_clone(ds->ds_dir)) {
			/* Hold the clone's origin snapshot as the "from". */
			rw_enter(&dp->dp_config_rwlock, RW_READER);
			err = dsl_dataset_hold_obj(dp,
			    ds->ds_dir->dd_phys->dd_origin_obj, FTAG, &fromds);
			rw_exit(&dp->dp_config_rwlock);
			if (err)
				return (err);
		} else {
			/* Not a clone: no origin exists, estimate a full send. */
			fromorigin = B_FALSE;
		}
	}

	/* Get uncompressed size estimate of changed data. */
	if (fromds == NULL) {
		/* Full stream: everything referenced by the snapshot. */
		size = ds->ds_phys->ds_uncompressed_bytes;
	} else {
		uint64_t used, comp;
		err = dsl_dataset_space_written(fromds, ds,
		    &used, &comp, &size);
		/* Drop the origin hold taken above before checking err. */
		if (fromorigin)
			dsl_dataset_rele(fromds, FTAG);
		if (err)
			return (err);
	}

	/*
	 * Assume that space (both on-disk and in-stream) is dominated by
	 * data. We will adjust for indirect blocks and the copies property,
	 * but ignore per-object space used (eg, dnodes and DRR_OBJECT records).
	 */

	/*
	 * Subtract out approximate space used by indirect blocks.
	 * Assume most space is used by data blocks (non-indirect, non-dnode).
	 * Assume all blocks are recordsize. Assume ditto blocks and
	 * internal fragmentation counter out compression.
	 *
	 * Therefore, space used by indirect blocks is sizeof(blkptr_t) per
	 * block, which we observe in practice.
	 */
	uint64_t recordsize;
645
646 static void
647 recv_new_sync(void *arg1, void *arg2, dmu_tx_t *tx)
648 {
649 dsl_dir_t *dd = arg1;
650 struct recvbeginsyncarg *rbsa = arg2;
651 uint64_t flags = DS_FLAG_INCONSISTENT | rbsa->dsflags;
652 uint64_t dsobj;
653
654 /* Create and open new dataset. */
655 dsobj = dsl_dataset_create_sync(dd, strrchr(rbsa->tofs, '/') + 1,
656 rbsa->origin, flags, rbsa->cr, tx);
657 VERIFY(0 == dsl_dataset_own_obj(dd->dd_pool, dsobj,
658 B_TRUE, dmu_recv_tag, &rbsa->ds));
659
660 if (rbsa->origin == NULL) {
661 (void) dmu_objset_create_impl(dd->dd_pool->dp_spa,
662 rbsa->ds, &rbsa->ds->ds_phys->ds_bp, rbsa->type, tx);
663 }
664
665 spa_history_log_internal(LOG_DS_REPLAY_FULL_SYNC,
666 dd->dd_pool->dp_spa, tx, "dataset = %lld", dsobj);
667 }
668
/*
 * Sync-task check for receiving into an existing filesystem: refuse if
 * the filesystem has been modified since its most recent snapshot
 * (unless the receive is forced), or if the target snapshot name
 * already exists on the dataset.
 */
/* ARGSUSED */
static int
recv_existing_check(void *arg1, void *arg2, dmu_tx_t *tx)
{
	dsl_dataset_t *ds = arg1;
	struct recvbeginsyncarg *rbsa = arg2;
	int err;
	uint64_t val;

	/* must not have any changes since most recent snapshot */
	if (!rbsa->force && dsl_dataset_modified_since_lastsnap(ds))
		return (ETXTBSY);

	/* new snapshot name must not exist */
	err = zap_lookup(ds->ds_dir->dd_pool->dp_meta_objset,
	    ds->ds_phys->ds_snapnames_zapobj, rbsa->tosnap, 8, 1, &val);
	if (err == 0)
		return (EEXIST);
747 dsl_dataset_t *cds;
748 uint64_t flags = DS_FLAG_INCONSISTENT | rbsa->dsflags;
749 uint64_t dsobj;
750
751 /* create and open the temporary clone */
752 dsobj = dsl_dataset_create_sync(ohds->ds_dir, rbsa->clonelastname,
753 ohds->ds_prev, flags, rbsa->cr, tx);
754 VERIFY(0 == dsl_dataset_own_obj(dp, dsobj, B_TRUE, dmu_recv_tag, &cds));
755
756 /*
757 * If we actually created a non-clone, we need to create the
758 * objset in our new dataset.
759 */
760 if (BP_IS_HOLE(dsl_dataset_get_blkptr(cds))) {
761 (void) dmu_objset_create_impl(dp->dp_spa,
762 cds, dsl_dataset_get_blkptr(cds), rbsa->type, tx);
763 }
764
765 rbsa->ds = cds;
766
767 spa_history_log_internal(LOG_DS_REPLAY_INC_SYNC,
768 dp->dp_spa, tx, "dataset = %lld", dsobj);
769 }
770
771 static boolean_t
772 dmu_recv_verify_features(dsl_dataset_t *ds, struct drr_begin *drrb)
773 {
774 int featureflags;
775
776 featureflags = DMU_GET_FEATUREFLAGS(drrb->drr_versioninfo);
777
778 /* Verify pool version supports SA if SA_SPILL feature set */
779 return ((featureflags & DMU_BACKUP_FEATURE_SA_SPILL) &&
780 (spa_version(dsl_dataset_get_spa(ds)) < SPA_VERSION_SA));
781 }
782
783 /*
784 * NB: callers *MUST* call dmu_recv_stream() if dmu_recv_begin()
785 * succeeds; otherwise we will leak the holds on the datasets.
786 */
787 int
788 dmu_recv_begin(char *tofs, char *tosnap, char *top_ds, struct drr_begin *drrb,
1556
1557 return (dsl_dataset_snapshot_check(ds, resa->tosnap, tx));
1558 }
1559
1560 static void
1561 recv_end_sync(void *arg1, void *arg2, dmu_tx_t *tx)
1562 {
1563 dsl_dataset_t *ds = arg1;
1564 struct recvendsyncarg *resa = arg2;
1565
1566 dsl_dataset_snapshot_sync(ds, resa->tosnap, tx);
1567
1568 /* set snapshot's creation time and guid */
1569 dmu_buf_will_dirty(ds->ds_prev->ds_dbuf, tx);
1570 ds->ds_prev->ds_phys->ds_creation_time = resa->creation_time;
1571 ds->ds_prev->ds_phys->ds_guid = resa->toguid;
1572 ds->ds_prev->ds_phys->ds_flags &= ~DS_FLAG_INCONSISTENT;
1573
1574 dmu_buf_will_dirty(ds->ds_dbuf, tx);
1575 ds->ds_phys->ds_flags &= ~DS_FLAG_INCONSISTENT;
1576 }
1577
/*
 * Record the guid of 'ds's previous snapshot in 'guid_map' so later
 * stream records can be resolved back to datasets by guid.  The
 * snapshot is held with the map pointer itself as the hold tag;
 * presumably the hold is released when the map is torn down -- confirm
 * against the caller that frees the map.
 */
static int
add_ds_to_guidmap(avl_tree_t *guid_map, dsl_dataset_t *ds)
{
	dsl_pool_t *dp = ds->ds_dir->dd_pool;
	uint64_t snapobj = ds->ds_phys->ds_prev_snap_obj;
	dsl_dataset_t *snapds;
	guid_map_entry_t *gmep;
	int err;

	ASSERT(guid_map != NULL);

	rw_enter(&dp->dp_config_rwlock, RW_READER);
	/* Hold the previous snapshot, tagged with the map itself. */
	err = dsl_dataset_hold_obj(dp, snapobj, guid_map, &snapds);
	if (err == 0) {
		gmep = kmem_alloc(sizeof (guid_map_entry_t), KM_SLEEP);
		gmep->guid = snapds->ds_phys->ds_guid;
		gmep->gme_ds = snapds;
		avl_add(guid_map, gmep);
|
370 ARC_BUFC_DATA);
371 uint64_t *ptr;
372 for (ptr = abuf->b_data;
373 (char *)ptr < (char *)abuf->b_data + blksz;
374 ptr++)
375 *ptr = 0x2f5baddb10c;
376 } else {
377 return (EIO);
378 }
379 }
380
381 err = dump_data(dsp, type, zb->zb_object, zb->zb_blkid * blksz,
382 blksz, bp, abuf->b_data);
383 (void) arc_buf_remove_ref(abuf, &abuf);
384 }
385
386 ASSERT(err == 0 || err == EINTR);
387 return (err);
388 }
389
390 /*
391 * Return TRUE if 'earlier' is an earlier snapshot in 'later's timeline.
392 * For example, they could both be snapshots of the same filesystem, and
393 * 'earlier' is before 'later'. Or 'earlier' could be the origin of
394 * 'later's filesystem. Or 'earlier' could be an older snapshot in the origin's
395 * filesystem. Or 'earlier' could be the origin's origin.
396 */
397 static boolean_t
398 is_before(dsl_dataset_t *later, dsl_dataset_t *earlier)
399 {
400 dsl_pool_t *dp = later->ds_dir->dd_pool;
401 int error;
402 boolean_t ret;
403 dsl_dataset_t *origin;
404
405 if (earlier->ds_phys->ds_creation_txg >=
406 later->ds_phys->ds_creation_txg)
407 return (B_FALSE);
408
409 if (later->ds_dir == earlier->ds_dir)
410 return (B_TRUE);
411 if (!dsl_dir_is_clone(later->ds_dir))
412 return (B_FALSE);
413
414 rw_enter(&dp->dp_config_rwlock, RW_READER);
415 if (later->ds_dir->dd_phys->dd_origin_obj == earlier->ds_object) {
416 rw_exit(&dp->dp_config_rwlock);
417 return (B_TRUE);
418 }
419 error = dsl_dataset_hold_obj(dp,
420 later->ds_dir->dd_phys->dd_origin_obj, FTAG, &origin);
421 rw_exit(&dp->dp_config_rwlock);
422 if (error != 0)
423 return (B_FALSE);
424 ret = is_before(origin, earlier);
425 dsl_dataset_rele(origin, FTAG);
426 return (ret);
427 }
428
/*
 * Generate a send stream of the snapshot 'tosnap' and write it through
 * 'vp'/'outfd', advancing '*off' as bytes are written.  If 'fromsnap'
 * is non-NULL the stream is incremental from that snapshot, which must
 * be "before" tosnap in its timeline (see is_before()) -- same
 * filesystem or somewhere up the origin chain.
 * Returns 0 on success or an errno value.
 */
int
dmu_send(objset_t *tosnap, objset_t *fromsnap, int outfd, vnode_t *vp,
    offset_t *off)
{
	dsl_dataset_t *ds = tosnap->os_dsl_dataset;
	dsl_dataset_t *fromds = fromsnap ? fromsnap->os_dsl_dataset : NULL;
	dmu_replay_record_t *drr;
	dmu_sendarg_t *dsp;
	int err;
	uint64_t fromtxg = 0;

	/* tosnap must be a snapshot */
	if (ds->ds_phys->ds_next_snap_obj == 0)
		return (EINVAL);

	/*
	 * fromsnap must be an earlier snapshot from the same fs as tosnap,
	 * or the origin's fs.
	 */
	if (fromds != NULL && !is_before(ds, fromds))
		return (EXDEV);

	/* Build the DRR_BEGIN record that heads the stream. */
	drr = kmem_zalloc(sizeof (dmu_replay_record_t), KM_SLEEP);
	drr->drr_type = DRR_BEGIN;
	drr->drr_u.drr_begin.drr_magic = DMU_BACKUP_MAGIC;
	DMU_SET_STREAM_HDRTYPE(drr->drr_u.drr_begin.drr_versioninfo,
	    DMU_SUBSTREAM);

#ifdef _KERNEL
	/* Advertise the SA_SPILL feature when the ZPL version requires it. */
	if (dmu_objset_type(tosnap) == DMU_OST_ZFS) {
		uint64_t version;
		if (zfs_get_zplprop(tosnap, ZFS_PROP_VERSION, &version) != 0) {
			kmem_free(drr, sizeof (dmu_replay_record_t));
			return (EINVAL);
		}
		if (version == ZPL_VERSION_SA) {
			DMU_SET_FEATUREFLAGS(
			    drr->drr_u.drr_begin.drr_versioninfo,
			    DMU_BACKUP_FEATURE_SA_SPILL);
		}
	}
#endif

	drr->drr_u.drr_begin.drr_creation_time =
	    ds->ds_phys->ds_creation_time;
	drr->drr_u.drr_begin.drr_type = tosnap->os_phys->os_type;
	/* A "from" in a different filesystem means the target is a clone. */
	if (fromds != NULL && ds->ds_dir != fromds->ds_dir)
		drr->drr_u.drr_begin.drr_flags |= DRR_FLAG_CLONE;
	drr->drr_u.drr_begin.drr_toguid = ds->ds_phys->ds_guid;
	if (ds->ds_phys->ds_flags & DS_FLAG_CI_DATASET)
		drr->drr_u.drr_begin.drr_flags |= DRR_FLAG_CI_DATA;

	if (fromds)
		drr->drr_u.drr_begin.drr_fromguid = fromds->ds_phys->ds_guid;
	dsl_dataset_name(ds, drr->drr_u.drr_begin.drr_toname);

	if (fromds)
		fromtxg = fromds->ds_phys->ds_creation_txg;

	/* Per-stream state shared with the dump_* callbacks. */
	dsp = kmem_zalloc(sizeof (dmu_sendarg_t), KM_SLEEP);

	dsp->dsa_drr = drr;
	dsp->dsa_vp = vp;
	dsp->dsa_outfd = outfd;
	dsp->dsa_proc = curproc;
	dsp->dsa_os = tosnap;
	dsp->dsa_off = off;
	dsp->dsa_toguid = ds->ds_phys->ds_guid;
	ZIO_SET_CHECKSUM(&dsp->dsa_zc, 0, 0, 0, 0);
	dsp->dsa_pending_op = PENDING_NONE;

	/* Register this stream so others can see an active send. */
	mutex_enter(&ds->ds_sendstream_lock);
	list_insert_head(&ds->ds_sendstreams, dsp);
	mutex_exit(&ds->ds_sendstream_lock);

	if (dump_bytes(dsp, drr, sizeof (dmu_replay_record_t)) != 0) {
		err = dsp->dsa_err;
		goto out;
	/*
	 * NOTE(review): the dataset-traversal body and DRR_END setup appear
	 * elided in this chunk; confirm against the full source.
	 */
	drr->drr_u.drr_end.drr_checksum = dsp->dsa_zc;
	drr->drr_u.drr_end.drr_toguid = dsp->dsa_toguid;

	if (dump_bytes(dsp, drr, sizeof (dmu_replay_record_t)) != 0) {
		err = dsp->dsa_err;
		goto out;
	}

out:
	/* Unregister the stream and free all per-send state. */
	mutex_enter(&ds->ds_sendstream_lock);
	list_remove(&ds->ds_sendstreams, dsp);
	mutex_exit(&ds->ds_sendstream_lock);

	kmem_free(drr, sizeof (dmu_replay_record_t));
	kmem_free(dsp, sizeof (dmu_sendarg_t));

	return (err);
}
542
/*
 * Estimate, in '*sizep', the size of the stream that dmu_send() would
 * produce for the same (tosnap, fromsnap) arguments.  The estimate
 * starts from the uncompressed size of the data that changed and is
 * then adjusted for indirect-block overhead (below).
 * Returns 0 on success or an errno value.
 */
int
dmu_send_estimate(objset_t *tosnap, objset_t *fromsnap, uint64_t *sizep)
{
	dsl_dataset_t *ds = tosnap->os_dsl_dataset;
	dsl_dataset_t *fromds = fromsnap ? fromsnap->os_dsl_dataset : NULL;
	dsl_pool_t *dp = ds->ds_dir->dd_pool;
	int err;
	uint64_t size;

	/* tosnap must be a snapshot */
	if (ds->ds_phys->ds_next_snap_obj == 0)
		return (EINVAL);

	/*
	 * fromsnap must be an earlier snapshot from the same fs as tosnap,
	 * or the origin's fs.
	 */
	if (fromds != NULL && !is_before(ds, fromds))
		return (EXDEV);

	/* Get uncompressed size estimate of changed data. */
	if (fromds == NULL) {
		/* Full stream: everything referenced by the snapshot. */
		size = ds->ds_phys->ds_uncompressed_bytes;
	} else {
		uint64_t used, comp;
		err = dsl_dataset_space_written(fromds, ds,
		    &used, &comp, &size);
		if (err)
			return (err);
	}

	/*
	 * Assume that space (both on-disk and in-stream) is dominated by
	 * data. We will adjust for indirect blocks and the copies property,
	 * but ignore per-object space used (eg, dnodes and DRR_OBJECT records).
	 */

	/*
	 * Subtract out approximate space used by indirect blocks.
	 * Assume most space is used by data blocks (non-indirect, non-dnode).
	 * Assume all blocks are recordsize. Assume ditto blocks and
	 * internal fragmentation counter out compression.
	 *
	 * Therefore, space used by indirect blocks is sizeof(blkptr_t) per
	 * block, which we observe in practice.
	 */
	uint64_t recordsize;
648
649 static void
650 recv_new_sync(void *arg1, void *arg2, dmu_tx_t *tx)
651 {
652 dsl_dir_t *dd = arg1;
653 struct recvbeginsyncarg *rbsa = arg2;
654 uint64_t flags = DS_FLAG_INCONSISTENT | rbsa->dsflags;
655 uint64_t dsobj;
656
657 /* Create and open new dataset. */
658 dsobj = dsl_dataset_create_sync(dd, strrchr(rbsa->tofs, '/') + 1,
659 rbsa->origin, flags, rbsa->cr, tx);
660 VERIFY(0 == dsl_dataset_own_obj(dd->dd_pool, dsobj,
661 B_TRUE, dmu_recv_tag, &rbsa->ds));
662
663 if (rbsa->origin == NULL) {
664 (void) dmu_objset_create_impl(dd->dd_pool->dp_spa,
665 rbsa->ds, &rbsa->ds->ds_phys->ds_bp, rbsa->type, tx);
666 }
667
668 spa_history_log_internal_ds(rbsa->ds, "receive new", tx, "");
669 }
670
/*
 * Sync-task check for receiving into an existing filesystem: refuse if
 * the filesystem has been modified since its most recent snapshot
 * (unless the receive is forced), or if the target snapshot name
 * already exists on the dataset.
 */
/* ARGSUSED */
static int
recv_existing_check(void *arg1, void *arg2, dmu_tx_t *tx)
{
	dsl_dataset_t *ds = arg1;
	struct recvbeginsyncarg *rbsa = arg2;
	int err;
	uint64_t val;

	/* must not have any changes since most recent snapshot */
	if (!rbsa->force && dsl_dataset_modified_since_lastsnap(ds))
		return (ETXTBSY);

	/* new snapshot name must not exist */
	err = zap_lookup(ds->ds_dir->dd_pool->dp_meta_objset,
	    ds->ds_phys->ds_snapnames_zapobj, rbsa->tosnap, 8, 1, &val);
	if (err == 0)
		return (EEXIST);
749 dsl_dataset_t *cds;
750 uint64_t flags = DS_FLAG_INCONSISTENT | rbsa->dsflags;
751 uint64_t dsobj;
752
753 /* create and open the temporary clone */
754 dsobj = dsl_dataset_create_sync(ohds->ds_dir, rbsa->clonelastname,
755 ohds->ds_prev, flags, rbsa->cr, tx);
756 VERIFY(0 == dsl_dataset_own_obj(dp, dsobj, B_TRUE, dmu_recv_tag, &cds));
757
758 /*
759 * If we actually created a non-clone, we need to create the
760 * objset in our new dataset.
761 */
762 if (BP_IS_HOLE(dsl_dataset_get_blkptr(cds))) {
763 (void) dmu_objset_create_impl(dp->dp_spa,
764 cds, dsl_dataset_get_blkptr(cds), rbsa->type, tx);
765 }
766
767 rbsa->ds = cds;
768
769 spa_history_log_internal_ds(cds, "receive over existing", tx, "");
770 }
771
772 static boolean_t
773 dmu_recv_verify_features(dsl_dataset_t *ds, struct drr_begin *drrb)
774 {
775 int featureflags;
776
777 featureflags = DMU_GET_FEATUREFLAGS(drrb->drr_versioninfo);
778
779 /* Verify pool version supports SA if SA_SPILL feature set */
780 return ((featureflags & DMU_BACKUP_FEATURE_SA_SPILL) &&
781 (spa_version(dsl_dataset_get_spa(ds)) < SPA_VERSION_SA));
782 }
783
784 /*
785 * NB: callers *MUST* call dmu_recv_stream() if dmu_recv_begin()
786 * succeeds; otherwise we will leak the holds on the datasets.
787 */
788 int
789 dmu_recv_begin(char *tofs, char *tosnap, char *top_ds, struct drr_begin *drrb,
1557
1558 return (dsl_dataset_snapshot_check(ds, resa->tosnap, tx));
1559 }
1560
1561 static void
1562 recv_end_sync(void *arg1, void *arg2, dmu_tx_t *tx)
1563 {
1564 dsl_dataset_t *ds = arg1;
1565 struct recvendsyncarg *resa = arg2;
1566
1567 dsl_dataset_snapshot_sync(ds, resa->tosnap, tx);
1568
1569 /* set snapshot's creation time and guid */
1570 dmu_buf_will_dirty(ds->ds_prev->ds_dbuf, tx);
1571 ds->ds_prev->ds_phys->ds_creation_time = resa->creation_time;
1572 ds->ds_prev->ds_phys->ds_guid = resa->toguid;
1573 ds->ds_prev->ds_phys->ds_flags &= ~DS_FLAG_INCONSISTENT;
1574
1575 dmu_buf_will_dirty(ds->ds_dbuf, tx);
1576 ds->ds_phys->ds_flags &= ~DS_FLAG_INCONSISTENT;
1577 spa_history_log_internal_ds(ds, "finished receiving", tx, "");
1578 }
1579
/*
 * Record the guid of 'ds's previous snapshot in 'guid_map' so later
 * stream records can be resolved back to datasets by guid.  The
 * snapshot is held with the map pointer itself as the hold tag;
 * presumably the hold is released when the map is torn down -- confirm
 * against the caller that frees the map.
 */
static int
add_ds_to_guidmap(avl_tree_t *guid_map, dsl_dataset_t *ds)
{
	dsl_pool_t *dp = ds->ds_dir->dd_pool;
	uint64_t snapobj = ds->ds_phys->ds_prev_snap_obj;
	dsl_dataset_t *snapds;
	guid_map_entry_t *gmep;
	int err;

	ASSERT(guid_map != NULL);

	rw_enter(&dp->dp_config_rwlock, RW_READER);
	/* Hold the previous snapshot, tagged with the map itself. */
	err = dsl_dataset_hold_obj(dp, snapobj, guid_map, &snapds);
	if (err == 0) {
		gmep = kmem_alloc(sizeof (guid_map_entry_t), KM_SLEEP);
		gmep->guid = snapds->ds_phys->ds_guid;
		gmep->gme_ds = snapds;
		avl_add(guid_map, gmep);