34 #include <sys/dmu_objset.h>
35 #include <sys/dmu_traverse.h>
36 #include <sys/dsl_dataset.h>
37 #include <sys/dsl_dir.h>
38 #include <sys/dsl_prop.h>
39 #include <sys/dsl_pool.h>
40 #include <sys/dsl_synctask.h>
41 #include <sys/zfs_ioctl.h>
42 #include <sys/zap.h>
43 #include <sys/zio_checksum.h>
44 #include <sys/zfs_znode.h>
45 #include <zfs_fletcher.h>
46 #include <sys/avl.h>
47 #include <sys/ddt.h>
48 #include <sys/zfs_onexit.h>
49
50 /* Set this tunable to TRUE to replace corrupt data with 0x2f5baddb10c */
51 int zfs_send_corrupt_data = B_FALSE;
52
53 static char *dmu_recv_tag = "dmu_recv_tag";
54
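/*
 * Write a buffer of send-stream data to the output vnode, folding it into
 * the stream's running fletcher-4 checksum and advancing the caller's
 * offset under ds_sendstream_lock.
 */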
55 static int
56 dump_bytes(dmu_sendarg_t *dsp, void *buf, int len)
57 {
58 dsl_dataset_t *ds = dsp->dsa_os->os_dsl_dataset;
59 ssize_t resid; /* have to get resid to get detailed errno */
60 ASSERT0(len % 8);
61
62 fletcher_4_incremental_native(buf, len, &dsp->dsa_zc);
63 dsp->dsa_err = vn_rdwr(UIO_WRITE, dsp->dsa_vp,
64 (caddr_t)buf, len,
65 0, UIO_SYSSPACE, FAPPEND, RLIM64_INFINITY, CRED(), &resid);
66
67 mutex_enter(&ds->ds_sendstream_lock);
68 *dsp->dsa_off += len;
69 mutex_exit(&ds->ds_sendstream_lock);
70
71 return (dsp->dsa_err);
72 }
73
626 objset_t *mos = dd->dd_pool->dp_meta_objset;
627 uint64_t val;
628 int err;
629
630 err = zap_lookup(mos, dd->dd_phys->dd_child_dir_zapobj,
631 strrchr(rbsa->tofs, '/') + 1, sizeof (uint64_t), 1, &val);
632
633 if (err != ENOENT)
634 return (err ? err : EEXIST);
635
636 if (rbsa->origin) {
637 /* make sure it's a snap in the same pool */
638 if (rbsa->origin->ds_dir->dd_pool != dd->dd_pool)
639 return (EXDEV);
640 if (!dsl_dataset_is_snapshot(rbsa->origin))
641 return (EINVAL);
642 if (rbsa->origin->ds_phys->ds_guid != rbsa->fromguid)
643 return (ENODEV);
644 }
645
646 return (0);
647 }
648
649 static void
650 recv_new_sync(void *arg1, void *arg2, dmu_tx_t *tx)
651 {
652 dsl_dir_t *dd = arg1;
653 struct recvbeginsyncarg *rbsa = arg2;
654 uint64_t flags = DS_FLAG_INCONSISTENT | rbsa->dsflags;
655 uint64_t dsobj;
656
657 /* Create and open new dataset. */
658 dsobj = dsl_dataset_create_sync(dd, strrchr(rbsa->tofs, '/') + 1,
659 rbsa->origin, flags, rbsa->cr, tx);
660 VERIFY(0 == dsl_dataset_own_obj(dd->dd_pool, dsobj,
661 B_TRUE, dmu_recv_tag, &rbsa->ds));
662
663 if (rbsa->origin == NULL) {
664 (void) dmu_objset_create_impl(dd->dd_pool->dp_spa,
665 rbsa->ds, &rbsa->ds->ds_phys->ds_bp, rbsa->type, tx);
708 if (err)
709 return (ENODEV);
710 if (snap->ds_phys->ds_creation_txg < birth) {
711 dsl_dataset_rele(snap, FTAG);
712 return (ENODEV);
713 }
714 if (snap->ds_phys->ds_guid == rbsa->fromguid) {
715 dsl_dataset_rele(snap, FTAG);
716 break; /* it's ok */
717 }
718 obj = snap->ds_phys->ds_prev_snap_obj;
719 dsl_dataset_rele(snap, FTAG);
720 }
721 if (obj == 0)
722 return (ENODEV);
723 }
724 } else {
725 /* if full, most recent snapshot must be $ORIGIN */
726 if (ds->ds_phys->ds_prev_snap_txg >= TXG_INITIAL)
727 return (ENODEV);
728 }
729
730 /* temporary clone name must not exist */
731 err = zap_lookup(ds->ds_dir->dd_pool->dp_meta_objset,
732 ds->ds_dir->dd_phys->dd_child_dir_zapobj,
733 rbsa->clonelastname, 8, 1, &val);
734 if (err == 0)
735 return (EEXIST);
736 if (err != ENOENT)
737 return (err);
738
739 return (0);
740 }
741
742 /* ARGSUSED */
743 static void
744 recv_existing_sync(void *arg1, void *arg2, dmu_tx_t *tx)
745 {
746 dsl_dataset_t *ohds = arg1;
747 struct recvbeginsyncarg *rbsa = arg2;
1530 */
1531 txg_wait_synced(drc->drc_real_ds->ds_dir->dd_pool, 0);
1532
1533 (void) dsl_dataset_destroy(drc->drc_real_ds, dmu_recv_tag,
1534 B_FALSE);
1535 if (drc->drc_real_ds != drc->drc_logical_ds) {
1536 mutex_exit(&drc->drc_logical_ds->ds_recvlock);
1537 dsl_dataset_rele(drc->drc_logical_ds, dmu_recv_tag);
1538 }
1539 }
1540
1541 kmem_free(ra.buf, ra.bufsize);
1542 *voffp = ra.voff;
1543 return (ra.err);
1544 }
1545
1546 struct recvendsyncarg {
1547 char *tosnap;
1548 uint64_t creation_time;
1549 uint64_t toguid;
1550 };
1551
1552 static int
1553 recv_end_check(void *arg1, void *arg2, dmu_tx_t *tx)
1554 {
1555 dsl_dataset_t *ds = arg1;
1556 struct recvendsyncarg *resa = arg2;
1557
1558 return (dsl_dataset_snapshot_check(ds, resa->tosnap, tx));
1559 }
1560
1561 static void
1562 recv_end_sync(void *arg1, void *arg2, dmu_tx_t *tx)
1563 {
1564 dsl_dataset_t *ds = arg1;
1565 struct recvendsyncarg *resa = arg2;
1566
1567 dsl_dataset_snapshot_sync(ds, resa->tosnap, tx);
1568
1569 /* set snapshot's creation time and guid */
1570 dmu_buf_will_dirty(ds->ds_prev->ds_dbuf, tx);
1571 ds->ds_prev->ds_phys->ds_creation_time = resa->creation_time;
1572 ds->ds_prev->ds_phys->ds_guid = resa->toguid;
1573 ds->ds_prev->ds_phys->ds_flags &= ~DS_FLAG_INCONSISTENT;
1574
1575 dmu_buf_will_dirty(ds->ds_dbuf, tx);
1576 ds->ds_phys->ds_flags &= ~DS_FLAG_INCONSISTENT;
1577 spa_history_log_internal_ds(ds, "finished receiving", tx, "");
1578 }
1579
1580 static int
1581 add_ds_to_guidmap(avl_tree_t *guid_map, dsl_dataset_t *ds)
1582 {
1583 dsl_pool_t *dp = ds->ds_dir->dd_pool;
1584 uint64_t snapobj = ds->ds_phys->ds_prev_snap_obj;
1585 dsl_dataset_t *snapds;
1586 guid_map_entry_t *gmep;
1599
1600 rw_exit(&dp->dp_config_rwlock);
1601 return (err);
1602 }
1603
1604 static int
1605 dmu_recv_existing_end(dmu_recv_cookie_t *drc)
1606 {
1607 struct recvendsyncarg resa;
1608 dsl_dataset_t *ds = drc->drc_logical_ds;
1609 int err, myerr;
1610
1611 if (dsl_dataset_tryown(ds, FALSE, dmu_recv_tag)) {
1612 err = dsl_dataset_clone_swap(drc->drc_real_ds, ds,
1613 drc->drc_force);
1614 if (err)
1615 goto out;
1616 } else {
1617 mutex_exit(&ds->ds_recvlock);
1618 dsl_dataset_rele(ds, dmu_recv_tag);
1619 (void) dsl_dataset_destroy(drc->drc_real_ds, dmu_recv_tag,
1620 B_FALSE);
1621 return (EBUSY);
1622 }
1623
1624 resa.creation_time = drc->drc_drrb->drr_creation_time;
1625 resa.toguid = drc->drc_drrb->drr_toguid;
1626 resa.tosnap = drc->drc_tosnap;
1627
1628 err = dsl_sync_task_do(ds->ds_dir->dd_pool,
1629 recv_end_check, recv_end_sync, ds, &resa, 3);
1630 if (err) {
1631 /* swap back */
1632 (void) dsl_dataset_clone_swap(drc->drc_real_ds, ds, B_TRUE);
1633 }
1634
1635 out:
1636 mutex_exit(&ds->ds_recvlock);
1637 if (err == 0 && drc->drc_guid_to_ds_map != NULL)
1638 (void) add_ds_to_guidmap(drc->drc_guid_to_ds_map, ds);
1639 dsl_dataset_disown(ds, dmu_recv_tag);
1640 myerr = dsl_dataset_destroy(drc->drc_real_ds, dmu_recv_tag, B_FALSE);
1641 ASSERT0(myerr);
1642 return (err);
1643 }
1644
1645 static int
1646 dmu_recv_new_end(dmu_recv_cookie_t *drc)
1647 {
1648 struct recvendsyncarg resa;
1649 dsl_dataset_t *ds = drc->drc_logical_ds;
1650 int err;
1651
1652 /*
1653 * XXX hack; the ds still seems to be dirty, and dsl_pool_zil_clean()
1654 * expects it to have a ds_user_ptr (and zil), but clone_swap()
1655 * can close it.
1656 */
1657 txg_wait_synced(ds->ds_dir->dd_pool, 0);
1658
1659 resa.creation_time = drc->drc_drrb->drr_creation_time;
1660 resa.toguid = drc->drc_drrb->drr_toguid;
1661 resa.tosnap = drc->drc_tosnap;
1662
1663 err = dsl_sync_task_do(ds->ds_dir->dd_pool,
1664 recv_end_check, recv_end_sync, ds, &resa, 3);
1665 if (err) {
1666 /* clean up the fs we just recv'd into */
1667 (void) dsl_dataset_destroy(ds, dmu_recv_tag, B_FALSE);
1668 } else {
1669 if (drc->drc_guid_to_ds_map != NULL)
1670 (void) add_ds_to_guidmap(drc->drc_guid_to_ds_map, ds);
1671 /* release the hold from dmu_recv_begin */
1672 dsl_dataset_disown(ds, dmu_recv_tag);
1673 }
1674 return (err);
1675 }
1676
1677 int
1678 dmu_recv_end(dmu_recv_cookie_t *drc)
1679 {
1680 if (drc->drc_logical_ds != drc->drc_real_ds)
1681 return (dmu_recv_existing_end(drc));
|
34 #include <sys/dmu_objset.h>
35 #include <sys/dmu_traverse.h>
36 #include <sys/dsl_dataset.h>
37 #include <sys/dsl_dir.h>
38 #include <sys/dsl_prop.h>
39 #include <sys/dsl_pool.h>
40 #include <sys/dsl_synctask.h>
41 #include <sys/zfs_ioctl.h>
42 #include <sys/zap.h>
43 #include <sys/zio_checksum.h>
44 #include <sys/zfs_znode.h>
45 #include <zfs_fletcher.h>
46 #include <sys/avl.h>
47 #include <sys/ddt.h>
48 #include <sys/zfs_onexit.h>
49
50 /* Set this tunable to TRUE to replace corrupt data with 0x2f5baddb10c */
51 int zfs_send_corrupt_data = B_FALSE;
52
53 static char *dmu_recv_tag = "dmu_recv_tag";
54 char *tmp_dmu_recv_tag = "tmp_dmu_recv_tag";
55
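/*
 * Write a buffer of send-stream data to the output vnode, folding it into
 * the stream's running fletcher-4 checksum and advancing the caller's
 * offset under ds_sendstream_lock.
 */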
56 static int
57 dump_bytes(dmu_sendarg_t *dsp, void *buf, int len)
58 {
59 dsl_dataset_t *ds = dsp->dsa_os->os_dsl_dataset;
60 ssize_t resid; /* have to get resid to get detailed errno */
61 ASSERT0(len % 8);
62
63 fletcher_4_incremental_native(buf, len, &dsp->dsa_zc);
64 dsp->dsa_err = vn_rdwr(UIO_WRITE, dsp->dsa_vp,
65 (caddr_t)buf, len,
66 0, UIO_SYSSPACE, FAPPEND, RLIM64_INFINITY, CRED(), &resid);
67
68 mutex_enter(&ds->ds_sendstream_lock);
69 *dsp->dsa_off += len;
70 mutex_exit(&ds->ds_sendstream_lock);
71
72 return (dsp->dsa_err);
73 }
74
627 objset_t *mos = dd->dd_pool->dp_meta_objset;
628 uint64_t val;
629 int err;
630
631 err = zap_lookup(mos, dd->dd_phys->dd_child_dir_zapobj,
632 strrchr(rbsa->tofs, '/') + 1, sizeof (uint64_t), 1, &val);
633
634 if (err != ENOENT)
635 return (err ? err : EEXIST);
636
637 if (rbsa->origin) {
638 /* make sure it's a snap in the same pool */
639 if (rbsa->origin->ds_dir->dd_pool != dd->dd_pool)
640 return (EXDEV);
641 if (!dsl_dataset_is_snapshot(rbsa->origin))
642 return (EINVAL);
643 if (rbsa->origin->ds_phys->ds_guid != rbsa->fromguid)
644 return (ENODEV);
645 }
646
647 /*
648 * Check dataset and snapshot quotas before receiving.  We'll recheck
649 * at the end, but we might as well abort now if we're already over
650 * quota.
651 */
652 if (dd->dd_parent != NULL) {
653 err = dsl_dir_dscount_check(dd->dd_parent, NULL, 1, NULL);
654 if (err != 0)
655 return (err);
656 }
657
658 err = dsl_snapcount_check(dd, tx, 1, NULL);
659 if (err != 0)
660 return (err);
661
662
663 return (0);
664 }
665
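/*
 * Sync task: create the dataset being received into, marked
 * DS_FLAG_INCONSISTENT until the receive completes, and take ownership of
 * it for the duration of the receive.
 */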
666 static void
667 recv_new_sync(void *arg1, void *arg2, dmu_tx_t *tx)
668 {
669 dsl_dir_t *dd = arg1;
670 struct recvbeginsyncarg *rbsa = arg2;
671 uint64_t flags = DS_FLAG_INCONSISTENT | rbsa->dsflags;
672 uint64_t dsobj;
673
674 /* Create and open new dataset. */
675 dsobj = dsl_dataset_create_sync(dd, strrchr(rbsa->tofs, '/') + 1,
676 rbsa->origin, flags, rbsa->cr, tx);
677 VERIFY(0 == dsl_dataset_own_obj(dd->dd_pool, dsobj,
678 B_TRUE, dmu_recv_tag, &rbsa->ds));
679
680 if (rbsa->origin == NULL) {
681 (void) dmu_objset_create_impl(dd->dd_pool->dp_spa,
682 rbsa->ds, &rbsa->ds->ds_phys->ds_bp, rbsa->type, tx);
725 if (err)
726 return (ENODEV);
727 if (snap->ds_phys->ds_creation_txg < birth) {
728 dsl_dataset_rele(snap, FTAG);
729 return (ENODEV);
730 }
731 if (snap->ds_phys->ds_guid == rbsa->fromguid) {
732 dsl_dataset_rele(snap, FTAG);
733 break; /* it's ok */
734 }
735 obj = snap->ds_phys->ds_prev_snap_obj;
736 dsl_dataset_rele(snap, FTAG);
737 }
738 if (obj == 0)
739 return (ENODEV);
740 }
741 } else {
742 /* if full, most recent snapshot must be $ORIGIN */
743 if (ds->ds_phys->ds_prev_snap_txg >= TXG_INITIAL)
744 return (ENODEV);
745
746 /* Check snapshot quota before receiving */
747 err = dsl_snapcount_check(ds->ds_dir, tx, 1, NULL);
748 if (err != 0)
749 return (err);
750 }
751
752 /* temporary clone name must not exist */
753 err = zap_lookup(ds->ds_dir->dd_pool->dp_meta_objset,
754 ds->ds_dir->dd_phys->dd_child_dir_zapobj,
755 rbsa->clonelastname, 8, 1, &val);
756 if (err == 0)
757 return (EEXIST);
758 if (err != ENOENT)
759 return (err);
760
761 return (0);
762 }
763
764 /* ARGSUSED */
765 static void
766 recv_existing_sync(void *arg1, void *arg2, dmu_tx_t *tx)
767 {
768 dsl_dataset_t *ohds = arg1;
769 struct recvbeginsyncarg *rbsa = arg2;
1552 */
1553 txg_wait_synced(drc->drc_real_ds->ds_dir->dd_pool, 0);
1554
1555 (void) dsl_dataset_destroy(drc->drc_real_ds, dmu_recv_tag,
1556 B_FALSE);
1557 if (drc->drc_real_ds != drc->drc_logical_ds) {
1558 mutex_exit(&drc->drc_logical_ds->ds_recvlock);
1559 dsl_dataset_rele(drc->drc_logical_ds, dmu_recv_tag);
1560 }
1561 }
1562
1563 kmem_free(ra.buf, ra.bufsize);
1564 *voffp = ra.voff;
1565 return (ra.err);
1566 }
1567
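/*
 * Arguments for the receive-end sync task.  is_new distinguishes a receive
 * that created a new dataset (dmu_recv_new_end) from one into an existing
 * dataset (dmu_recv_existing_end).
 */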
1568 struct recvendsyncarg {
1569 char *tosnap;
1570 uint64_t creation_time;
1571 uint64_t toguid;
1572 boolean_t is_new;
1573 };
1574
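/*
 * Pre-check for the receive-end sync task: for a newly created dataset,
 * re-verify the parent's dataset count quota, then run the usual snapshot
 * pre-checks for the final snapshot.
 */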
1575 static int
1576 recv_end_check(void *arg1, void *arg2, dmu_tx_t *tx)
1577 {
1578 dsl_dataset_t *ds = arg1;
1579 struct recvendsyncarg *resa = arg2;
1580
1581 if (resa->is_new) {
1582 /* re-check the dataset quota now that recv is complete */
1583 dsl_dir_t *dd;
1584 int err;
1585
1586 dd = ds->ds_dir;
1587 if (dd->dd_parent != NULL) {
1588 err = dsl_dir_dscount_check(dd->dd_parent, NULL, 1,
1589 NULL);
1590 if (err != 0)
1591 return (err);
1592 }
1593 }
1594
1595 return (dsl_dataset_snapshot_check(ds, resa->tosnap, 1, tx));
1596 }
1597
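/*
 * Sync task: bump the parent's dataset count if this receive created a new
 * dataset, snapshot what was received, stamp the snapshot with the stream's
 * creation time and guid, and clear the inconsistent flag.
 */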
1598 static void
1599 recv_end_sync(void *arg1, void *arg2, dmu_tx_t *tx)
1600 {
1601 dsl_dataset_t *ds = arg1;
1602 struct recvendsyncarg *resa = arg2;
1603
1604 if (resa->is_new) {
1605 /* update the dataset counts */
1606 dsl_dir_dscount_adjust(ds->ds_dir->dd_parent, tx, 1,
1607     B_FALSE, B_TRUE);
1608 }
1609 dsl_dataset_snapshot_sync(ds, resa->tosnap, tx);
1610
1611 /* set snapshot's creation time and guid */
1612 dmu_buf_will_dirty(ds->ds_prev->ds_dbuf, tx);
1613 ds->ds_prev->ds_phys->ds_creation_time = resa->creation_time;
1614 ds->ds_prev->ds_phys->ds_guid = resa->toguid;
1615 ds->ds_prev->ds_phys->ds_flags &= ~DS_FLAG_INCONSISTENT;
1616
1617 dmu_buf_will_dirty(ds->ds_dbuf, tx);
1618 ds->ds_phys->ds_flags &= ~DS_FLAG_INCONSISTENT;
1619 spa_history_log_internal_ds(ds, "finished receiving", tx, "");
1620 }
1621
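/*
 * Add the dataset's most recent snapshot to the caller's guid map, keyed
 * by guid (used to resolve references to earlier streams, e.g. for
 * dedup'ed send streams).
 */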
1622 static int
1623 add_ds_to_guidmap(avl_tree_t *guid_map, dsl_dataset_t *ds)
1624 {
1625 dsl_pool_t *dp = ds->ds_dir->dd_pool;
1626 uint64_t snapobj = ds->ds_phys->ds_prev_snap_obj;
1627 dsl_dataset_t *snapds;
1628 guid_map_entry_t *gmep;
1641
1642 rw_exit(&dp->dp_config_rwlock);
1643 return (err);
1644 }
1645
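/*
 * Finish a receive into an existing filesystem: take ownership of the
 * target, swap in the received clone, snapshot the result, and destroy
 * the temporary clone.
 */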
1646 static int
1647 dmu_recv_existing_end(dmu_recv_cookie_t *drc)
1648 {
1649 struct recvendsyncarg resa;
1650 dsl_dataset_t *ds = drc->drc_logical_ds;
1651 int err, myerr;
1652
1653 if (dsl_dataset_tryown(ds, FALSE, dmu_recv_tag)) {
1654 err = dsl_dataset_clone_swap(drc->drc_real_ds, ds,
1655 drc->drc_force);
1656 if (err)
1657 goto out;
1658 } else {
1659 mutex_exit(&ds->ds_recvlock);
1660 dsl_dataset_rele(ds, dmu_recv_tag);
1661 /* this tag tells dsl_dir_destroy_sync that the ds is temporary */
1662 (void) dsl_dataset_destroy(drc->drc_real_ds, tmp_dmu_recv_tag,
1663 B_FALSE);
1664 return (EBUSY);
1665 }
1666
1667 resa.creation_time = drc->drc_drrb->drr_creation_time;
1668 resa.toguid = drc->drc_drrb->drr_toguid;
1669 resa.tosnap = drc->drc_tosnap;
1670 resa.is_new = B_FALSE;
1671
1672 err = dsl_sync_task_do(ds->ds_dir->dd_pool,
1673 recv_end_check, recv_end_sync, ds, &resa, 3);
1674 if (err) {
1675 /* swap back */
1676 (void) dsl_dataset_clone_swap(drc->drc_real_ds, ds, B_TRUE);
1677 }
1678
1679 out:
1680 mutex_exit(&ds->ds_recvlock);
1681 if (err == 0 && drc->drc_guid_to_ds_map != NULL)
1682 (void) add_ds_to_guidmap(drc->drc_guid_to_ds_map, ds);
1683 dsl_dataset_disown(ds, dmu_recv_tag);
1684 /* this tag tells dsl_dir_destroy_sync that the ds is temporary */
1685 myerr = dsl_dataset_destroy(drc->drc_real_ds, tmp_dmu_recv_tag,
1686 B_FALSE);
1687 ASSERT0(myerr);
1688 return (err);
1689 }
1690
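/*
 * Finish a receive that created a new filesystem: snapshot it and release
 * the hold taken in dmu_recv_begin, or destroy it if the final sync task
 * fails.
 */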
1691 static int
1692 dmu_recv_new_end(dmu_recv_cookie_t *drc)
1693 {
1694 struct recvendsyncarg resa;
1695 dsl_dataset_t *ds = drc->drc_logical_ds;
1696 int err;
1697
1698 /*
1699 * XXX hack; the ds still seems to be dirty, and dsl_pool_zil_clean()
1700 * expects it to have a ds_user_ptr (and zil), but clone_swap()
1701 * can close it.
1702 */
1703 txg_wait_synced(ds->ds_dir->dd_pool, 0);
1704
1705 resa.creation_time = drc->drc_drrb->drr_creation_time;
1706 resa.toguid = drc->drc_drrb->drr_toguid;
1707 resa.tosnap = drc->drc_tosnap;
1708 resa.is_new = B_TRUE;
1709
1710 err = dsl_sync_task_do(ds->ds_dir->dd_pool,
1711 recv_end_check, recv_end_sync, ds, &resa, 3);
1712 if (err) {
1713 /* clean up the fs we just recv'd into */
1714 (void) dsl_dataset_destroy(ds, dmu_recv_tag, B_FALSE);
1715 } else {
1716 if (drc->drc_guid_to_ds_map != NULL)
1717 (void) add_ds_to_guidmap(drc->drc_guid_to_ds_map, ds);
1718 /* release the hold from dmu_recv_begin */
1719 dsl_dataset_disown(ds, dmu_recv_tag);
1720 }
1721 return (err);
1722 }
1723
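/*
 * Called once the entire stream has been consumed; dispatch to the
 * existing- or new-filesystem completion path.
 */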
1724 int
1725 dmu_recv_end(dmu_recv_cookie_t *drc)
1726 {
1727 if (drc->drc_logical_ds != drc->drc_real_ds)
1728 return (dmu_recv_existing_end(drc));