833
834 /*
835 * Process the begin in syncing context.
836 */
837
838 /* open the dataset we are logically receiving into */
839 err = dsl_dataset_hold(tofs, dmu_recv_tag, &ds);
840 if (err == 0) {
841 if (dmu_recv_verify_features(ds, drrb)) {
842 dsl_dataset_rele(ds, dmu_recv_tag);
843 return (ENOTSUP);
844 }
845 /* target fs already exists; recv into temp clone */
846
847 /* Can't recv a clone into an existing fs */
848 if (flags & DRR_FLAG_CLONE) {
849 dsl_dataset_rele(ds, dmu_recv_tag);
850 return (EINVAL);
851 }
852
853 /* must not have an incremental recv already in progress */
854 if (!mutex_tryenter(&ds->ds_recvlock)) {
855 dsl_dataset_rele(ds, dmu_recv_tag);
856 return (EBUSY);
857 }
858
859 /* tmp clone name is: tofs/%tosnap */
860 (void) snprintf(rbsa.clonelastname, sizeof (rbsa.clonelastname),
861 "%%%s", tosnap);
862 rbsa.force = force;
863 err = dsl_sync_task_do(ds->ds_dir->dd_pool,
864 recv_existing_check, recv_existing_sync, ds, &rbsa, 5);
865 if (err) {
866 mutex_exit(&ds->ds_recvlock);
867 dsl_dataset_rele(ds, dmu_recv_tag);
868 return (err);
869 }
870 drc->drc_logical_ds = ds;
871 drc->drc_real_ds = rbsa.ds;
872 } else if (err == ENOENT) {
873 /* target fs does not exist; must be a full backup or clone */
874 char *cp;
875
876 /*
877 * If it's a non-clone incremental, we are missing the
878 * target fs, so fail the recv.
879 */
880 if (rbsa.fromguid && !(flags & DRR_FLAG_CLONE))
881 return (ENOENT);
882
883 /* Open the parent of tofs */
884 cp = strrchr(tofs, '/');
885 *cp = '\0';
886 err = dsl_dataset_hold(tofs, FTAG, &ds);
887 *cp = '/';
888 if (err)
889 return (err);
890
891 if (dmu_recv_verify_features(ds, drrb)) {
892 dsl_dataset_rele(ds, FTAG);
893 return (ENOTSUP);
894 }
895
896 err = dsl_sync_task_do(ds->ds_dir->dd_pool,
897 recv_new_check, recv_new_sync, ds->ds_dir, &rbsa, 5);
898 dsl_dataset_rele(ds, FTAG);
899 if (err)
900 return (err);
901 drc->drc_logical_ds = drc->drc_real_ds = rbsa.ds;
902 drc->drc_newfs = B_TRUE;
903 }
904
905 return (err);
906 }
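A note on the "%%%s" format used above to build rbsa.clonelastname: when the target filesystem already exists, the receive is staged in a temporary clone whose last name component is a literal '%' followed by the snapshot name, so the hidden dataset is tofs/%tosnap; dmu_recv_existing_end() later clone-swaps it with the original filesystem. A minimal user-level sketch of the name construction (the helper and buffer size here are illustrative, not part of this file):

#include <stdio.h>

/* Illustration only: build the temporary clone name "tofs/%tosnap". */
static void
tmp_clone_name(const char *tofs, const char *tosnap,
    char *buf, size_t buflen)
{
	/* "%%" emits a literal '%', so tank/fs + today -> tank/fs/%today */
	(void) snprintf(buf, buflen, "%s/%%%s", tofs, tosnap);
}

int
main(void)
{
	char name[256];

	tmp_clone_name("tank/fs", "today", name, sizeof (name));
	(void) printf("%s\n", name);	/* prints tank/fs/%today */
	return (0);
}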
907
908 struct restorearg {
909 int err;
910 int byteswap;
911 vnode_t *vp;
912 char *buf;
913 uint64_t voff;
914 int bufsize; /* amount of memory allocated for buf */
915 zio_cksum_t cksum;
916 avl_tree_t *guid_to_ds_map;
917 };
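In restorearg, cksum carries the running checksum of the stream as records are read into buf; send streams are checksummed with fletcher-4, updated incrementally over each record (elsewhere in this file, not shown in the excerpt, via fletcher_4_incremental_native() or its byteswap variant). A self-contained sketch of that incremental update, with stand-in type and function names rather than the kernel's:

#include <stdint.h>
#include <stddef.h>

/* Stand-in for zio_cksum_t: four 64-bit running sums. */
typedef struct sketch_cksum {
	uint64_t zc_word[4];
} sketch_cksum_t;

/*
 * Incremental fletcher-4 over a buffer of 32-bit words, continuing
 * from the sums already held in *zcp (algorithm illustration only).
 */
static void
sketch_fletcher_4_incremental(const void *buf, size_t size,
    sketch_cksum_t *zcp)
{
	const uint32_t *ip = buf;
	const uint32_t *ipend = ip + (size / sizeof (uint32_t));
	uint64_t a = zcp->zc_word[0];
	uint64_t b = zcp->zc_word[1];
	uint64_t c = zcp->zc_word[2];
	uint64_t d = zcp->zc_word[3];

	for (; ip < ipend; ip++) {
		a += *ip;
		b += a;
		c += b;
		d += c;
	}
	zcp->zc_word[0] = a;
	zcp->zc_word[1] = b;
	zcp->zc_word[2] = c;
	zcp->zc_word[3] = d;
}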
918
919 typedef struct guid_map_entry {
920 uint64_t guid;
921 dsl_dataset_t *gme_ds;
1511 ra.err = EINVAL;
1512 goto out;
1513 }
1514 pcksum = ra.cksum;
1515 }
1516 ASSERT(ra.err != 0);
1517
1518 out:
1519 if ((featureflags & DMU_BACKUP_FEATURE_DEDUP) && (cleanup_fd != -1))
1520 zfs_onexit_fd_rele(cleanup_fd);
1521
1522 if (ra.err != 0) {
1523 /*
1524 * destroy what we created, so we don't leave it in the
1525 * inconsistent restoring state.
1526 */
1527 txg_wait_synced(drc->drc_real_ds->ds_dir->dd_pool, 0);
1528
1529 (void) dsl_dataset_destroy(drc->drc_real_ds, dmu_recv_tag,
1530 B_FALSE);
1531 if (drc->drc_real_ds != drc->drc_logical_ds) {
1532 mutex_exit(&drc->drc_logical_ds->ds_recvlock);
1533 dsl_dataset_rele(drc->drc_logical_ds, dmu_recv_tag);
1534 }
1535 }
1536
1537 kmem_free(ra.buf, ra.bufsize);
1538 *voffp = ra.voff;
1539 return (ra.err);
1540 }
1541
1542 struct recvendsyncarg {
1543 char *tosnap;
1544 uint64_t creation_time;
1545 uint64_t toguid;
1546 };
1547
1548 static int
1549 recv_end_check(void *arg1, void *arg2, dmu_tx_t *tx)
1550 {
1551 dsl_dataset_t *ds = arg1;
1552 struct recvendsyncarg *resa = arg2;
1553
1554 return (dsl_dataset_snapshot_check(ds, resa->tosnap, tx));
1583
1584 ASSERT(guid_map != NULL);
1585
1586 rw_enter(&dp->dp_config_rwlock, RW_READER);
1587 err = dsl_dataset_hold_obj(dp, snapobj, guid_map, &snapds);
1588 if (err == 0) {
1589 gmep = kmem_alloc(sizeof (guid_map_entry_t), KM_SLEEP);
1590 gmep->guid = snapds->ds_phys->ds_guid;
1591 gmep->gme_ds = snapds;
1592 avl_add(guid_map, gmep);
1593 }
1594
1595 rw_exit(&dp->dp_config_rwlock);
1596 return (err);
1597 }
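add_ds_to_guidmap() above holds each received snapshot and files it in an AVL tree keyed by its guid; when a deduplicated stream (DMU_BACKUP_FEATURE_DEDUP) is replayed, by-reference write records are resolved through this map to the dataset that already contains the referenced data. For avl_add() to work, the tree must have been created with a guid-ordered comparator along these lines (a sketch; the file's actual comparator is not shown in this excerpt):

/* Sketch of a guid-keyed AVL comparator for guid_map_entry_t. */
static int
guid_compare_sketch(const void *arg1, const void *arg2)
{
	const guid_map_entry_t *gmep1 = arg1;
	const guid_map_entry_t *gmep2 = arg2;

	if (gmep1->guid < gmep2->guid)
		return (-1);
	else if (gmep1->guid > gmep2->guid)
		return (1);
	return (0);
}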
1598
1599 static int
1600 dmu_recv_existing_end(dmu_recv_cookie_t *drc)
1601 {
1602 struct recvendsyncarg resa;
1603 dsl_dataset_t *ds = drc->drc_logical_ds;
1604 int err, myerr;
1605
1606 /*
1607 * XXX hack; seems the ds is still dirty and dsl_pool_zil_clean()
1608 * expects it to have a ds_user_ptr (and zil), but clone_swap()
1609 * can close it.
1610 */
1611 txg_wait_synced(ds->ds_dir->dd_pool, 0);
1612
1613 if (dsl_dataset_tryown(ds, FALSE, dmu_recv_tag)) {
1614 err = dsl_dataset_clone_swap(drc->drc_real_ds, ds,
1615 drc->drc_force);
1616 if (err)
1617 goto out;
1618 } else {
1619 mutex_exit(&ds->ds_recvlock);
1620 dsl_dataset_rele(ds, dmu_recv_tag);
1621 (void) dsl_dataset_destroy(drc->drc_real_ds, dmu_recv_tag,
1622 B_FALSE);
1623 return (EBUSY);
1624 }
1625
1626 resa.creation_time = drc->drc_drrb->drr_creation_time;
1627 resa.toguid = drc->drc_drrb->drr_toguid;
1628 resa.tosnap = drc->drc_tosnap;
1629
1630 err = dsl_sync_task_do(ds->ds_dir->dd_pool,
1631 recv_end_check, recv_end_sync, ds, &resa, 3);
1632 if (err) {
1633 /* swap back */
1634 (void) dsl_dataset_clone_swap(drc->drc_real_ds, ds, B_TRUE);
1635 }
1636
1637 out:
1638 mutex_exit(&ds->ds_recvlock);
1639 if (err == 0 && drc->drc_guid_to_ds_map != NULL)
1640 (void) add_ds_to_guidmap(drc->drc_guid_to_ds_map, ds);
1641 dsl_dataset_disown(ds, dmu_recv_tag);
1642 myerr = dsl_dataset_destroy(drc->drc_real_ds, dmu_recv_tag, B_FALSE);
1643 ASSERT3U(myerr, ==, 0);
1644 return (err);
1645 }
1646
1647 static int
1648 dmu_recv_new_end(dmu_recv_cookie_t *drc)
1649 {
1650 struct recvendsyncarg resa;
1651 dsl_dataset_t *ds = drc->drc_logical_ds;
1652 int err;
1653
1654 /*
1655 * XXX hack; seems the ds is still dirty and dsl_pool_zil_clean()
1656 * expects it to have a ds_user_ptr (and zil), but clone_swap()
1657 * can close it.
1658 */
1659 txg_wait_synced(ds->ds_dir->dd_pool, 0);
1660
1661 resa.creation_time = drc->drc_drrb->drr_creation_time;
1662 resa.toguid = drc->drc_drrb->drr_toguid;
1663 resa.tosnap = drc->drc_tosnap;
1664
1665 err = dsl_sync_task_do(ds->ds_dir->dd_pool,
1666 recv_end_check, recv_end_sync, ds, &resa, 3);
1667 if (err) {
1668 /* clean up the fs we just recv'd into */
1669 (void) dsl_dataset_destroy(ds, dmu_recv_tag, B_FALSE);
1670 } else {
1671 if (drc->drc_guid_to_ds_map != NULL)
1672 (void) add_ds_to_guidmap(drc->drc_guid_to_ds_map, ds);
1673 /* release the hold from dmu_recv_begin */
1674 dsl_dataset_disown(ds, dmu_recv_tag);
1675 }
1676 return (err);
1677 }
1678
1679 int
1680 dmu_recv_end(dmu_recv_cookie_t *drc)
1681 {
1682 if (drc->drc_logical_ds != drc->drc_real_ds)
1683 return (dmu_recv_existing_end(drc));
1684 else
1685 return (dmu_recv_new_end(drc));
1686 }
|
833
834 /*
835 * Process the begin in syncing context.
836 */
837
838 /* open the dataset we are logically receiving into */
839 err = dsl_dataset_hold(tofs, dmu_recv_tag, &ds);
840 if (err == 0) {
841 if (dmu_recv_verify_features(ds, drrb)) {
842 dsl_dataset_rele(ds, dmu_recv_tag);
843 return (ENOTSUP);
844 }
845 /* target fs already exists; recv into temp clone */
846
847 /* Can't recv a clone into an existing fs */
848 if (flags & DRR_FLAG_CLONE) {
849 dsl_dataset_rele(ds, dmu_recv_tag);
850 return (EINVAL);
851 }
852
853 /* tmp clone name is: tofs/%tosnap */
854 (void) snprintf(rbsa.clonelastname, sizeof (rbsa.clonelastname),
855 "%%%s", tosnap);
856 rbsa.force = force;
857 err = dsl_sync_task_do(ds->ds_dir->dd_pool,
858 recv_existing_check, recv_existing_sync, ds, &rbsa, 5);
859 if (err) {
860 dsl_dataset_rele(ds, dmu_recv_tag);
861 return (err);
862 }
863 drc->drc_logical_dsobj = ds->ds_object;
864 drc->drc_real_ds = rbsa.ds;
865 dsl_dataset_rele(ds, dmu_recv_tag);
866 } else if (err == ENOENT) {
867 /* target fs does not exist; must be a full backup or clone */
868 char *cp;
869
870 /*
871 * If it's a non-clone incremental, we are missing the
872 * target fs, so fail the recv.
873 */
874 if (rbsa.fromguid && !(flags & DRR_FLAG_CLONE))
875 return (ENOENT);
876
877 /* Open the parent of tofs */
878 cp = strrchr(tofs, '/');
879 *cp = '\0';
880 err = dsl_dataset_hold(tofs, FTAG, &ds);
881 *cp = '/';
882 if (err)
883 return (err);
884
885 if (dmu_recv_verify_features(ds, drrb)) {
886 dsl_dataset_rele(ds, FTAG);
887 return (ENOTSUP);
888 }
889
890 err = dsl_sync_task_do(ds->ds_dir->dd_pool,
891 recv_new_check, recv_new_sync, ds->ds_dir, &rbsa, 5);
892 dsl_dataset_rele(ds, FTAG);
893 if (err)
894 return (err);
895 drc->drc_real_ds = rbsa.ds;
896 drc->drc_newfs = B_TRUE;
897 }
898
899 return (err);
900 }
901
902 struct restorearg {
903 int err;
904 int byteswap;
905 vnode_t *vp;
906 char *buf;
907 uint64_t voff;
908 int bufsize; /* amount of memory allocated for buf */
909 zio_cksum_t cksum;
910 avl_tree_t *guid_to_ds_map;
911 };
912
913 typedef struct guid_map_entry {
914 uint64_t guid;
915 dsl_dataset_t *gme_ds;
1505 ra.err = EINVAL;
1506 goto out;
1507 }
1508 pcksum = ra.cksum;
1509 }
1510 ASSERT(ra.err != 0);
1511
1512 out:
1513 if ((featureflags & DMU_BACKUP_FEATURE_DEDUP) && (cleanup_fd != -1))
1514 zfs_onexit_fd_rele(cleanup_fd);
1515
1516 if (ra.err != 0) {
1517 /*
1518 * destroy what we created, so we don't leave it in the
1519 * inconsistent restoring state.
1520 */
1521 txg_wait_synced(drc->drc_real_ds->ds_dir->dd_pool, 0);
1522
1523 (void) dsl_dataset_destroy(drc->drc_real_ds, dmu_recv_tag,
1524 B_FALSE);
1525 }
1526
1527 kmem_free(ra.buf, ra.bufsize);
1528 *voffp = ra.voff;
1529 return (ra.err);
1530 }
1531
1532 struct recvendsyncarg {
1533 char *tosnap;
1534 uint64_t creation_time;
1535 uint64_t toguid;
1536 };
1537
1538 static int
1539 recv_end_check(void *arg1, void *arg2, dmu_tx_t *tx)
1540 {
1541 dsl_dataset_t *ds = arg1;
1542 struct recvendsyncarg *resa = arg2;
1543
1544 return (dsl_dataset_snapshot_check(ds, resa->tosnap, tx));
1573
1574 ASSERT(guid_map != NULL);
1575
1576 rw_enter(&dp->dp_config_rwlock, RW_READER);
1577 err = dsl_dataset_hold_obj(dp, snapobj, guid_map, &snapds);
1578 if (err == 0) {
1579 gmep = kmem_alloc(sizeof (guid_map_entry_t), KM_SLEEP);
1580 gmep->guid = snapds->ds_phys->ds_guid;
1581 gmep->gme_ds = snapds;
1582 avl_add(guid_map, gmep);
1583 }
1584
1585 rw_exit(&dp->dp_config_rwlock);
1586 return (err);
1587 }
1588
1589 static int
1590 dmu_recv_existing_end(dmu_recv_cookie_t *drc)
1591 {
1592 struct recvendsyncarg resa;
1593 dsl_dataset_t *ds;
1594 int err, myerr;
1595 dsl_pool_t *dp = drc->drc_real_ds->ds_dir->dd_pool;
1596
1597 rw_enter(&dp->dp_config_rwlock, RW_READER);
1598 err = dsl_dataset_own_obj(dp, drc->drc_logical_dsobj, FALSE,
1599 dmu_recv_tag, &ds);
1600 rw_exit(&dp->dp_config_rwlock);
1601
1602 if (err) {
1603 (void) dsl_dataset_destroy(drc->drc_real_ds, dmu_recv_tag,
1604 B_FALSE);
1605 return (EBUSY);
1606 }
1607
1608 /*
1609 * XXX hack; seems the ds is still dirty and dsl_pool_zil_clean()
1610 * expects it to have a ds_user_ptr (and zil), but clone_swap()
1611 * can close it.
1612 */
1613 txg_wait_synced(ds->ds_dir->dd_pool, 0);
1614
1615 err = dsl_dataset_clone_swap(drc->drc_real_ds, ds, drc->drc_force);
1616 if (err)
1617 goto out;
1618
1619 resa.creation_time = drc->drc_drrb->drr_creation_time;
1620 resa.toguid = drc->drc_drrb->drr_toguid;
1621 resa.tosnap = drc->drc_tosnap;
1622
1623 err = dsl_sync_task_do(ds->ds_dir->dd_pool,
1624 recv_end_check, recv_end_sync, ds, &resa, 3);
1625 if (err) {
1626 /* swap back */
1627 (void) dsl_dataset_clone_swap(drc->drc_real_ds, ds, B_TRUE);
1628 }
1629
1630 out:
1631 if (err == 0 && drc->drc_guid_to_ds_map != NULL)
1632 (void) add_ds_to_guidmap(drc->drc_guid_to_ds_map, ds);
1633 dsl_dataset_disown(ds, dmu_recv_tag);
1634 myerr = dsl_dataset_destroy(drc->drc_real_ds, dmu_recv_tag, B_FALSE);
1635 ASSERT3U(myerr, ==, 0);
1636 return (err);
1637 }
1638
1639 static int
1640 dmu_recv_new_end(dmu_recv_cookie_t *drc)
1641 {
1642 struct recvendsyncarg resa;
1643 dsl_dataset_t *ds = drc->drc_real_ds;
1644 int err;
1645
1646 /*
1647 * XXX hack; seems the ds is still dirty and dsl_pool_zil_clean()
1648 * expects it to have a ds_user_ptr (and zil), but clone_swap()
1649 * can close it.
1650 */
1651 txg_wait_synced(ds->ds_dir->dd_pool, 0);
1652
1653 resa.creation_time = drc->drc_drrb->drr_creation_time;
1654 resa.toguid = drc->drc_drrb->drr_toguid;
1655 resa.tosnap = drc->drc_tosnap;
1656
1657 err = dsl_sync_task_do(ds->ds_dir->dd_pool,
1658 recv_end_check, recv_end_sync, ds, &resa, 3);
1659 if (err) {
1660 /* clean up the fs we just recv'd into */
1661 (void) dsl_dataset_destroy(ds, dmu_recv_tag, B_FALSE);
1662 } else {
1663 if (drc->drc_guid_to_ds_map != NULL)
1664 (void) add_ds_to_guidmap(drc->drc_guid_to_ds_map, ds);
1665 /* release the hold from dmu_recv_begin */
1666 dsl_dataset_disown(ds, dmu_recv_tag);
1667 }
1668 return (err);
1669 }
1670
1671 int
1672 dmu_recv_end(dmu_recv_cookie_t *drc)
1673 {
1674 if (!drc->drc_newfs)
1675 return (dmu_recv_existing_end(drc));
1676 else
1677 return (dmu_recv_new_end(drc));
1678 }
|