40 #include <sys/dsl_synctask.h>
41 #include <sys/zfs_ioctl.h>
42 #include <sys/zap.h>
43 #include <sys/zio_checksum.h>
44 #include <sys/zfs_znode.h>
45 #include <zfs_fletcher.h>
46 #include <sys/avl.h>
47 #include <sys/ddt.h>
48 #include <sys/zfs_onexit.h>
49
50 /* Set this tunable to TRUE to replace corrupt data with 0x2f5baddb10c */
51 int zfs_send_corrupt_data = B_FALSE;
52
53 static char *dmu_recv_tag = "dmu_recv_tag"; /* hold/own tag for datasets involved in a receive */
54
55 static int
56 dump_bytes(dmu_sendarg_t *dsp, void *buf, int len)
57 {
58 dsl_dataset_t *ds = dsp->dsa_os->os_dsl_dataset;
59 ssize_t resid; /* have to get resid to get detailed errno */
60 ASSERT3U(len % 8, ==, 0);
61
62 fletcher_4_incremental_native(buf, len, &dsp->dsa_zc); /* fold buf into the stream's running fletcher-4 checksum */
63 dsp->dsa_err = vn_rdwr(UIO_WRITE, dsp->dsa_vp,
64 (caddr_t)buf, len,
65 0, UIO_SYSSPACE, FAPPEND, RLIM64_INFINITY, CRED(), &resid);
66
67 mutex_enter(&ds->ds_sendstream_lock);
68 *dsp->dsa_off += len;
69 mutex_exit(&ds->ds_sendstream_lock);
70
71 return (dsp->dsa_err);
72 }
73
74 static int
75 dump_free(dmu_sendarg_t *dsp, uint64_t object, uint64_t offset,
76 uint64_t length)
77 {
78 struct drr_free *drrf = &(dsp->dsa_drr->drr_u.drr_free);
79
80 if (length != -1ULL && offset + length < offset)
944 {
945 avl_tree_t *ca = arg;
946 void *cookie = NULL;
947 guid_map_entry_t *gmep;
948
949 while ((gmep = avl_destroy_nodes(ca, &cookie)) != NULL) {
950 dsl_dataset_rele(gmep->gme_ds, ca);
951 kmem_free(gmep, sizeof (guid_map_entry_t));
952 }
953 avl_destroy(ca);
954 kmem_free(ca, sizeof (avl_tree_t));
955 }
956
957 static void *
958 restore_read(struct restorearg *ra, int len)
959 {
960 void *rv;
961 int done = 0;
962
963 /* some things will require 8-byte alignment, so everything must be 8-byte aligned */
964 ASSERT3U(len % 8, ==, 0);
965
966 while (done < len) {
967 ssize_t resid;
968
969 ra->err = vn_rdwr(UIO_READ, ra->vp,
970 (caddr_t)ra->buf + done, len - done,
971 ra->voff, UIO_SYSSPACE, FAPPEND,
972 RLIM64_INFINITY, CRED(), &resid);
973
974 if (resid == len - done)
975 ra->err = EINVAL; /* nothing was read: the stream ended prematurely */
976 ra->voff += len - done - resid;
977 done = len - resid;
978 if (ra->err)
979 return (NULL);
980 }
981
982 ASSERT3U(done, ==, len);
983 rv = ra->buf;
984 if (ra->byteswap)
1628 return (EBUSY);
1629 }
1630
1631 resa.creation_time = drc->drc_drrb->drr_creation_time;
1632 resa.toguid = drc->drc_drrb->drr_toguid;
1633 resa.tosnap = drc->drc_tosnap;
1634
1635 err = dsl_sync_task_do(ds->ds_dir->dd_pool,
1636 recv_end_check, recv_end_sync, ds, &resa, 3);
1637 if (err) {
1638 /* swap back */
1639 (void) dsl_dataset_clone_swap(drc->drc_real_ds, ds, B_TRUE);
1640 }
1641
1642 out:
1643 mutex_exit(&ds->ds_recvlock);
1644 if (err == 0 && drc->drc_guid_to_ds_map != NULL)
1645 (void) add_ds_to_guidmap(drc->drc_guid_to_ds_map, ds);
1646 dsl_dataset_disown(ds, dmu_recv_tag);
1647 myerr = dsl_dataset_destroy(drc->drc_real_ds, dmu_recv_tag, B_FALSE);
1648 ASSERT3U(myerr, ==, 0);
1649 return (err);
1650 }
1651
1652 static int
1653 dmu_recv_new_end(dmu_recv_cookie_t *drc)
1654 {
1655 struct recvendsyncarg resa;
1656 dsl_dataset_t *ds = drc->drc_logical_ds;
1657 int err;
1658
1659 /*
1660 * XXX hack; seems the ds is still dirty and dsl_pool_zil_clean()
1661 * expects it to have a ds_user_ptr (and zil), but clone_swap()
1662 * can close it.
1663 */
1664 txg_wait_synced(ds->ds_dir->dd_pool, 0);
1665
1666 resa.creation_time = drc->drc_drrb->drr_creation_time;
1667 resa.toguid = drc->drc_drrb->drr_toguid;
1668 resa.tosnap = drc->drc_tosnap;