2882 implement libzfs_core
2883 changing "canmount" property to "on" should not always remount dataset
2900 "zfs snapshot" should be able to create multiple, arbitrary snapshots at once
Reviewed by: George Wilson <george.wilson@delphix.com>
Reviewed by: Chris Siden <christopher.siden@delphix.com>
Reviewed by: Garrett D'Amore <garrett@damore.org>
Reviewed by: Bill Pijewski <wdp@joyent.com>
Reviewed by: Dan Kruchinin <dan.kruchinin@gmail.com>
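
This webrev covers three changes; the dmu_send.c portion below replaces the
fromorigin special case with an is_before() origin walk. For the new
multiple-snapshot interface (bug 2900), here is a minimal userland sketch,
assuming the lzc_snapshot() entry point that libzfs_core (bug 2882)
introduces; the dataset names are hypothetical and error handling is
abbreviated:

	#include <libzfs_core.h>
	#include <libnvpair.h>
	#include <stdio.h>

	int
	main(void)
	{
		nvlist_t *snaps, *errlist = NULL;
		int err;

		(void) libzfs_core_init();
		(void) nvlist_alloc(&snaps, NV_UNIQUE_NAME, 0);

		/* keys are full snapshot names; all must be in one pool */
		(void) nvlist_add_boolean(snaps, "rpool/fs1@backup");
		(void) nvlist_add_boolean(snaps, "rpool/fs2@backup");

		/* the snapshots are created in one txg, or none are */
		err = lzc_snapshot(snaps, NULL, &errlist);
		if (err != 0)
			(void) fprintf(stderr, "lzc_snapshot: %d\n", err);

		nvlist_free(snaps);
		nvlist_free(errlist);
		libzfs_core_fini();
		return (err);
	}
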
--- old/usr/src/uts/common/fs/zfs/dmu_send.c
+++ new/usr/src/uts/common/fs/zfs/dmu_send.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21 /*
22 22 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
23 23 * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
24 24 * Copyright (c) 2012 by Delphix. All rights reserved.
25 25 * Copyright (c) 2012, Joyent, Inc. All rights reserved.
26 26 */
27 27
28 28 #include <sys/dmu.h>
29 29 #include <sys/dmu_impl.h>
30 30 #include <sys/dmu_tx.h>
31 31 #include <sys/dbuf.h>
32 32 #include <sys/dnode.h>
33 33 #include <sys/zfs_context.h>
34 34 #include <sys/dmu_objset.h>
35 35 #include <sys/dmu_traverse.h>
36 36 #include <sys/dsl_dataset.h>
37 37 #include <sys/dsl_dir.h>
38 38 #include <sys/dsl_prop.h>
39 39 #include <sys/dsl_pool.h>
40 40 #include <sys/dsl_synctask.h>
41 41 #include <sys/zfs_ioctl.h>
42 42 #include <sys/zap.h>
43 43 #include <sys/zio_checksum.h>
44 44 #include <sys/zfs_znode.h>
45 45 #include <zfs_fletcher.h>
46 46 #include <sys/avl.h>
47 47 #include <sys/ddt.h>
48 48 #include <sys/zfs_onexit.h>
49 49
50 50 /* Set this tunable to TRUE to replace corrupt data with 0x2f5baddb10c */
51 51 int zfs_send_corrupt_data = B_FALSE;
52 52
53 53 static char *dmu_recv_tag = "dmu_recv_tag";
54 54
55 55 static int
56 56 dump_bytes(dmu_sendarg_t *dsp, void *buf, int len)
57 57 {
58 58 dsl_dataset_t *ds = dsp->dsa_os->os_dsl_dataset;
59 59 ssize_t resid; /* have to get resid to get detailed errno */
60 60 ASSERT3U(len % 8, ==, 0);
61 61
62 62 fletcher_4_incremental_native(buf, len, &dsp->dsa_zc);
63 63 dsp->dsa_err = vn_rdwr(UIO_WRITE, dsp->dsa_vp,
64 64 (caddr_t)buf, len,
65 65 0, UIO_SYSSPACE, FAPPEND, RLIM64_INFINITY, CRED(), &resid);
66 66
67 67 mutex_enter(&ds->ds_sendstream_lock);
68 68 *dsp->dsa_off += len;
69 69 mutex_exit(&ds->ds_sendstream_lock);
70 70
71 71 return (dsp->dsa_err);
72 72 }
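
Every record and payload passes through dump_bytes(), which folds the bytes
into the running Fletcher-4 checksum in dsp->dsa_zc before writing them; the
DRR_END record emitted at the end of dmu_send() carries the accumulated
checksum so the receiver can verify the whole stream. A minimal sketch of the
incremental update, assuming the standard four-accumulator Fletcher-4 over
native-endian 32-bit words that fletcher_4_incremental_native() implements:

	static void
	fletcher_4_incr_sketch(const void *buf, uint64_t size,
	    zio_cksum_t *zcp)
	{
		const uint32_t *ip = buf;
		const uint32_t *ipend = ip + (size / sizeof (uint32_t));
		uint64_t a = zcp->zc_word[0];
		uint64_t b = zcp->zc_word[1];
		uint64_t c = zcp->zc_word[2];
		uint64_t d = zcp->zc_word[3];

		/* resume from the running sums; 64-bit overflow wraps */
		for (; ip < ipend; ip++) {
			a += *ip;
			b += a;
			c += b;
			d += c;
		}

		ZIO_SET_CHECKSUM(zcp, a, b, c, d);
	}
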
73 73
74 74 static int
75 75 dump_free(dmu_sendarg_t *dsp, uint64_t object, uint64_t offset,
76 76 uint64_t length)
77 77 {
78 78 struct drr_free *drrf = &(dsp->dsa_drr->drr_u.drr_free);
79 79
80 80 if (length != -1ULL && offset + length < offset)
81 81 length = -1ULL;
82 82
83 83 /*
84 84 * If there is a pending op, but it's not PENDING_FREE, push it out,
85 85 * since free block aggregation can only be done for blocks of the
86 86 * same type (i.e., DRR_FREE records can only be aggregated with
87 87 * other DRR_FREE records; DRR_FREEOBJECTS records can only be
88 88 * aggregated with other DRR_FREEOBJECTS records).
89 89 */
90 90 if (dsp->dsa_pending_op != PENDING_NONE &&
91 91 dsp->dsa_pending_op != PENDING_FREE) {
92 92 if (dump_bytes(dsp, dsp->dsa_drr,
93 93 sizeof (dmu_replay_record_t)) != 0)
94 94 return (EINTR);
95 95 dsp->dsa_pending_op = PENDING_NONE;
96 96 }
97 97
98 98 if (dsp->dsa_pending_op == PENDING_FREE) {
99 99 /*
100 100 * There should never be a PENDING_FREE if length is -1
101 101 * (because dump_dnode is the only place where this
102 102 * function is called with a -1, and only after flushing
103 103 * any pending record).
104 104 */
105 105 ASSERT(length != -1ULL);
106 106 /*
107 107 * Check to see whether this free block can be aggregated
108 108 * with the pending one.
109 109 */
110 110 if (drrf->drr_object == object && drrf->drr_offset +
111 111 drrf->drr_length == offset) {
112 112 drrf->drr_length += length;
113 113 return (0);
114 114 } else {
115 115 /* not a continuation. Push out pending record */
116 116 if (dump_bytes(dsp, dsp->dsa_drr,
117 117 sizeof (dmu_replay_record_t)) != 0)
118 118 return (EINTR);
119 119 dsp->dsa_pending_op = PENDING_NONE;
120 120 }
121 121 }
122 122 /* create a FREE record and make it pending */
123 123 bzero(dsp->dsa_drr, sizeof (dmu_replay_record_t));
124 124 dsp->dsa_drr->drr_type = DRR_FREE;
125 125 drrf->drr_object = object;
126 126 drrf->drr_offset = offset;
127 127 drrf->drr_length = length;
128 128 drrf->drr_toguid = dsp->dsa_toguid;
129 129 if (length == -1ULL) {
130 130 if (dump_bytes(dsp, dsp->dsa_drr,
131 131 sizeof (dmu_replay_record_t)) != 0)
132 132 return (EINTR);
133 133 } else {
134 134 dsp->dsa_pending_op = PENDING_FREE;
135 135 }
136 136
137 137 return (0);
138 138 }
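
The pending-record machinery above coalesces runs of adjacent frees into a
single DRR_FREE stream record. A hypothetical call sequence:

	dump_free(dsp, 5,    0, 4096);	/* becomes the pending FREE record */
	dump_free(dsp, 5, 4096, 4096);	/* adjacent: drr_length grows to 8192 */
	dump_free(dsp, 5, 8192, 4096);	/* adjacent: drr_length grows to 12288 */
	dump_data(dsp, ...);		/* different record type: the pending */
					/* FREE is flushed to the stream first */

Only a non-adjacent free, a record of another type, or a length of -1ULL
(free to end of object, which is written out immediately) forces the pending
record to the stream.
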
139 139
140 140 static int
141 141 dump_data(dmu_sendarg_t *dsp, dmu_object_type_t type,
142 142 uint64_t object, uint64_t offset, int blksz, const blkptr_t *bp, void *data)
143 143 {
144 144 struct drr_write *drrw = &(dsp->dsa_drr->drr_u.drr_write);
145 145
146 146
147 147 /*
148 148 * If there is any kind of pending aggregation (currently either
149 149 * a grouping of free objects or free blocks), push it out to
150 150 * the stream, since aggregation can't be done across operations
151 151 * of different types.
152 152 */
153 153 if (dsp->dsa_pending_op != PENDING_NONE) {
154 154 if (dump_bytes(dsp, dsp->dsa_drr,
155 155 sizeof (dmu_replay_record_t)) != 0)
156 156 return (EINTR);
157 157 dsp->dsa_pending_op = PENDING_NONE;
158 158 }
159 159 /* write a DATA record */
160 160 bzero(dsp->dsa_drr, sizeof (dmu_replay_record_t));
161 161 dsp->dsa_drr->drr_type = DRR_WRITE;
162 162 drrw->drr_object = object;
163 163 drrw->drr_type = type;
164 164 drrw->drr_offset = offset;
165 165 drrw->drr_length = blksz;
166 166 drrw->drr_toguid = dsp->dsa_toguid;
167 167 drrw->drr_checksumtype = BP_GET_CHECKSUM(bp);
168 168 if (zio_checksum_table[drrw->drr_checksumtype].ci_dedup)
169 169 drrw->drr_checksumflags |= DRR_CHECKSUM_DEDUP;
170 170 DDK_SET_LSIZE(&drrw->drr_key, BP_GET_LSIZE(bp));
171 171 DDK_SET_PSIZE(&drrw->drr_key, BP_GET_PSIZE(bp));
172 172 DDK_SET_COMPRESS(&drrw->drr_key, BP_GET_COMPRESS(bp));
173 173 drrw->drr_key.ddk_cksum = bp->blk_cksum;
174 174
175 175 if (dump_bytes(dsp, dsp->dsa_drr, sizeof (dmu_replay_record_t)) != 0)
176 176 return (EINTR);
177 177 if (dump_bytes(dsp, data, blksz) != 0)
178 178 return (EINTR);
179 179 return (0);
180 180 }
181 181
182 182 static int
183 183 dump_spill(dmu_sendarg_t *dsp, uint64_t object, int blksz, void *data)
184 184 {
185 185 struct drr_spill *drrs = &(dsp->dsa_drr->drr_u.drr_spill);
186 186
187 187 if (dsp->dsa_pending_op != PENDING_NONE) {
188 188 if (dump_bytes(dsp, dsp->dsa_drr,
189 189 sizeof (dmu_replay_record_t)) != 0)
190 190 return (EINTR);
191 191 dsp->dsa_pending_op = PENDING_NONE;
192 192 }
193 193
194 194 /* write a SPILL record */
195 195 bzero(dsp->dsa_drr, sizeof (dmu_replay_record_t));
196 196 dsp->dsa_drr->drr_type = DRR_SPILL;
197 197 drrs->drr_object = object;
198 198 drrs->drr_length = blksz;
199 199 drrs->drr_toguid = dsp->dsa_toguid;
200 200
201 201 if (dump_bytes(dsp, dsp->dsa_drr, sizeof (dmu_replay_record_t)))
202 202 return (EINTR);
203 203 if (dump_bytes(dsp, data, blksz))
204 204 return (EINTR);
205 205 return (0);
206 206 }
207 207
208 208 static int
209 209 dump_freeobjects(dmu_sendarg_t *dsp, uint64_t firstobj, uint64_t numobjs)
210 210 {
211 211 struct drr_freeobjects *drrfo = &(dsp->dsa_drr->drr_u.drr_freeobjects);
212 212
213 213 /*
214 214 * If there is a pending op, but it's not PENDING_FREEOBJECTS,
215 215 * push it out, since free block aggregation can only be done for
216 216 * blocks of the same type (i.e., DRR_FREE records can only be
217 217 * aggregated with other DRR_FREE records; DRR_FREEOBJECTS records
218 218 * can only be aggregated with other DRR_FREEOBJECTS records).
219 219 */
220 220 if (dsp->dsa_pending_op != PENDING_NONE &&
221 221 dsp->dsa_pending_op != PENDING_FREEOBJECTS) {
222 222 if (dump_bytes(dsp, dsp->dsa_drr,
223 223 sizeof (dmu_replay_record_t)) != 0)
224 224 return (EINTR);
225 225 dsp->dsa_pending_op = PENDING_NONE;
226 226 }
227 227 if (dsp->dsa_pending_op == PENDING_FREEOBJECTS) {
228 228 /*
229 229 * See whether this free object array can be aggregated
230 230 * with the pending one.
231 231 */
232 232 if (drrfo->drr_firstobj + drrfo->drr_numobjs == firstobj) {
233 233 drrfo->drr_numobjs += numobjs;
234 234 return (0);
235 235 } else {
236 236 /* can't be aggregated. Push out pending record */
237 237 if (dump_bytes(dsp, dsp->dsa_drr,
238 238 sizeof (dmu_replay_record_t)) != 0)
239 239 return (EINTR);
240 240 dsp->dsa_pending_op = PENDING_NONE;
241 241 }
242 242 }
243 243
244 244 /* write a FREEOBJECTS record */
245 245 bzero(dsp->dsa_drr, sizeof (dmu_replay_record_t));
246 246 dsp->dsa_drr->drr_type = DRR_FREEOBJECTS;
247 247 drrfo->drr_firstobj = firstobj;
248 248 drrfo->drr_numobjs = numobjs;
249 249 drrfo->drr_toguid = dsp->dsa_toguid;
250 250
251 251 dsp->dsa_pending_op = PENDING_FREEOBJECTS;
252 252
253 253 return (0);
254 254 }
255 255
256 256 static int
257 257 dump_dnode(dmu_sendarg_t *dsp, uint64_t object, dnode_phys_t *dnp)
258 258 {
259 259 struct drr_object *drro = &(dsp->dsa_drr->drr_u.drr_object);
260 260
261 261 if (dnp == NULL || dnp->dn_type == DMU_OT_NONE)
262 262 return (dump_freeobjects(dsp, object, 1));
263 263
264 264 if (dsp->dsa_pending_op != PENDING_NONE) {
265 265 if (dump_bytes(dsp, dsp->dsa_drr,
266 266 sizeof (dmu_replay_record_t)) != 0)
267 267 return (EINTR);
268 268 dsp->dsa_pending_op = PENDING_NONE;
269 269 }
270 270
271 271 /* write an OBJECT record */
272 272 bzero(dsp->dsa_drr, sizeof (dmu_replay_record_t));
273 273 dsp->dsa_drr->drr_type = DRR_OBJECT;
274 274 drro->drr_object = object;
275 275 drro->drr_type = dnp->dn_type;
276 276 drro->drr_bonustype = dnp->dn_bonustype;
277 277 drro->drr_blksz = dnp->dn_datablkszsec << SPA_MINBLOCKSHIFT;
278 278 drro->drr_bonuslen = dnp->dn_bonuslen;
279 279 drro->drr_checksumtype = dnp->dn_checksum;
280 280 drro->drr_compress = dnp->dn_compress;
281 281 drro->drr_toguid = dsp->dsa_toguid;
282 282
283 283 if (dump_bytes(dsp, dsp->dsa_drr, sizeof (dmu_replay_record_t)) != 0)
284 284 return (EINTR);
285 285
286 286 if (dump_bytes(dsp, DN_BONUS(dnp), P2ROUNDUP(dnp->dn_bonuslen, 8)) != 0)
287 287 return (EINTR);
288 288
289 289 /* free anything past the end of the file */
290 290 if (dump_free(dsp, object, (dnp->dn_maxblkid + 1) *
291 291 (dnp->dn_datablkszsec << SPA_MINBLOCKSHIFT), -1ULL))
292 292 return (EINTR);
293 293 if (dsp->dsa_err)
294 294 return (EINTR);
295 295 return (0);
296 296 }
297 297
298 298 #define BP_SPAN(dnp, level) \
299 299 (((uint64_t)dnp->dn_datablkszsec) << (SPA_MINBLOCKSHIFT + \
300 300 (level) * (dnp->dn_indblkshift - SPA_BLKPTRSHIFT)))
301 301
302 302 /* ARGSUSED */
303 303 static int
304 304 backup_cb(spa_t *spa, zilog_t *zilog, const blkptr_t *bp, arc_buf_t *pbuf,
305 305 const zbookmark_t *zb, const dnode_phys_t *dnp, void *arg)
306 306 {
307 307 dmu_sendarg_t *dsp = arg;
308 308 dmu_object_type_t type = bp ? BP_GET_TYPE(bp) : DMU_OT_NONE;
309 309 int err = 0;
310 310
311 311 if (issig(JUSTLOOKING) && issig(FORREAL))
312 312 return (EINTR);
313 313
314 314 if (zb->zb_object != DMU_META_DNODE_OBJECT &&
315 315 DMU_OBJECT_IS_SPECIAL(zb->zb_object)) {
316 316 return (0);
317 317 } else if (bp == NULL && zb->zb_object == DMU_META_DNODE_OBJECT) {
318 318 uint64_t span = BP_SPAN(dnp, zb->zb_level);
319 319 uint64_t dnobj = (zb->zb_blkid * span) >> DNODE_SHIFT;
320 320 err = dump_freeobjects(dsp, dnobj, span >> DNODE_SHIFT);
321 321 } else if (bp == NULL) {
322 322 uint64_t span = BP_SPAN(dnp, zb->zb_level);
323 323 err = dump_free(dsp, zb->zb_object, zb->zb_blkid * span, span);
324 324 } else if (zb->zb_level > 0 || type == DMU_OT_OBJSET) {
325 325 return (0);
326 326 } else if (type == DMU_OT_DNODE) {
327 327 dnode_phys_t *blk;
328 328 int i;
329 329 int blksz = BP_GET_LSIZE(bp);
330 330 uint32_t aflags = ARC_WAIT;
331 331 arc_buf_t *abuf;
332 332
333 333 if (dsl_read(NULL, spa, bp, pbuf,
334 334 arc_getbuf_func, &abuf, ZIO_PRIORITY_ASYNC_READ,
335 335 ZIO_FLAG_CANFAIL, &aflags, zb) != 0)
336 336 return (EIO);
337 337
338 338 blk = abuf->b_data;
339 339 for (i = 0; i < blksz >> DNODE_SHIFT; i++) {
340 340 uint64_t dnobj = (zb->zb_blkid <<
341 341 (DNODE_BLOCK_SHIFT - DNODE_SHIFT)) + i;
342 342 err = dump_dnode(dsp, dnobj, blk+i);
343 343 if (err)
344 344 break;
345 345 }
346 346 (void) arc_buf_remove_ref(abuf, &abuf);
347 347 } else if (type == DMU_OT_SA) {
348 348 uint32_t aflags = ARC_WAIT;
349 349 arc_buf_t *abuf;
350 350 int blksz = BP_GET_LSIZE(bp);
351 351
352 352 if (arc_read_nolock(NULL, spa, bp,
353 353 arc_getbuf_func, &abuf, ZIO_PRIORITY_ASYNC_READ,
354 354 ZIO_FLAG_CANFAIL, &aflags, zb) != 0)
355 355 return (EIO);
356 356
357 357 err = dump_spill(dsp, zb->zb_object, blksz, abuf->b_data);
358 358 (void) arc_buf_remove_ref(abuf, &abuf);
359 359 } else { /* it's a level-0 block of a regular object */
360 360 uint32_t aflags = ARC_WAIT;
361 361 arc_buf_t *abuf;
362 362 int blksz = BP_GET_LSIZE(bp);
363 363
364 364 if (dsl_read(NULL, spa, bp, pbuf,
365 365 arc_getbuf_func, &abuf, ZIO_PRIORITY_ASYNC_READ,
366 366 ZIO_FLAG_CANFAIL, &aflags, zb) != 0) {
367 367 if (zfs_send_corrupt_data) {
368 368 /* Send a block filled with 0x"zfs badd bloc" */
369 369 abuf = arc_buf_alloc(spa, blksz, &abuf,
370 370 ARC_BUFC_DATA);
371 371 uint64_t *ptr;
372 372 for (ptr = abuf->b_data;
373 373 (char *)ptr < (char *)abuf->b_data + blksz;
374 374 ptr++)
375 375 *ptr = 0x2f5baddb10c;
376 376 } else {
377 377 return (EIO);
378 378 }
379 379 }
380 380
381 381 err = dump_data(dsp, type, zb->zb_object, zb->zb_blkid * blksz,
382 382 blksz, bp, abuf->b_data);
383 383 (void) arc_buf_remove_ref(abuf, &abuf);
384 384 }
385 385
386 386 ASSERT(err == 0 || err == EINTR);
387 387 return (err);
388 388 }
389 389
390 +/*
391 + * Return TRUE if 'earlier' is an earlier snapshot in 'later's timeline.
392 + * For example, they could both be snapshots of the same filesystem, and
393 + * 'earlier' is before 'later'. Or 'earlier' could be the origin of
394 + * 'later's filesystem. Or 'earlier' could be an older snapshot in the origin's
395 + * filesystem. Or 'earlier' could be the origin's origin.
396 + */
397 +static boolean_t
398 +is_before(dsl_dataset_t *later, dsl_dataset_t *earlier)
399 +{
400 + dsl_pool_t *dp = later->ds_dir->dd_pool;
401 + int error;
402 + boolean_t ret;
403 + dsl_dataset_t *origin;
404 +
405 + if (earlier->ds_phys->ds_creation_txg >=
406 + later->ds_phys->ds_creation_txg)
407 + return (B_FALSE);
408 +
409 + if (later->ds_dir == earlier->ds_dir)
410 + return (B_TRUE);
411 + if (!dsl_dir_is_clone(later->ds_dir))
412 + return (B_FALSE);
413 +
414 + rw_enter(&dp->dp_config_rwlock, RW_READER);
415 + if (later->ds_dir->dd_phys->dd_origin_obj == earlier->ds_object) {
416 + rw_exit(&dp->dp_config_rwlock);
417 + return (B_TRUE);
418 + }
419 + error = dsl_dataset_hold_obj(dp,
420 + later->ds_dir->dd_phys->dd_origin_obj, FTAG, &origin);
421 + rw_exit(&dp->dp_config_rwlock);
422 + if (error != 0)
423 + return (B_FALSE);
424 + ret = is_before(origin, earlier);
425 + dsl_dataset_rele(origin, FTAG);
426 + return (ret);
427 +}
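
Concretely, the recursion admits a fromsnap anywhere on tosnap's timeline,
including across clone boundaries. A hypothetical layout:

	pool/fs@a (txg 100) ---- pool/fs@b (txg 200)
	         \
	          pool/clone (origin = pool/fs@a) ---- pool/clone@c (txg 300)

	is_before(clone@c, fs@a) == B_TRUE	(fs@a is the clone's origin
						 snapshot)
	is_before(clone@c, fs@b) == B_FALSE	(fs@b is past the branch point,
						 so not on the clone's timeline)
	is_before(fs@b, fs@a)	 == B_TRUE	(same ds_dir, earlier txg)

This is what lets dmu_send() below accept an incremental from the origin's
filesystem without the old fromorigin flag.
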
428 +
390 429 int
391 -dmu_send(objset_t *tosnap, objset_t *fromsnap, boolean_t fromorigin,
392 - int outfd, vnode_t *vp, offset_t *off)
430 +dmu_send(objset_t *tosnap, objset_t *fromsnap, int outfd, vnode_t *vp,
431 + offset_t *off)
393 432 {
394 433 dsl_dataset_t *ds = tosnap->os_dsl_dataset;
395 434 dsl_dataset_t *fromds = fromsnap ? fromsnap->os_dsl_dataset : NULL;
396 435 dmu_replay_record_t *drr;
397 436 dmu_sendarg_t *dsp;
398 437 int err;
399 438 uint64_t fromtxg = 0;
400 439
401 440 /* tosnap must be a snapshot */
402 441 if (ds->ds_phys->ds_next_snap_obj == 0)
403 442 return (EINVAL);
404 443
405 - /* fromsnap must be an earlier snapshot from the same fs as tosnap */
406 - if (fromds && (ds->ds_dir != fromds->ds_dir ||
407 - fromds->ds_phys->ds_creation_txg >= ds->ds_phys->ds_creation_txg))
444 + /*
445 + * fromsnap must be an earlier snapshot from the same fs as tosnap,
446 + * or the origin's fs.
447 + */
448 + if (fromds != NULL && !is_before(ds, fromds))
408 449 return (EXDEV);
409 450
410 - if (fromorigin) {
411 - dsl_pool_t *dp = ds->ds_dir->dd_pool;
412 -
413 - if (fromsnap)
414 - return (EINVAL);
415 -
416 - if (dsl_dir_is_clone(ds->ds_dir)) {
417 - rw_enter(&dp->dp_config_rwlock, RW_READER);
418 - err = dsl_dataset_hold_obj(dp,
419 - ds->ds_dir->dd_phys->dd_origin_obj, FTAG, &fromds);
420 - rw_exit(&dp->dp_config_rwlock);
421 - if (err)
422 - return (err);
423 - } else {
424 - fromorigin = B_FALSE;
425 - }
426 - }
427 -
428 -
429 451 drr = kmem_zalloc(sizeof (dmu_replay_record_t), KM_SLEEP);
430 452 drr->drr_type = DRR_BEGIN;
431 453 drr->drr_u.drr_begin.drr_magic = DMU_BACKUP_MAGIC;
432 454 DMU_SET_STREAM_HDRTYPE(drr->drr_u.drr_begin.drr_versioninfo,
433 455 DMU_SUBSTREAM);
434 456
435 457 #ifdef _KERNEL
436 458 if (dmu_objset_type(tosnap) == DMU_OST_ZFS) {
437 459 uint64_t version;
438 460 if (zfs_get_zplprop(tosnap, ZFS_PROP_VERSION, &version) != 0) {
439 461 kmem_free(drr, sizeof (dmu_replay_record_t));
440 462 return (EINVAL);
441 463 }
442 464 if (version == ZPL_VERSION_SA) {
443 465 DMU_SET_FEATUREFLAGS(
444 466 drr->drr_u.drr_begin.drr_versioninfo,
445 467 DMU_BACKUP_FEATURE_SA_SPILL);
446 468 }
447 469 }
448 470 #endif
449 471
450 472 drr->drr_u.drr_begin.drr_creation_time =
451 473 ds->ds_phys->ds_creation_time;
452 474 drr->drr_u.drr_begin.drr_type = tosnap->os_phys->os_type;
453 - if (fromorigin)
475 + if (fromds != NULL && ds->ds_dir != fromds->ds_dir)
454 476 drr->drr_u.drr_begin.drr_flags |= DRR_FLAG_CLONE;
455 477 drr->drr_u.drr_begin.drr_toguid = ds->ds_phys->ds_guid;
456 478 if (ds->ds_phys->ds_flags & DS_FLAG_CI_DATASET)
457 479 drr->drr_u.drr_begin.drr_flags |= DRR_FLAG_CI_DATA;
458 480
459 481 if (fromds)
460 482 drr->drr_u.drr_begin.drr_fromguid = fromds->ds_phys->ds_guid;
461 483 dsl_dataset_name(ds, drr->drr_u.drr_begin.drr_toname);
462 484
463 485 if (fromds)
464 486 fromtxg = fromds->ds_phys->ds_creation_txg;
465 - if (fromorigin)
466 - dsl_dataset_rele(fromds, FTAG);
467 487
468 488 dsp = kmem_zalloc(sizeof (dmu_sendarg_t), KM_SLEEP);
469 489
470 490 dsp->dsa_drr = drr;
471 491 dsp->dsa_vp = vp;
472 492 dsp->dsa_outfd = outfd;
473 493 dsp->dsa_proc = curproc;
474 494 dsp->dsa_os = tosnap;
475 495 dsp->dsa_off = off;
476 496 dsp->dsa_toguid = ds->ds_phys->ds_guid;
477 497 ZIO_SET_CHECKSUM(&dsp->dsa_zc, 0, 0, 0, 0);
478 498 dsp->dsa_pending_op = PENDING_NONE;
479 499
480 500 mutex_enter(&ds->ds_sendstream_lock);
481 501 list_insert_head(&ds->ds_sendstreams, dsp);
482 502 mutex_exit(&ds->ds_sendstream_lock);
483 503
484 504 if (dump_bytes(dsp, drr, sizeof (dmu_replay_record_t)) != 0) {
485 505 err = dsp->dsa_err;
486 506 goto out;
487 507 }
488 508
489 509 err = traverse_dataset(ds, fromtxg, TRAVERSE_PRE | TRAVERSE_PREFETCH,
490 510 backup_cb, dsp);
491 511
492 512 if (dsp->dsa_pending_op != PENDING_NONE)
493 513 if (dump_bytes(dsp, drr, sizeof (dmu_replay_record_t)) != 0)
494 514 err = EINTR;
495 515
496 516 if (err) {
497 517 if (err == EINTR && dsp->dsa_err)
498 518 err = dsp->dsa_err;
499 519 goto out;
500 520 }
501 521
502 522 bzero(drr, sizeof (dmu_replay_record_t));
503 523 drr->drr_type = DRR_END;
504 524 drr->drr_u.drr_end.drr_checksum = dsp->dsa_zc;
505 525 drr->drr_u.drr_end.drr_toguid = dsp->dsa_toguid;
506 526
507 527 if (dump_bytes(dsp, drr, sizeof (dmu_replay_record_t)) != 0) {
508 528 err = dsp->dsa_err;
509 529 goto out;
510 530 }
511 531
512 532 out:
513 533 mutex_enter(&ds->ds_sendstream_lock);
514 534 list_remove(&ds->ds_sendstreams, dsp);
515 535 mutex_exit(&ds->ds_sendstream_lock);
516 536
517 537 kmem_free(drr, sizeof (dmu_replay_record_t));
518 538 kmem_free(dsp, sizeof (dmu_sendarg_t));
519 539
520 540 return (err);
521 541 }
522 542
523 543 int
524 -dmu_send_estimate(objset_t *tosnap, objset_t *fromsnap, boolean_t fromorigin,
525 - uint64_t *sizep)
544 +dmu_send_estimate(objset_t *tosnap, objset_t *fromsnap, uint64_t *sizep)
526 545 {
527 546 dsl_dataset_t *ds = tosnap->os_dsl_dataset;
528 547 dsl_dataset_t *fromds = fromsnap ? fromsnap->os_dsl_dataset : NULL;
529 548 dsl_pool_t *dp = ds->ds_dir->dd_pool;
530 549 int err;
531 550 uint64_t size;
532 551
533 552 /* tosnap must be a snapshot */
534 553 if (ds->ds_phys->ds_next_snap_obj == 0)
535 554 return (EINVAL);
536 555
537 - /* fromsnap must be an earlier snapshot from the same fs as tosnap */
538 - if (fromds && (ds->ds_dir != fromds->ds_dir ||
539 - fromds->ds_phys->ds_creation_txg >= ds->ds_phys->ds_creation_txg))
556 + /*
557 + * fromsnap must be an earlier snapshot from the same fs as tosnap,
558 + * or the origin's fs.
559 + */
560 + if (fromds != NULL && !is_before(ds, fromds))
540 561 return (EXDEV);
541 562
542 - if (fromorigin) {
543 - if (fromsnap)
544 - return (EINVAL);
545 -
546 - if (dsl_dir_is_clone(ds->ds_dir)) {
547 - rw_enter(&dp->dp_config_rwlock, RW_READER);
548 - err = dsl_dataset_hold_obj(dp,
549 - ds->ds_dir->dd_phys->dd_origin_obj, FTAG, &fromds);
550 - rw_exit(&dp->dp_config_rwlock);
551 - if (err)
552 - return (err);
553 - } else {
554 - fromorigin = B_FALSE;
555 - }
556 - }
557 -
558 563 /* Get uncompressed size estimate of changed data. */
559 564 if (fromds == NULL) {
560 565 size = ds->ds_phys->ds_uncompressed_bytes;
561 566 } else {
562 567 uint64_t used, comp;
563 568 err = dsl_dataset_space_written(fromds, ds,
564 569 &used, &comp, &size);
565 - if (fromorigin)
566 - dsl_dataset_rele(fromds, FTAG);
567 570 if (err)
568 571 return (err);
569 572 }
570 573
571 574 /*
572 575 * Assume that space (both on-disk and in-stream) is dominated by
573 576 * data. We will adjust for indirect blocks and the copies property,
574 577 * but ignore per-object space used (eg, dnodes and DRR_OBJECT records).
575 578 */
576 579
577 580 /*
578 581 * Subtract out approximate space used by indirect blocks.
579 582 * Assume most space is used by data blocks (non-indirect, non-dnode).
580 583 * Assume all blocks are recordsize. Assume ditto blocks and
581 584 * internal fragmentation counter out compression.
582 585 *
583 586 * Therefore, space used by indirect blocks is sizeof(blkptr_t) per
584 587 * block, which we observe in practice.
585 588 */
586 589 uint64_t recordsize;
587 590 rw_enter(&dp->dp_config_rwlock, RW_READER);
588 591 err = dsl_prop_get_ds(ds, "recordsize",
589 592 sizeof (recordsize), 1, &recordsize, NULL);
590 593 rw_exit(&dp->dp_config_rwlock);
591 594 if (err)
592 595 return (err);
593 596 size -= size / recordsize * sizeof (blkptr_t);
594 597
595 598 /* Add in the space for the record associated with each block. */
596 599 size += size / recordsize * sizeof (dmu_replay_record_t);
597 600
598 601 *sizep = size;
599 602
600 603 return (0);
601 604 }
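
As a worked example of the adjustment, take 1 GiB of changed data at the
default 128 KiB recordsize, with the 128-byte blkptr_t and (an assumption
here) a 312-byte dmu_replay_record_t:

	blocks    = 1 GiB / 128 KiB           = 8192
	indirect  = 8192 * 128 B              = 1 MiB     (subtracted)
	records   = 8192 * 312 B             ~= 2.4 MiB   (added)
	estimate ~= 1 GiB - 1 MiB + 2.4 MiB  ~= 1 GiB + 1.4 MiB

The correction is small relative to the data itself, which is why per-object
overhead (dnodes, DRR_OBJECT records) can safely be ignored.
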
602 605
603 606 struct recvbeginsyncarg {
604 607 const char *tofs;
605 608 const char *tosnap;
606 609 dsl_dataset_t *origin;
607 610 uint64_t fromguid;
608 611 dmu_objset_type_t type;
609 612 void *tag;
610 613 boolean_t force;
611 614 uint64_t dsflags;
612 615 char clonelastname[MAXNAMELEN];
613 616 dsl_dataset_t *ds; /* the ds to recv into; returned from the syncfunc */
614 617 cred_t *cr;
615 618 };
616 619
617 620 /* ARGSUSED */
618 621 static int
619 622 recv_new_check(void *arg1, void *arg2, dmu_tx_t *tx)
620 623 {
621 624 dsl_dir_t *dd = arg1;
622 625 struct recvbeginsyncarg *rbsa = arg2;
623 626 objset_t *mos = dd->dd_pool->dp_meta_objset;
624 627 uint64_t val;
625 628 int err;
626 629
627 630 err = zap_lookup(mos, dd->dd_phys->dd_child_dir_zapobj,
628 631 strrchr(rbsa->tofs, '/') + 1, sizeof (uint64_t), 1, &val);
629 632
630 633 if (err != ENOENT)
631 634 return (err ? err : EEXIST);
632 635
633 636 if (rbsa->origin) {
634 637 /* make sure it's a snap in the same pool */
635 638 if (rbsa->origin->ds_dir->dd_pool != dd->dd_pool)
636 639 return (EXDEV);
637 640 if (!dsl_dataset_is_snapshot(rbsa->origin))
638 641 return (EINVAL);
639 642 if (rbsa->origin->ds_phys->ds_guid != rbsa->fromguid)
640 643 return (ENODEV);
641 644 }
642 645
643 646 return (0);
644 647 }
645 648
646 649 static void
647 650 recv_new_sync(void *arg1, void *arg2, dmu_tx_t *tx)
648 651 {
649 652 dsl_dir_t *dd = arg1;
650 653 struct recvbeginsyncarg *rbsa = arg2;
651 654 uint64_t flags = DS_FLAG_INCONSISTENT | rbsa->dsflags;
652 655 uint64_t dsobj;
653 656
654 657 /* Create and open new dataset. */
655 658 dsobj = dsl_dataset_create_sync(dd, strrchr(rbsa->tofs, '/') + 1,
656 659 rbsa->origin, flags, rbsa->cr, tx);
657 660 VERIFY(0 == dsl_dataset_own_obj(dd->dd_pool, dsobj,
658 661 B_TRUE, dmu_recv_tag, &rbsa->ds));
659 662
660 663 if (rbsa->origin == NULL) {
661 664 (void) dmu_objset_create_impl(dd->dd_pool->dp_spa,
662 665 rbsa->ds, &rbsa->ds->ds_phys->ds_bp, rbsa->type, tx);
663 666 }
664 667
665 - spa_history_log_internal(LOG_DS_REPLAY_FULL_SYNC,
666 - dd->dd_pool->dp_spa, tx, "dataset = %lld", dsobj);
668 + spa_history_log_internal_ds(rbsa->ds, "receive new", tx, "");
667 669 }
668 670
669 671 /* ARGSUSED */
670 672 static int
671 673 recv_existing_check(void *arg1, void *arg2, dmu_tx_t *tx)
672 674 {
673 675 dsl_dataset_t *ds = arg1;
674 676 struct recvbeginsyncarg *rbsa = arg2;
675 677 int err;
676 678 uint64_t val;
677 679
678 680 /* must not have any changes since most recent snapshot */
679 681 if (!rbsa->force && dsl_dataset_modified_since_lastsnap(ds))
680 682 return (ETXTBSY);
681 683
682 684 /* new snapshot name must not exist */
683 685 err = zap_lookup(ds->ds_dir->dd_pool->dp_meta_objset,
684 686 ds->ds_phys->ds_snapnames_zapobj, rbsa->tosnap, 8, 1, &val);
685 687 if (err == 0)
686 688 return (EEXIST);
687 689 if (err != ENOENT)
688 690 return (err);
689 691
690 692 if (rbsa->fromguid) {
691 693 /* if incremental, most recent snapshot must match fromguid */
692 694 if (ds->ds_prev == NULL)
693 695 return (ENODEV);
694 696
695 697 /*
696 698 * most recent snapshot must match fromguid, or there are no
697 699 * changes since the fromguid one
698 700 */
699 701 if (ds->ds_prev->ds_phys->ds_guid != rbsa->fromguid) {
700 702 uint64_t birth = ds->ds_prev->ds_phys->ds_bp.blk_birth;
701 703 uint64_t obj = ds->ds_prev->ds_phys->ds_prev_snap_obj;
702 704 while (obj != 0) {
703 705 dsl_dataset_t *snap;
704 706 err = dsl_dataset_hold_obj(ds->ds_dir->dd_pool,
705 707 obj, FTAG, &snap);
706 708 if (err)
707 709 return (ENODEV);
708 710 if (snap->ds_phys->ds_creation_txg < birth) {
709 711 dsl_dataset_rele(snap, FTAG);
710 712 return (ENODEV);
711 713 }
712 714 if (snap->ds_phys->ds_guid == rbsa->fromguid) {
713 715 dsl_dataset_rele(snap, FTAG);
714 716 break; /* it's ok */
715 717 }
716 718 obj = snap->ds_phys->ds_prev_snap_obj;
717 719 dsl_dataset_rele(snap, FTAG);
718 720 }
719 721 if (obj == 0)
720 722 return (ENODEV);
721 723 }
722 724 } else {
723 725 /* if full, most recent snapshot must be $ORIGIN */
724 726 if (ds->ds_phys->ds_prev_snap_txg >= TXG_INITIAL)
725 727 return (ENODEV);
726 728 }
727 729
728 730 /* temporary clone name must not exist */
729 731 err = zap_lookup(ds->ds_dir->dd_pool->dp_meta_objset,
730 732 ds->ds_dir->dd_phys->dd_child_dir_zapobj,
731 733 rbsa->clonelastname, 8, 1, &val);
732 734 if (err == 0)
733 735 return (EEXIST);
734 736 if (err != ENOENT)
735 737 return (err);
736 738
737 739 return (0);
738 740 }
739 741
740 742 /* ARGSUSED */
741 743 static void
742 744 recv_existing_sync(void *arg1, void *arg2, dmu_tx_t *tx)
743 745 {
744 746 dsl_dataset_t *ohds = arg1;
745 747 struct recvbeginsyncarg *rbsa = arg2;
746 748 dsl_pool_t *dp = ohds->ds_dir->dd_pool;
747 749 dsl_dataset_t *cds;
748 750 uint64_t flags = DS_FLAG_INCONSISTENT | rbsa->dsflags;
749 751 uint64_t dsobj;
750 752
751 753 /* create and open the temporary clone */
752 754 dsobj = dsl_dataset_create_sync(ohds->ds_dir, rbsa->clonelastname,
753 755 ohds->ds_prev, flags, rbsa->cr, tx);
754 756 VERIFY(0 == dsl_dataset_own_obj(dp, dsobj, B_TRUE, dmu_recv_tag, &cds));
755 757
756 758 /*
757 759 * If we actually created a non-clone, we need to create the
758 760 * objset in our new dataset.
759 761 */
760 762 if (BP_IS_HOLE(dsl_dataset_get_blkptr(cds))) {
761 763 (void) dmu_objset_create_impl(dp->dp_spa,
762 764 cds, dsl_dataset_get_blkptr(cds), rbsa->type, tx);
763 765 }
764 766
765 767 rbsa->ds = cds;
766 768
767 - spa_history_log_internal(LOG_DS_REPLAY_INC_SYNC,
768 - dp->dp_spa, tx, "dataset = %lld", dsobj);
769 + spa_history_log_internal_ds(cds, "receive over existing", tx, "");
769 770 }
770 771
771 772 static boolean_t
772 773 dmu_recv_verify_features(dsl_dataset_t *ds, struct drr_begin *drrb)
773 774 {
774 775 int featureflags;
775 776
776 777 featureflags = DMU_GET_FEATUREFLAGS(drrb->drr_versioninfo);
777 778
778 779 /* Verify pool version supports SA if SA_SPILL feature set */
779 780 return ((featureflags & DMU_BACKUP_FEATURE_SA_SPILL) &&
780 781 (spa_version(dsl_dataset_get_spa(ds)) < SPA_VERSION_SA));
781 782 }
782 783
783 784 /*
784 785 * NB: callers *MUST* call dmu_recv_stream() if dmu_recv_begin()
785 786 * succeeds; otherwise we will leak the holds on the datasets.
786 787 */
787 788 int
788 789 dmu_recv_begin(char *tofs, char *tosnap, char *top_ds, struct drr_begin *drrb,
789 790 boolean_t force, objset_t *origin, dmu_recv_cookie_t *drc)
790 791 {
791 792 int err = 0;
792 793 boolean_t byteswap;
793 794 struct recvbeginsyncarg rbsa = { 0 };
794 795 uint64_t versioninfo;
795 796 int flags;
796 797 dsl_dataset_t *ds;
797 798
798 799 if (drrb->drr_magic == DMU_BACKUP_MAGIC)
799 800 byteswap = FALSE;
800 801 else if (drrb->drr_magic == BSWAP_64(DMU_BACKUP_MAGIC))
801 802 byteswap = TRUE;
802 803 else
803 804 return (EINVAL);
804 805
805 806 rbsa.tofs = tofs;
806 807 rbsa.tosnap = tosnap;
807 808 rbsa.origin = origin ? origin->os_dsl_dataset : NULL;
808 809 rbsa.fromguid = drrb->drr_fromguid;
809 810 rbsa.type = drrb->drr_type;
810 811 rbsa.tag = FTAG;
811 812 rbsa.dsflags = 0;
812 813 rbsa.cr = CRED();
813 814 versioninfo = drrb->drr_versioninfo;
814 815 flags = drrb->drr_flags;
815 816
816 817 if (byteswap) {
817 818 rbsa.type = BSWAP_32(rbsa.type);
818 819 rbsa.fromguid = BSWAP_64(rbsa.fromguid);
819 820 versioninfo = BSWAP_64(versioninfo);
820 821 flags = BSWAP_32(flags);
821 822 }
822 823
823 824 if (DMU_GET_STREAM_HDRTYPE(versioninfo) == DMU_COMPOUNDSTREAM ||
824 825 rbsa.type >= DMU_OST_NUMTYPES ||
825 826 ((flags & DRR_FLAG_CLONE) && origin == NULL))
826 827 return (EINVAL);
827 828
828 829 if (flags & DRR_FLAG_CI_DATA)
829 830 rbsa.dsflags = DS_FLAG_CI_DATASET;
830 831
831 832 bzero(drc, sizeof (dmu_recv_cookie_t));
832 833 drc->drc_drrb = drrb;
833 834 drc->drc_tosnap = tosnap;
834 835 drc->drc_top_ds = top_ds;
835 836 drc->drc_force = force;
836 837
837 838 /*
838 839 * Process the begin in syncing context.
839 840 */
840 841
841 842 /* open the dataset we are logically receiving into */
842 843 err = dsl_dataset_hold(tofs, dmu_recv_tag, &ds);
843 844 if (err == 0) {
844 845 if (dmu_recv_verify_features(ds, drrb)) {
845 846 dsl_dataset_rele(ds, dmu_recv_tag);
846 847 return (ENOTSUP);
847 848 }
848 849 /* target fs already exists; recv into temp clone */
849 850
850 851 /* Can't recv a clone into an existing fs */
851 852 if (flags & DRR_FLAG_CLONE) {
852 853 dsl_dataset_rele(ds, dmu_recv_tag);
853 854 return (EINVAL);
854 855 }
855 856
856 857 /* must not have an incremental recv already in progress */
857 858 if (!mutex_tryenter(&ds->ds_recvlock)) {
858 859 dsl_dataset_rele(ds, dmu_recv_tag);
859 860 return (EBUSY);
860 861 }
861 862
862 863 /* tmp clone name is: tofs/%tosnap */
863 864 (void) snprintf(rbsa.clonelastname, sizeof (rbsa.clonelastname),
864 865 "%%%s", tosnap);
865 866 rbsa.force = force;
866 867 err = dsl_sync_task_do(ds->ds_dir->dd_pool,
867 868 recv_existing_check, recv_existing_sync, ds, &rbsa, 5);
868 869 if (err) {
869 870 mutex_exit(&ds->ds_recvlock);
870 871 dsl_dataset_rele(ds, dmu_recv_tag);
871 872 return (err);
872 873 }
873 874 drc->drc_logical_ds = ds;
874 875 drc->drc_real_ds = rbsa.ds;
875 876 } else if (err == ENOENT) {
876 877 /* target fs does not exist; must be a full backup or clone */
877 878 char *cp;
878 879
879 880 /*
880 881 * If it's a non-clone incremental, we are missing the
881 882 * target fs, so fail the recv.
882 883 */
883 884 if (rbsa.fromguid && !(flags & DRR_FLAG_CLONE))
884 885 return (ENOENT);
885 886
886 887 /* Open the parent of tofs */
887 888 cp = strrchr(tofs, '/');
888 889 *cp = '\0';
889 890 err = dsl_dataset_hold(tofs, FTAG, &ds);
890 891 *cp = '/';
891 892 if (err)
892 893 return (err);
893 894
894 895 if (dmu_recv_verify_features(ds, drrb)) {
895 896 dsl_dataset_rele(ds, FTAG);
896 897 return (ENOTSUP);
897 898 }
898 899
899 900 err = dsl_sync_task_do(ds->ds_dir->dd_pool,
900 901 recv_new_check, recv_new_sync, ds->ds_dir, &rbsa, 5);
901 902 dsl_dataset_rele(ds, FTAG);
902 903 if (err)
903 904 return (err);
904 905 drc->drc_logical_ds = drc->drc_real_ds = rbsa.ds;
905 906 drc->drc_newfs = B_TRUE;
906 907 }
907 908
908 909 return (err);
909 910 }
910 911
911 912 struct restorearg {
912 913 int err;
913 914 int byteswap;
914 915 vnode_t *vp;
915 916 char *buf;
916 917 uint64_t voff;
917 918 int bufsize; /* amount of memory allocated for buf */
918 919 zio_cksum_t cksum;
919 920 avl_tree_t *guid_to_ds_map;
920 921 };
921 922
922 923 typedef struct guid_map_entry {
923 924 uint64_t guid;
924 925 dsl_dataset_t *gme_ds;
925 926 avl_node_t avlnode;
926 927 } guid_map_entry_t;
927 928
928 929 static int
929 930 guid_compare(const void *arg1, const void *arg2)
930 931 {
931 932 const guid_map_entry_t *gmep1 = arg1;
932 933 const guid_map_entry_t *gmep2 = arg2;
933 934
934 935 if (gmep1->guid < gmep2->guid)
935 936 return (-1);
936 937 else if (gmep1->guid > gmep2->guid)
937 938 return (1);
938 939 return (0);
939 940 }
940 941
941 942 static void
942 943 free_guid_map_onexit(void *arg)
943 944 {
944 945 avl_tree_t *ca = arg;
945 946 void *cookie = NULL;
946 947 guid_map_entry_t *gmep;
947 948
948 949 while ((gmep = avl_destroy_nodes(ca, &cookie)) != NULL) {
949 950 dsl_dataset_rele(gmep->gme_ds, ca);
950 951 kmem_free(gmep, sizeof (guid_map_entry_t));
951 952 }
952 953 avl_destroy(ca);
953 954 kmem_free(ca, sizeof (avl_tree_t));
954 955 }
955 956
956 957 static void *
957 958 restore_read(struct restorearg *ra, int len)
958 959 {
959 960 void *rv;
960 961 int done = 0;
961 962
962 963 /* some things will require 8-byte alignment, so everything must */
963 964 ASSERT3U(len % 8, ==, 0);
964 965
965 966 while (done < len) {
966 967 ssize_t resid;
967 968
968 969 ra->err = vn_rdwr(UIO_READ, ra->vp,
969 970 (caddr_t)ra->buf + done, len - done,
970 971 ra->voff, UIO_SYSSPACE, FAPPEND,
971 972 RLIM64_INFINITY, CRED(), &resid);
972 973
973 974 if (resid == len - done)
974 975 ra->err = EINVAL;
975 976 ra->voff += len - done - resid;
976 977 done = len - resid;
977 978 if (ra->err)
978 979 return (NULL);
979 980 }
980 981
981 982 ASSERT3U(done, ==, len);
982 983 rv = ra->buf;
983 984 if (ra->byteswap)
984 985 fletcher_4_incremental_byteswap(rv, len, &ra->cksum);
985 986 else
986 987 fletcher_4_incremental_native(rv, len, &ra->cksum);
987 988 return (rv);
988 989 }
989 990
990 991 static void
991 992 backup_byteswap(dmu_replay_record_t *drr)
992 993 {
993 994 #define DO64(X) (drr->drr_u.X = BSWAP_64(drr->drr_u.X))
994 995 #define DO32(X) (drr->drr_u.X = BSWAP_32(drr->drr_u.X))
995 996 drr->drr_type = BSWAP_32(drr->drr_type);
996 997 drr->drr_payloadlen = BSWAP_32(drr->drr_payloadlen);
997 998 switch (drr->drr_type) {
998 999 case DRR_BEGIN:
999 1000 DO64(drr_begin.drr_magic);
1000 1001 DO64(drr_begin.drr_versioninfo);
1001 1002 DO64(drr_begin.drr_creation_time);
1002 1003 DO32(drr_begin.drr_type);
1003 1004 DO32(drr_begin.drr_flags);
1004 1005 DO64(drr_begin.drr_toguid);
1005 1006 DO64(drr_begin.drr_fromguid);
1006 1007 break;
1007 1008 case DRR_OBJECT:
1008 1009 DO64(drr_object.drr_object);
1009 1010 /* DO64(drr_object.drr_allocation_txg); */
1010 1011 DO32(drr_object.drr_type);
1011 1012 DO32(drr_object.drr_bonustype);
1012 1013 DO32(drr_object.drr_blksz);
1013 1014 DO32(drr_object.drr_bonuslen);
1014 1015 DO64(drr_object.drr_toguid);
1015 1016 break;
1016 1017 case DRR_FREEOBJECTS:
1017 1018 DO64(drr_freeobjects.drr_firstobj);
1018 1019 DO64(drr_freeobjects.drr_numobjs);
1019 1020 DO64(drr_freeobjects.drr_toguid);
1020 1021 break;
1021 1022 case DRR_WRITE:
1022 1023 DO64(drr_write.drr_object);
1023 1024 DO32(drr_write.drr_type);
1024 1025 DO64(drr_write.drr_offset);
1025 1026 DO64(drr_write.drr_length);
1026 1027 DO64(drr_write.drr_toguid);
1027 1028 DO64(drr_write.drr_key.ddk_cksum.zc_word[0]);
1028 1029 DO64(drr_write.drr_key.ddk_cksum.zc_word[1]);
1029 1030 DO64(drr_write.drr_key.ddk_cksum.zc_word[2]);
1030 1031 DO64(drr_write.drr_key.ddk_cksum.zc_word[3]);
1031 1032 DO64(drr_write.drr_key.ddk_prop);
1032 1033 break;
1033 1034 case DRR_WRITE_BYREF:
1034 1035 DO64(drr_write_byref.drr_object);
1035 1036 DO64(drr_write_byref.drr_offset);
1036 1037 DO64(drr_write_byref.drr_length);
1037 1038 DO64(drr_write_byref.drr_toguid);
1038 1039 DO64(drr_write_byref.drr_refguid);
1039 1040 DO64(drr_write_byref.drr_refobject);
1040 1041 DO64(drr_write_byref.drr_refoffset);
1041 1042 DO64(drr_write_byref.drr_key.ddk_cksum.zc_word[0]);
1042 1043 DO64(drr_write_byref.drr_key.ddk_cksum.zc_word[1]);
1043 1044 DO64(drr_write_byref.drr_key.ddk_cksum.zc_word[2]);
1044 1045 DO64(drr_write_byref.drr_key.ddk_cksum.zc_word[3]);
1045 1046 DO64(drr_write_byref.drr_key.ddk_prop);
1046 1047 break;
1047 1048 case DRR_FREE:
1048 1049 DO64(drr_free.drr_object);
1049 1050 DO64(drr_free.drr_offset);
1050 1051 DO64(drr_free.drr_length);
1051 1052 DO64(drr_free.drr_toguid);
1052 1053 break;
1053 1054 case DRR_SPILL:
1054 1055 DO64(drr_spill.drr_object);
1055 1056 DO64(drr_spill.drr_length);
1056 1057 DO64(drr_spill.drr_toguid);
1057 1058 break;
1058 1059 case DRR_END:
1059 1060 DO64(drr_end.drr_checksum.zc_word[0]);
1060 1061 DO64(drr_end.drr_checksum.zc_word[1]);
1061 1062 DO64(drr_end.drr_checksum.zc_word[2]);
1062 1063 DO64(drr_end.drr_checksum.zc_word[3]);
1063 1064 DO64(drr_end.drr_toguid);
1064 1065 break;
1065 1066 }
1066 1067 #undef DO64
1067 1068 #undef DO32
1068 1069 }
1069 1070
1070 1071 static int
1071 1072 restore_object(struct restorearg *ra, objset_t *os, struct drr_object *drro)
1072 1073 {
1073 1074 int err;
1074 1075 dmu_tx_t *tx;
1075 1076 void *data = NULL;
1076 1077
1077 1078 if (drro->drr_type == DMU_OT_NONE ||
1078 1079 !DMU_OT_IS_VALID(drro->drr_type) ||
1079 1080 !DMU_OT_IS_VALID(drro->drr_bonustype) ||
1080 1081 drro->drr_checksumtype >= ZIO_CHECKSUM_FUNCTIONS ||
1081 1082 drro->drr_compress >= ZIO_COMPRESS_FUNCTIONS ||
1082 1083 P2PHASE(drro->drr_blksz, SPA_MINBLOCKSIZE) ||
1083 1084 drro->drr_blksz < SPA_MINBLOCKSIZE ||
1084 1085 drro->drr_blksz > SPA_MAXBLOCKSIZE ||
1085 1086 drro->drr_bonuslen > DN_MAX_BONUSLEN) {
1086 1087 return (EINVAL);
1087 1088 }
1088 1089
1089 1090 err = dmu_object_info(os, drro->drr_object, NULL);
1090 1091
1091 1092 if (err != 0 && err != ENOENT)
1092 1093 return (EINVAL);
1093 1094
1094 1095 if (drro->drr_bonuslen) {
1095 1096 data = restore_read(ra, P2ROUNDUP(drro->drr_bonuslen, 8));
1096 1097 if (ra->err)
1097 1098 return (ra->err);
1098 1099 }
1099 1100
1100 1101 if (err == ENOENT) {
1101 1102 /* currently free, want to be allocated */
1102 1103 tx = dmu_tx_create(os);
1103 1104 dmu_tx_hold_bonus(tx, DMU_NEW_OBJECT);
1104 1105 err = dmu_tx_assign(tx, TXG_WAIT);
1105 1106 if (err) {
1106 1107 dmu_tx_abort(tx);
1107 1108 return (err);
1108 1109 }
1109 1110 err = dmu_object_claim(os, drro->drr_object,
1110 1111 drro->drr_type, drro->drr_blksz,
1111 1112 drro->drr_bonustype, drro->drr_bonuslen, tx);
1112 1113 dmu_tx_commit(tx);
1113 1114 } else {
1114 1115 /* currently allocated, want to be allocated */
1115 1116 err = dmu_object_reclaim(os, drro->drr_object,
1116 1117 drro->drr_type, drro->drr_blksz,
1117 1118 drro->drr_bonustype, drro->drr_bonuslen);
1118 1119 }
1119 1120 if (err) {
1120 1121 return (EINVAL);
1121 1122 }
1122 1123
1123 1124 tx = dmu_tx_create(os);
1124 1125 dmu_tx_hold_bonus(tx, drro->drr_object);
1125 1126 err = dmu_tx_assign(tx, TXG_WAIT);
1126 1127 if (err) {
1127 1128 dmu_tx_abort(tx);
1128 1129 return (err);
1129 1130 }
1130 1131
1131 1132 dmu_object_set_checksum(os, drro->drr_object, drro->drr_checksumtype,
1132 1133 tx);
1133 1134 dmu_object_set_compress(os, drro->drr_object, drro->drr_compress, tx);
1134 1135
1135 1136 if (data != NULL) {
1136 1137 dmu_buf_t *db;
1137 1138
1138 1139 VERIFY(0 == dmu_bonus_hold(os, drro->drr_object, FTAG, &db));
1139 1140 dmu_buf_will_dirty(db, tx);
1140 1141
1141 1142 ASSERT3U(db->db_size, >=, drro->drr_bonuslen);
1142 1143 bcopy(data, db->db_data, drro->drr_bonuslen);
1143 1144 if (ra->byteswap) {
1144 1145 dmu_object_byteswap_t byteswap =
1145 1146 DMU_OT_BYTESWAP(drro->drr_bonustype);
1146 1147 dmu_ot_byteswap[byteswap].ob_func(db->db_data,
1147 1148 drro->drr_bonuslen);
1148 1149 }
1149 1150 dmu_buf_rele(db, FTAG);
1150 1151 }
1151 1152 dmu_tx_commit(tx);
1152 1153 return (0);
1153 1154 }
1154 1155
1155 1156 /* ARGSUSED */
1156 1157 static int
1157 1158 restore_freeobjects(struct restorearg *ra, objset_t *os,
1158 1159 struct drr_freeobjects *drrfo)
1159 1160 {
1160 1161 uint64_t obj;
1161 1162
1162 1163 if (drrfo->drr_firstobj + drrfo->drr_numobjs < drrfo->drr_firstobj)
1163 1164 return (EINVAL);
1164 1165
1165 1166 for (obj = drrfo->drr_firstobj;
1166 1167 obj < drrfo->drr_firstobj + drrfo->drr_numobjs;
1167 1168 (void) dmu_object_next(os, &obj, FALSE, 0)) {
1168 1169 int err;
1169 1170
1170 1171 if (dmu_object_info(os, obj, NULL) != 0)
1171 1172 continue;
1172 1173
1173 1174 err = dmu_free_object(os, obj);
1174 1175 if (err)
1175 1176 return (err);
1176 1177 }
1177 1178 return (0);
1178 1179 }
1179 1180
1180 1181 static int
1181 1182 restore_write(struct restorearg *ra, objset_t *os,
1182 1183 struct drr_write *drrw)
1183 1184 {
1184 1185 dmu_tx_t *tx;
1185 1186 void *data;
1186 1187 int err;
1187 1188
1188 1189 if (drrw->drr_offset + drrw->drr_length < drrw->drr_offset ||
1189 1190 !DMU_OT_IS_VALID(drrw->drr_type))
1190 1191 return (EINVAL);
1191 1192
1192 1193 data = restore_read(ra, drrw->drr_length);
1193 1194 if (data == NULL)
1194 1195 return (ra->err);
1195 1196
1196 1197 if (dmu_object_info(os, drrw->drr_object, NULL) != 0)
1197 1198 return (EINVAL);
1198 1199
1199 1200 tx = dmu_tx_create(os);
1200 1201
1201 1202 dmu_tx_hold_write(tx, drrw->drr_object,
1202 1203 drrw->drr_offset, drrw->drr_length);
1203 1204 err = dmu_tx_assign(tx, TXG_WAIT);
1204 1205 if (err) {
1205 1206 dmu_tx_abort(tx);
1206 1207 return (err);
1207 1208 }
1208 1209 if (ra->byteswap) {
1209 1210 dmu_object_byteswap_t byteswap =
1210 1211 DMU_OT_BYTESWAP(drrw->drr_type);
1211 1212 dmu_ot_byteswap[byteswap].ob_func(data, drrw->drr_length);
1212 1213 }
1213 1214 dmu_write(os, drrw->drr_object,
1214 1215 drrw->drr_offset, drrw->drr_length, data, tx);
1215 1216 dmu_tx_commit(tx);
1216 1217 return (0);
1217 1218 }
1218 1219
1219 1220 /*
1220 1221 * Handle a DRR_WRITE_BYREF record. This record is used in dedup'ed
1221 1222 * streams to refer to a copy of the data that is already on the
1222 1223 * system because it came in earlier in the stream. This function
1223 1224 * finds the earlier copy of the data, and uses that copy instead of
1224 1225 * data from the stream to fulfill this write.
1225 1226 */
1226 1227 static int
1227 1228 restore_write_byref(struct restorearg *ra, objset_t *os,
1228 1229 struct drr_write_byref *drrwbr)
1229 1230 {
1230 1231 dmu_tx_t *tx;
1231 1232 int err;
1232 1233 guid_map_entry_t gmesrch;
1233 1234 guid_map_entry_t *gmep;
1234 1235 avl_index_t where;
1235 1236 objset_t *ref_os = NULL;
1236 1237 dmu_buf_t *dbp;
1237 1238
1238 1239 if (drrwbr->drr_offset + drrwbr->drr_length < drrwbr->drr_offset)
1239 1240 return (EINVAL);
1240 1241
1241 1242 /*
1242 1243 * If the GUID of the referenced dataset is different from the
1243 1244 * GUID of the target dataset, find the referenced dataset.
1244 1245 */
1245 1246 if (drrwbr->drr_toguid != drrwbr->drr_refguid) {
1246 1247 gmesrch.guid = drrwbr->drr_refguid;
1247 1248 if ((gmep = avl_find(ra->guid_to_ds_map, &gmesrch,
1248 1249 &where)) == NULL) {
1249 1250 return (EINVAL);
1250 1251 }
1251 1252 if (dmu_objset_from_ds(gmep->gme_ds, &ref_os))
1252 1253 return (EINVAL);
1253 1254 } else {
1254 1255 ref_os = os;
1255 1256 }
1256 1257
1257 1258 if (err = dmu_buf_hold(ref_os, drrwbr->drr_refobject,
1258 1259 drrwbr->drr_refoffset, FTAG, &dbp, DMU_READ_PREFETCH))
1259 1260 return (err);
1260 1261
1261 1262 tx = dmu_tx_create(os);
1262 1263
1263 1264 dmu_tx_hold_write(tx, drrwbr->drr_object,
1264 1265 drrwbr->drr_offset, drrwbr->drr_length);
1265 1266 err = dmu_tx_assign(tx, TXG_WAIT);
1266 1267 if (err) {
1267 1268 dmu_tx_abort(tx);
1268 1269 return (err);
1269 1270 }
1270 1271 dmu_write(os, drrwbr->drr_object,
1271 1272 drrwbr->drr_offset, drrwbr->drr_length, dbp->db_data, tx);
1272 1273 dmu_buf_rele(dbp, FTAG);
1273 1274 dmu_tx_commit(tx);
1274 1275 return (0);
1275 1276 }
1276 1277
1277 1278 static int
1278 1279 restore_spill(struct restorearg *ra, objset_t *os, struct drr_spill *drrs)
1279 1280 {
1280 1281 dmu_tx_t *tx;
1281 1282 void *data;
1282 1283 dmu_buf_t *db, *db_spill;
1283 1284 int err;
1284 1285
1285 1286 if (drrs->drr_length < SPA_MINBLOCKSIZE ||
1286 1287 drrs->drr_length > SPA_MAXBLOCKSIZE)
1287 1288 return (EINVAL);
1288 1289
1289 1290 data = restore_read(ra, drrs->drr_length);
1290 1291 if (data == NULL)
1291 1292 return (ra->err);
1292 1293
1293 1294 if (dmu_object_info(os, drrs->drr_object, NULL) != 0)
1294 1295 return (EINVAL);
1295 1296
1296 1297 VERIFY(0 == dmu_bonus_hold(os, drrs->drr_object, FTAG, &db));
1297 1298 if ((err = dmu_spill_hold_by_bonus(db, FTAG, &db_spill)) != 0) {
1298 1299 dmu_buf_rele(db, FTAG);
1299 1300 return (err);
1300 1301 }
1301 1302
1302 1303 tx = dmu_tx_create(os);
1303 1304
1304 1305 dmu_tx_hold_spill(tx, db->db_object);
1305 1306
1306 1307 err = dmu_tx_assign(tx, TXG_WAIT);
1307 1308 if (err) {
1308 1309 dmu_buf_rele(db, FTAG);
1309 1310 dmu_buf_rele(db_spill, FTAG);
1310 1311 dmu_tx_abort(tx);
1311 1312 return (err);
1312 1313 }
1313 1314 dmu_buf_will_dirty(db_spill, tx);
1314 1315
1315 1316 if (db_spill->db_size < drrs->drr_length)
1316 1317 VERIFY(0 == dbuf_spill_set_blksz(db_spill,
1317 1318 drrs->drr_length, tx));
1318 1319 bcopy(data, db_spill->db_data, drrs->drr_length);
1319 1320
1320 1321 dmu_buf_rele(db, FTAG);
1321 1322 dmu_buf_rele(db_spill, FTAG);
1322 1323
1323 1324 dmu_tx_commit(tx);
1324 1325 return (0);
1325 1326 }
1326 1327
1327 1328 /* ARGSUSED */
1328 1329 static int
1329 1330 restore_free(struct restorearg *ra, objset_t *os,
1330 1331 struct drr_free *drrf)
1331 1332 {
1332 1333 int err;
1333 1334
1334 1335 if (drrf->drr_length != -1ULL &&
1335 1336 drrf->drr_offset + drrf->drr_length < drrf->drr_offset)
1336 1337 return (EINVAL);
1337 1338
1338 1339 if (dmu_object_info(os, drrf->drr_object, NULL) != 0)
1339 1340 return (EINVAL);
1340 1341
1341 1342 err = dmu_free_long_range(os, drrf->drr_object,
1342 1343 drrf->drr_offset, drrf->drr_length);
1343 1344 return (err);
1344 1345 }
1345 1346
1346 1347 /*
1347 1348 * NB: callers *must* call dmu_recv_end() if this succeeds.
1348 1349 */
1349 1350 int
1350 1351 dmu_recv_stream(dmu_recv_cookie_t *drc, vnode_t *vp, offset_t *voffp,
1351 1352 int cleanup_fd, uint64_t *action_handlep)
1352 1353 {
1353 1354 struct restorearg ra = { 0 };
1354 1355 dmu_replay_record_t *drr;
1355 1356 objset_t *os;
1356 1357 zio_cksum_t pcksum;
1357 1358 int featureflags;
1358 1359
1359 1360 if (drc->drc_drrb->drr_magic == BSWAP_64(DMU_BACKUP_MAGIC))
1360 1361 ra.byteswap = TRUE;
1361 1362
1362 1363 {
1363 1364 /* compute checksum of drr_begin record */
1364 1365 dmu_replay_record_t *drr;
1365 1366 drr = kmem_zalloc(sizeof (dmu_replay_record_t), KM_SLEEP);
1366 1367
1367 1368 drr->drr_type = DRR_BEGIN;
1368 1369 drr->drr_u.drr_begin = *drc->drc_drrb;
1369 1370 if (ra.byteswap) {
1370 1371 fletcher_4_incremental_byteswap(drr,
1371 1372 sizeof (dmu_replay_record_t), &ra.cksum);
1372 1373 } else {
1373 1374 fletcher_4_incremental_native(drr,
1374 1375 sizeof (dmu_replay_record_t), &ra.cksum);
1375 1376 }
1376 1377 kmem_free(drr, sizeof (dmu_replay_record_t));
1377 1378 }
1378 1379
1379 1380 if (ra.byteswap) {
1380 1381 struct drr_begin *drrb = drc->drc_drrb;
1381 1382 drrb->drr_magic = BSWAP_64(drrb->drr_magic);
1382 1383 drrb->drr_versioninfo = BSWAP_64(drrb->drr_versioninfo);
1383 1384 drrb->drr_creation_time = BSWAP_64(drrb->drr_creation_time);
1384 1385 drrb->drr_type = BSWAP_32(drrb->drr_type);
1385 1386 drrb->drr_toguid = BSWAP_64(drrb->drr_toguid);
1386 1387 drrb->drr_fromguid = BSWAP_64(drrb->drr_fromguid);
1387 1388 }
1388 1389
1389 1390 ra.vp = vp;
1390 1391 ra.voff = *voffp;
1391 1392 ra.bufsize = 1<<20;
1392 1393 ra.buf = kmem_alloc(ra.bufsize, KM_SLEEP);
1393 1394
1394 1395 /* these were verified in dmu_recv_begin */
1395 1396 ASSERT(DMU_GET_STREAM_HDRTYPE(drc->drc_drrb->drr_versioninfo) ==
1396 1397 DMU_SUBSTREAM);
1397 1398 ASSERT(drc->drc_drrb->drr_type < DMU_OST_NUMTYPES);
1398 1399
1399 1400 /*
1400 1401 * Open the objset we are modifying.
1401 1402 */
1402 1403 VERIFY(dmu_objset_from_ds(drc->drc_real_ds, &os) == 0);
1403 1404
1404 1405 ASSERT(drc->drc_real_ds->ds_phys->ds_flags & DS_FLAG_INCONSISTENT);
1405 1406
1406 1407 featureflags = DMU_GET_FEATUREFLAGS(drc->drc_drrb->drr_versioninfo);
1407 1408
1408 1409 /* if this stream is dedup'ed, set up the avl tree for guid mapping */
1409 1410 if (featureflags & DMU_BACKUP_FEATURE_DEDUP) {
1410 1411 minor_t minor;
1411 1412
1412 1413 if (cleanup_fd == -1) {
1413 1414 ra.err = EBADF;
1414 1415 goto out;
1415 1416 }
1416 1417 ra.err = zfs_onexit_fd_hold(cleanup_fd, &minor);
1417 1418 if (ra.err) {
1418 1419 cleanup_fd = -1;
1419 1420 goto out;
1420 1421 }
1421 1422
1422 1423 if (*action_handlep == 0) {
1423 1424 ra.guid_to_ds_map =
1424 1425 kmem_alloc(sizeof (avl_tree_t), KM_SLEEP);
1425 1426 avl_create(ra.guid_to_ds_map, guid_compare,
1426 1427 sizeof (guid_map_entry_t),
1427 1428 offsetof(guid_map_entry_t, avlnode));
1428 1429 ra.err = zfs_onexit_add_cb(minor,
1429 1430 free_guid_map_onexit, ra.guid_to_ds_map,
1430 1431 action_handlep);
1431 1432 if (ra.err)
1432 1433 goto out;
1433 1434 } else {
1434 1435 ra.err = zfs_onexit_cb_data(minor, *action_handlep,
1435 1436 (void **)&ra.guid_to_ds_map);
1436 1437 if (ra.err)
1437 1438 goto out;
1438 1439 }
1439 1440
1440 1441 drc->drc_guid_to_ds_map = ra.guid_to_ds_map;
1441 1442 }
1442 1443
1443 1444 /*
1444 1445 * Read records and process them.
1445 1446 */
1446 1447 pcksum = ra.cksum;
1447 1448 while (ra.err == 0 &&
1448 1449 NULL != (drr = restore_read(&ra, sizeof (*drr)))) {
1449 1450 if (issig(JUSTLOOKING) && issig(FORREAL)) {
1450 1451 ra.err = EINTR;
1451 1452 goto out;
1452 1453 }
1453 1454
1454 1455 if (ra.byteswap)
1455 1456 backup_byteswap(drr);
1456 1457
1457 1458 switch (drr->drr_type) {
1458 1459 case DRR_OBJECT:
1459 1460 {
1460 1461 /*
1461 1462 * We need to make a copy of the record header,
1462 1463 * because restore_{object,write} may need to
1463 1464 * restore_read(), which will invalidate drr.
1464 1465 */
1465 1466 struct drr_object drro = drr->drr_u.drr_object;
1466 1467 ra.err = restore_object(&ra, os, &drro);
1467 1468 break;
1468 1469 }
1469 1470 case DRR_FREEOBJECTS:
1470 1471 {
1471 1472 struct drr_freeobjects drrfo =
1472 1473 drr->drr_u.drr_freeobjects;
1473 1474 ra.err = restore_freeobjects(&ra, os, &drrfo);
1474 1475 break;
1475 1476 }
1476 1477 case DRR_WRITE:
1477 1478 {
1478 1479 struct drr_write drrw = drr->drr_u.drr_write;
1479 1480 ra.err = restore_write(&ra, os, &drrw);
1480 1481 break;
1481 1482 }
1482 1483 case DRR_WRITE_BYREF:
1483 1484 {
1484 1485 struct drr_write_byref drrwbr =
1485 1486 drr->drr_u.drr_write_byref;
1486 1487 ra.err = restore_write_byref(&ra, os, &drrwbr);
1487 1488 break;
1488 1489 }
1489 1490 case DRR_FREE:
1490 1491 {
1491 1492 struct drr_free drrf = drr->drr_u.drr_free;
1492 1493 ra.err = restore_free(&ra, os, &drrf);
1493 1494 break;
1494 1495 }
1495 1496 case DRR_END:
1496 1497 {
1497 1498 struct drr_end drre = drr->drr_u.drr_end;
1498 1499 /*
1499 1500 * We compare against the *previous* checksum
1500 1501 * value, because the stored checksum is of
1501 1502 * everything before the DRR_END record.
1502 1503 */
1503 1504 if (!ZIO_CHECKSUM_EQUAL(drre.drr_checksum, pcksum))
1504 1505 ra.err = ECKSUM;
1505 1506 goto out;
1506 1507 }
1507 1508 case DRR_SPILL:
1508 1509 {
1509 1510 struct drr_spill drrs = drr->drr_u.drr_spill;
1510 1511 ra.err = restore_spill(&ra, os, &drrs);
1511 1512 break;
1512 1513 }
1513 1514 default:
1514 1515 ra.err = EINVAL;
1515 1516 goto out;
1516 1517 }
1517 1518 pcksum = ra.cksum;
1518 1519 }
1519 1520 ASSERT(ra.err != 0);
1520 1521
1521 1522 out:
1522 1523 if ((featureflags & DMU_BACKUP_FEATURE_DEDUP) && (cleanup_fd != -1))
1523 1524 zfs_onexit_fd_rele(cleanup_fd);
1524 1525
1525 1526 if (ra.err != 0) {
1526 1527 /*
1527 1528 * destroy what we created, so we don't leave it in the
1528 1529 * inconsistent restoring state.
1529 1530 */
1530 1531 txg_wait_synced(drc->drc_real_ds->ds_dir->dd_pool, 0);
1531 1532
1532 1533 (void) dsl_dataset_destroy(drc->drc_real_ds, dmu_recv_tag,
1533 1534 B_FALSE);
1534 1535 if (drc->drc_real_ds != drc->drc_logical_ds) {
1535 1536 mutex_exit(&drc->drc_logical_ds->ds_recvlock);
1536 1537 dsl_dataset_rele(drc->drc_logical_ds, dmu_recv_tag);
1537 1538 }
1538 1539 }
1539 1540
1540 1541 kmem_free(ra.buf, ra.bufsize);
1541 1542 *voffp = ra.voff;
1542 1543 return (ra.err);
1543 1544 }
1544 1545
1545 1546 struct recvendsyncarg {
1546 1547 char *tosnap;
1547 1548 uint64_t creation_time;
1548 1549 uint64_t toguid;
1549 1550 };
1550 1551
1551 1552 static int
1552 1553 recv_end_check(void *arg1, void *arg2, dmu_tx_t *tx)
1553 1554 {
1554 1555 dsl_dataset_t *ds = arg1;
1555 1556 struct recvendsyncarg *resa = arg2;
1556 1557
1557 1558 return (dsl_dataset_snapshot_check(ds, resa->tosnap, tx));
1558 1559 }
1559 1560
1560 1561 static void
1561 1562 recv_end_sync(void *arg1, void *arg2, dmu_tx_t *tx)
1562 1563 {
1563 1564 dsl_dataset_t *ds = arg1;
1564 1565 struct recvendsyncarg *resa = arg2;
1565 1566
1566 1567 dsl_dataset_snapshot_sync(ds, resa->tosnap, tx);
1567 1568
1568 1569 /* set snapshot's creation time and guid */
1569 1570 dmu_buf_will_dirty(ds->ds_prev->ds_dbuf, tx);
1570 1571 ds->ds_prev->ds_phys->ds_creation_time = resa->creation_time;
1571 1572 ds->ds_prev->ds_phys->ds_guid = resa->toguid;
1572 1573 ds->ds_prev->ds_phys->ds_flags &= ~DS_FLAG_INCONSISTENT;
1573 1574
1574 1575 dmu_buf_will_dirty(ds->ds_dbuf, tx);
1575 1576 ds->ds_phys->ds_flags &= ~DS_FLAG_INCONSISTENT;
1577 + spa_history_log_internal_ds(ds, "finished receiving", tx, "");
1576 1578 }
1577 1579
1578 1580 static int
1579 1581 add_ds_to_guidmap(avl_tree_t *guid_map, dsl_dataset_t *ds)
1580 1582 {
1581 1583 dsl_pool_t *dp = ds->ds_dir->dd_pool;
1582 1584 uint64_t snapobj = ds->ds_phys->ds_prev_snap_obj;
1583 1585 dsl_dataset_t *snapds;
1584 1586 guid_map_entry_t *gmep;
1585 1587 int err;
1586 1588
1587 1589 ASSERT(guid_map != NULL);
1588 1590
1589 1591 rw_enter(&dp->dp_config_rwlock, RW_READER);
1590 1592 err = dsl_dataset_hold_obj(dp, snapobj, guid_map, &snapds);
1591 1593 if (err == 0) {
1592 1594 gmep = kmem_alloc(sizeof (guid_map_entry_t), KM_SLEEP);
1593 1595 gmep->guid = snapds->ds_phys->ds_guid;
1594 1596 gmep->gme_ds = snapds;
1595 1597 avl_add(guid_map, gmep);
1596 1598 }
1597 1599
1598 1600 rw_exit(&dp->dp_config_rwlock);
1599 1601 return (err);
1600 1602 }
1601 1603
1602 1604 static int
1603 1605 dmu_recv_existing_end(dmu_recv_cookie_t *drc)
1604 1606 {
1605 1607 struct recvendsyncarg resa;
1606 1608 dsl_dataset_t *ds = drc->drc_logical_ds;
1607 1609 int err, myerr;
1608 1610
1609 1611 /*
1610 1612 * XXX hack; seems the ds is still dirty and dsl_pool_zil_clean()
1611 1613 * expects it to have a ds_user_ptr (and zil), but clone_swap()
1612 1614 * can close it.
1613 1615 */
1614 1616 txg_wait_synced(ds->ds_dir->dd_pool, 0);
1615 1617
1616 1618 if (dsl_dataset_tryown(ds, FALSE, dmu_recv_tag)) {
1617 1619 err = dsl_dataset_clone_swap(drc->drc_real_ds, ds,
1618 1620 drc->drc_force);
1619 1621 if (err)
1620 1622 goto out;
1621 1623 } else {
1622 1624 mutex_exit(&ds->ds_recvlock);
1623 1625 dsl_dataset_rele(ds, dmu_recv_tag);
1624 1626 (void) dsl_dataset_destroy(drc->drc_real_ds, dmu_recv_tag,
1625 1627 B_FALSE);
1626 1628 return (EBUSY);
1627 1629 }
1628 1630
1629 1631 resa.creation_time = drc->drc_drrb->drr_creation_time;
1630 1632 resa.toguid = drc->drc_drrb->drr_toguid;
1631 1633 resa.tosnap = drc->drc_tosnap;
1632 1634
1633 1635 err = dsl_sync_task_do(ds->ds_dir->dd_pool,
1634 1636 recv_end_check, recv_end_sync, ds, &resa, 3);
1635 1637 if (err) {
1636 1638 /* swap back */
1637 1639 (void) dsl_dataset_clone_swap(drc->drc_real_ds, ds, B_TRUE);
1638 1640 }
1639 1641
1640 1642 out:
1641 1643 mutex_exit(&ds->ds_recvlock);
1642 1644 if (err == 0 && drc->drc_guid_to_ds_map != NULL)
1643 1645 (void) add_ds_to_guidmap(drc->drc_guid_to_ds_map, ds);
1644 1646 dsl_dataset_disown(ds, dmu_recv_tag);
1645 1647 myerr = dsl_dataset_destroy(drc->drc_real_ds, dmu_recv_tag, B_FALSE);
1646 1648 ASSERT3U(myerr, ==, 0);
1647 1649 return (err);
1648 1650 }
1649 1651
1650 1652 static int
1651 1653 dmu_recv_new_end(dmu_recv_cookie_t *drc)
1652 1654 {
1653 1655 struct recvendsyncarg resa;
1654 1656 dsl_dataset_t *ds = drc->drc_logical_ds;
1655 1657 int err;
1656 1658
1657 1659 /*
1658 1660 * XXX hack; seems the ds is still dirty and dsl_pool_zil_clean()
1659 1661 * expects it to have a ds_user_ptr (and zil), but clone_swap()
1660 1662 * can close it.
1661 1663 */
1662 1664 txg_wait_synced(ds->ds_dir->dd_pool, 0);
1663 1665
1664 1666 resa.creation_time = drc->drc_drrb->drr_creation_time;
1665 1667 resa.toguid = drc->drc_drrb->drr_toguid;
1666 1668 resa.tosnap = drc->drc_tosnap;
1667 1669
1668 1670 err = dsl_sync_task_do(ds->ds_dir->dd_pool,
1669 1671 recv_end_check, recv_end_sync, ds, &resa, 3);
1670 1672 if (err) {
1671 1673 /* clean up the fs we just recv'd into */
1672 1674 (void) dsl_dataset_destroy(ds, dmu_recv_tag, B_FALSE);
1673 1675 } else {
1674 1676 if (drc->drc_guid_to_ds_map != NULL)
1675 1677 (void) add_ds_to_guidmap(drc->drc_guid_to_ds_map, ds);
1676 1678 /* release the hold from dmu_recv_begin */
1677 1679 dsl_dataset_disown(ds, dmu_recv_tag);
1678 1680 }
1679 1681 return (err);
1680 1682 }
1681 1683
1682 1684 int
1683 1685 dmu_recv_end(dmu_recv_cookie_t *drc)
1684 1686 {
1685 1687 if (drc->drc_logical_ds != drc->drc_real_ds)
1686 1688 return (dmu_recv_existing_end(drc));
1687 1689 else
1688 1690 return (dmu_recv_new_end(drc));
1689 1691 }