3888 zfs recv -F should destroy any snapshots created since the incremental source
Reviewed by: George Wilson <george.wilson@delphix.com>
Reviewed by: Adam Leventhal <ahl@delphix.com>
Reviewed by: Peng Dai <peng.dai@delphix.com>
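
Context for reviewers (not part of the webrev): a minimal sketch of the scenario this change enables, using hypothetical pool, dataset, and snapshot names.

    # Receiving side has created snapshots after the last incremental
    # source @A (e.g. backup/fs@junk).
    zfs send -i @A tank/fs@B | zfs recv -F backup/fs
    # Previously this could fail because backup/fs had snapshots newer
    # than the incremental source; with this change, -F destroys those
    # newer snapshots (backup/fs@junk) so the incremental can apply.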
--- old/usr/src/uts/common/fs/zfs/dmu_send.c
+++ new/usr/src/uts/common/fs/zfs/dmu_send.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21 /*
22 22 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
23 23 * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
24 24 * Copyright (c) 2013 by Delphix. All rights reserved.
25 25 * Copyright (c) 2012, Joyent, Inc. All rights reserved.
26 26 */
27 27
28 28 #include <sys/dmu.h>
29 29 #include <sys/dmu_impl.h>
30 30 #include <sys/dmu_tx.h>
31 31 #include <sys/dbuf.h>
32 32 #include <sys/dnode.h>
33 33 #include <sys/zfs_context.h>
34 34 #include <sys/dmu_objset.h>
35 35 #include <sys/dmu_traverse.h>
36 36 #include <sys/dsl_dataset.h>
37 37 #include <sys/dsl_dir.h>
38 38 #include <sys/dsl_prop.h>
39 39 #include <sys/dsl_pool.h>
40 40 #include <sys/dsl_synctask.h>
41 41 #include <sys/zfs_ioctl.h>
42 42 #include <sys/zap.h>
43 43 #include <sys/zio_checksum.h>
44 44 #include <sys/zfs_znode.h>
45 45 #include <zfs_fletcher.h>
46 46 #include <sys/avl.h>
47 47 #include <sys/ddt.h>
48 48 #include <sys/zfs_onexit.h>
49 49 #include <sys/dmu_send.h>
50 50 #include <sys/dsl_destroy.h>
51 51
52 52 /* Set this tunable to TRUE to replace corrupt data with 0x2f5baddb10c */
53 53 int zfs_send_corrupt_data = B_FALSE;
54 54
55 55 static char *dmu_recv_tag = "dmu_recv_tag";
56 56 static const char *recv_clone_name = "%recv";
57 57
58 58 static int
59 59 dump_bytes(dmu_sendarg_t *dsp, void *buf, int len)
60 60 {
61 61 dsl_dataset_t *ds = dsp->dsa_os->os_dsl_dataset;
62 62 ssize_t resid; /* have to get resid to get detailed errno */
63 63 ASSERT0(len % 8);
64 64
65 65 fletcher_4_incremental_native(buf, len, &dsp->dsa_zc);
66 66 dsp->dsa_err = vn_rdwr(UIO_WRITE, dsp->dsa_vp,
67 67 (caddr_t)buf, len,
68 68 0, UIO_SYSSPACE, FAPPEND, RLIM64_INFINITY, CRED(), &resid);
69 69
70 70 mutex_enter(&ds->ds_sendstream_lock);
71 71 *dsp->dsa_off += len;
72 72 mutex_exit(&ds->ds_sendstream_lock);
73 73
74 74 return (dsp->dsa_err);
75 75 }
76 76
77 77 static int
78 78 dump_free(dmu_sendarg_t *dsp, uint64_t object, uint64_t offset,
79 79 uint64_t length)
80 80 {
81 81 struct drr_free *drrf = &(dsp->dsa_drr->drr_u.drr_free);
82 82
83 83 if (length != -1ULL && offset + length < offset)
84 84 length = -1ULL;
85 85
86 86 /*
87 87 * If there is a pending op, but it's not PENDING_FREE, push it out,
88 88 * since free block aggregation can only be done for blocks of the
89 89 * same type (i.e., DRR_FREE records can only be aggregated with
90 90 * other DRR_FREE records. DRR_FREEOBJECTS records can only be
91 91 * aggregated with other DRR_FREEOBJECTS records.
92 92 */
93 93 if (dsp->dsa_pending_op != PENDING_NONE &&
94 94 dsp->dsa_pending_op != PENDING_FREE) {
95 95 if (dump_bytes(dsp, dsp->dsa_drr,
96 96 sizeof (dmu_replay_record_t)) != 0)
97 97 return (SET_ERROR(EINTR));
98 98 dsp->dsa_pending_op = PENDING_NONE;
99 99 }
100 100
101 101 if (dsp->dsa_pending_op == PENDING_FREE) {
102 102 /*
103 103 * There should never be a PENDING_FREE if length is -1
104 104 * (because dump_dnode is the only place where this
105 105 * function is called with a -1, and only after flushing
106 106 * any pending record).
107 107 */
108 108 ASSERT(length != -1ULL);
109 109 /*
110 110 * Check to see whether this free block can be aggregated
111 111 * with pending one.
112 112 */
113 113 if (drrf->drr_object == object && drrf->drr_offset +
114 114 drrf->drr_length == offset) {
115 115 drrf->drr_length += length;
116 116 return (0);
117 117 } else {
118 118 /* not a continuation. Push out pending record */
119 119 if (dump_bytes(dsp, dsp->dsa_drr,
120 120 sizeof (dmu_replay_record_t)) != 0)
121 121 return (SET_ERROR(EINTR));
122 122 dsp->dsa_pending_op = PENDING_NONE;
123 123 }
124 124 }
125 125 /* create a FREE record and make it pending */
126 126 bzero(dsp->dsa_drr, sizeof (dmu_replay_record_t));
127 127 dsp->dsa_drr->drr_type = DRR_FREE;
128 128 drrf->drr_object = object;
129 129 drrf->drr_offset = offset;
130 130 drrf->drr_length = length;
131 131 drrf->drr_toguid = dsp->dsa_toguid;
132 132 if (length == -1ULL) {
133 133 if (dump_bytes(dsp, dsp->dsa_drr,
134 134 sizeof (dmu_replay_record_t)) != 0)
135 135 return (SET_ERROR(EINTR));
136 136 } else {
137 137 dsp->dsa_pending_op = PENDING_FREE;
138 138 }
139 139
140 140 return (0);
141 141 }
142 142
143 143 static int
144 144 dump_data(dmu_sendarg_t *dsp, dmu_object_type_t type,
145 145 uint64_t object, uint64_t offset, int blksz, const blkptr_t *bp, void *data)
146 146 {
147 147 struct drr_write *drrw = &(dsp->dsa_drr->drr_u.drr_write);
148 148
149 149
150 150 /*
151 151 * If there is any kind of pending aggregation (currently either
152 152 * a grouping of free objects or free blocks), push it out to
153 153 * the stream, since aggregation can't be done across operations
154 154 * of different types.
155 155 */
156 156 if (dsp->dsa_pending_op != PENDING_NONE) {
157 157 if (dump_bytes(dsp, dsp->dsa_drr,
158 158 sizeof (dmu_replay_record_t)) != 0)
159 159 return (SET_ERROR(EINTR));
160 160 dsp->dsa_pending_op = PENDING_NONE;
161 161 }
162 162 /* write a DATA record */
163 163 bzero(dsp->dsa_drr, sizeof (dmu_replay_record_t));
164 164 dsp->dsa_drr->drr_type = DRR_WRITE;
165 165 drrw->drr_object = object;
166 166 drrw->drr_type = type;
167 167 drrw->drr_offset = offset;
168 168 drrw->drr_length = blksz;
169 169 drrw->drr_toguid = dsp->dsa_toguid;
170 170 drrw->drr_checksumtype = BP_GET_CHECKSUM(bp);
171 171 if (zio_checksum_table[drrw->drr_checksumtype].ci_dedup)
172 172 drrw->drr_checksumflags |= DRR_CHECKSUM_DEDUP;
173 173 DDK_SET_LSIZE(&drrw->drr_key, BP_GET_LSIZE(bp));
174 174 DDK_SET_PSIZE(&drrw->drr_key, BP_GET_PSIZE(bp));
175 175 DDK_SET_COMPRESS(&drrw->drr_key, BP_GET_COMPRESS(bp));
176 176 drrw->drr_key.ddk_cksum = bp->blk_cksum;
177 177
178 178 if (dump_bytes(dsp, dsp->dsa_drr, sizeof (dmu_replay_record_t)) != 0)
179 179 return (SET_ERROR(EINTR));
180 180 if (dump_bytes(dsp, data, blksz) != 0)
181 181 return (SET_ERROR(EINTR));
182 182 return (0);
183 183 }
184 184
185 185 static int
186 186 dump_spill(dmu_sendarg_t *dsp, uint64_t object, int blksz, void *data)
187 187 {
188 188 struct drr_spill *drrs = &(dsp->dsa_drr->drr_u.drr_spill);
189 189
190 190 if (dsp->dsa_pending_op != PENDING_NONE) {
191 191 if (dump_bytes(dsp, dsp->dsa_drr,
192 192 sizeof (dmu_replay_record_t)) != 0)
193 193 return (SET_ERROR(EINTR));
194 194 dsp->dsa_pending_op = PENDING_NONE;
195 195 }
196 196
197 197 /* write a SPILL record */
198 198 bzero(dsp->dsa_drr, sizeof (dmu_replay_record_t));
199 199 dsp->dsa_drr->drr_type = DRR_SPILL;
200 200 drrs->drr_object = object;
201 201 drrs->drr_length = blksz;
202 202 drrs->drr_toguid = dsp->dsa_toguid;
203 203
204 204 if (dump_bytes(dsp, dsp->dsa_drr, sizeof (dmu_replay_record_t)))
205 205 return (SET_ERROR(EINTR));
206 206 if (dump_bytes(dsp, data, blksz))
207 207 return (SET_ERROR(EINTR));
208 208 return (0);
209 209 }
210 210
211 211 static int
212 212 dump_freeobjects(dmu_sendarg_t *dsp, uint64_t firstobj, uint64_t numobjs)
213 213 {
214 214 struct drr_freeobjects *drrfo = &(dsp->dsa_drr->drr_u.drr_freeobjects);
215 215
216 216 /*
217 217 * If there is a pending op, but it's not PENDING_FREEOBJECTS,
218 218 * push it out, since free block aggregation can only be done for
219 219 * blocks of the same type (i.e., DRR_FREE records can only be
220 220 * aggregated with other DRR_FREE records. DRR_FREEOBJECTS records
221 221 * can only be aggregated with other DRR_FREEOBJECTS records.
222 222 */
223 223 if (dsp->dsa_pending_op != PENDING_NONE &&
224 224 dsp->dsa_pending_op != PENDING_FREEOBJECTS) {
225 225 if (dump_bytes(dsp, dsp->dsa_drr,
226 226 sizeof (dmu_replay_record_t)) != 0)
227 227 return (SET_ERROR(EINTR));
228 228 dsp->dsa_pending_op = PENDING_NONE;
229 229 }
230 230 if (dsp->dsa_pending_op == PENDING_FREEOBJECTS) {
231 231 /*
232 232 * See whether this free object array can be aggregated
233 233 * with pending one
234 234 */
235 235 if (drrfo->drr_firstobj + drrfo->drr_numobjs == firstobj) {
236 236 drrfo->drr_numobjs += numobjs;
237 237 return (0);
238 238 } else {
239 239 /* can't be aggregated. Push out pending record */
240 240 if (dump_bytes(dsp, dsp->dsa_drr,
241 241 sizeof (dmu_replay_record_t)) != 0)
242 242 return (SET_ERROR(EINTR));
243 243 dsp->dsa_pending_op = PENDING_NONE;
244 244 }
245 245 }
246 246
247 247 /* write a FREEOBJECTS record */
248 248 bzero(dsp->dsa_drr, sizeof (dmu_replay_record_t));
249 249 dsp->dsa_drr->drr_type = DRR_FREEOBJECTS;
250 250 drrfo->drr_firstobj = firstobj;
251 251 drrfo->drr_numobjs = numobjs;
252 252 drrfo->drr_toguid = dsp->dsa_toguid;
253 253
254 254 dsp->dsa_pending_op = PENDING_FREEOBJECTS;
255 255
256 256 return (0);
257 257 }
258 258
259 259 static int
260 260 dump_dnode(dmu_sendarg_t *dsp, uint64_t object, dnode_phys_t *dnp)
261 261 {
262 262 struct drr_object *drro = &(dsp->dsa_drr->drr_u.drr_object);
263 263
264 264 if (dnp == NULL || dnp->dn_type == DMU_OT_NONE)
265 265 return (dump_freeobjects(dsp, object, 1));
266 266
267 267 if (dsp->dsa_pending_op != PENDING_NONE) {
268 268 if (dump_bytes(dsp, dsp->dsa_drr,
269 269 sizeof (dmu_replay_record_t)) != 0)
270 270 return (SET_ERROR(EINTR));
271 271 dsp->dsa_pending_op = PENDING_NONE;
272 272 }
273 273
274 274 /* write an OBJECT record */
275 275 bzero(dsp->dsa_drr, sizeof (dmu_replay_record_t));
276 276 dsp->dsa_drr->drr_type = DRR_OBJECT;
277 277 drro->drr_object = object;
278 278 drro->drr_type = dnp->dn_type;
279 279 drro->drr_bonustype = dnp->dn_bonustype;
280 280 drro->drr_blksz = dnp->dn_datablkszsec << SPA_MINBLOCKSHIFT;
281 281 drro->drr_bonuslen = dnp->dn_bonuslen;
282 282 drro->drr_checksumtype = dnp->dn_checksum;
283 283 drro->drr_compress = dnp->dn_compress;
284 284 drro->drr_toguid = dsp->dsa_toguid;
285 285
286 286 if (dump_bytes(dsp, dsp->dsa_drr, sizeof (dmu_replay_record_t)) != 0)
287 287 return (SET_ERROR(EINTR));
288 288
289 289 if (dump_bytes(dsp, DN_BONUS(dnp), P2ROUNDUP(dnp->dn_bonuslen, 8)) != 0)
290 290 return (SET_ERROR(EINTR));
291 291
292 292 /* free anything past the end of the file */
293 293 if (dump_free(dsp, object, (dnp->dn_maxblkid + 1) *
294 294 (dnp->dn_datablkszsec << SPA_MINBLOCKSHIFT), -1ULL))
295 295 return (SET_ERROR(EINTR));
296 296 if (dsp->dsa_err != 0)
297 297 return (SET_ERROR(EINTR));
298 298 return (0);
299 299 }
300 300
301 301 #define BP_SPAN(dnp, level) \
302 302 (((uint64_t)dnp->dn_datablkszsec) << (SPA_MINBLOCKSHIFT + \
303 303 (level) * (dnp->dn_indblkshift - SPA_BLKPTRSHIFT)))
304 304
305 305 /* ARGSUSED */
306 306 static int
307 307 backup_cb(spa_t *spa, zilog_t *zilog, const blkptr_t *bp,
308 308 const zbookmark_t *zb, const dnode_phys_t *dnp, void *arg)
309 309 {
310 310 dmu_sendarg_t *dsp = arg;
311 311 dmu_object_type_t type = bp ? BP_GET_TYPE(bp) : DMU_OT_NONE;
312 312 int err = 0;
313 313
314 314 if (issig(JUSTLOOKING) && issig(FORREAL))
315 315 return (SET_ERROR(EINTR));
316 316
317 317 if (zb->zb_object != DMU_META_DNODE_OBJECT &&
318 318 DMU_OBJECT_IS_SPECIAL(zb->zb_object)) {
319 319 return (0);
320 320 } else if (bp == NULL && zb->zb_object == DMU_META_DNODE_OBJECT) {
321 321 uint64_t span = BP_SPAN(dnp, zb->zb_level);
322 322 uint64_t dnobj = (zb->zb_blkid * span) >> DNODE_SHIFT;
323 323 err = dump_freeobjects(dsp, dnobj, span >> DNODE_SHIFT);
324 324 } else if (bp == NULL) {
325 325 uint64_t span = BP_SPAN(dnp, zb->zb_level);
326 326 err = dump_free(dsp, zb->zb_object, zb->zb_blkid * span, span);
327 327 } else if (zb->zb_level > 0 || type == DMU_OT_OBJSET) {
328 328 return (0);
329 329 } else if (type == DMU_OT_DNODE) {
330 330 dnode_phys_t *blk;
331 331 int i;
332 332 int blksz = BP_GET_LSIZE(bp);
333 333 uint32_t aflags = ARC_WAIT;
334 334 arc_buf_t *abuf;
335 335
336 336 if (arc_read(NULL, spa, bp, arc_getbuf_func, &abuf,
337 337 ZIO_PRIORITY_ASYNC_READ, ZIO_FLAG_CANFAIL,
338 338 &aflags, zb) != 0)
339 339 return (SET_ERROR(EIO));
340 340
341 341 blk = abuf->b_data;
342 342 for (i = 0; i < blksz >> DNODE_SHIFT; i++) {
343 343 uint64_t dnobj = (zb->zb_blkid <<
344 344 (DNODE_BLOCK_SHIFT - DNODE_SHIFT)) + i;
345 345 err = dump_dnode(dsp, dnobj, blk+i);
346 346 if (err != 0)
347 347 break;
348 348 }
349 349 (void) arc_buf_remove_ref(abuf, &abuf);
350 350 } else if (type == DMU_OT_SA) {
351 351 uint32_t aflags = ARC_WAIT;
352 352 arc_buf_t *abuf;
353 353 int blksz = BP_GET_LSIZE(bp);
354 354
355 355 if (arc_read(NULL, spa, bp, arc_getbuf_func, &abuf,
356 356 ZIO_PRIORITY_ASYNC_READ, ZIO_FLAG_CANFAIL,
357 357 &aflags, zb) != 0)
358 358 return (SET_ERROR(EIO));
359 359
360 360 err = dump_spill(dsp, zb->zb_object, blksz, abuf->b_data);
361 361 (void) arc_buf_remove_ref(abuf, &abuf);
362 362 } else { /* it's a level-0 block of a regular object */
363 363 uint32_t aflags = ARC_WAIT;
364 364 arc_buf_t *abuf;
365 365 int blksz = BP_GET_LSIZE(bp);
366 366
367 367 if (arc_read(NULL, spa, bp, arc_getbuf_func, &abuf,
368 368 ZIO_PRIORITY_ASYNC_READ, ZIO_FLAG_CANFAIL,
369 369 &aflags, zb) != 0) {
370 370 if (zfs_send_corrupt_data) {
371 371 /* Send a block filled with 0x"zfs badd bloc" */
372 372 abuf = arc_buf_alloc(spa, blksz, &abuf,
373 373 ARC_BUFC_DATA);
374 374 uint64_t *ptr;
375 375 for (ptr = abuf->b_data;
376 376 (char *)ptr < (char *)abuf->b_data + blksz;
377 377 ptr++)
378 378 *ptr = 0x2f5baddb10c;
379 379 } else {
380 380 return (SET_ERROR(EIO));
381 381 }
382 382 }
383 383
384 384 err = dump_data(dsp, type, zb->zb_object, zb->zb_blkid * blksz,
385 385 blksz, bp, abuf->b_data);
386 386 (void) arc_buf_remove_ref(abuf, &abuf);
387 387 }
388 388
389 389 ASSERT(err == 0 || err == EINTR);
390 390 return (err);
391 391 }
392 392
393 393 /*
394 394 * Releases dp, ds, and fromds, using the specified tag.
395 395 */
396 396 static int
397 397 dmu_send_impl(void *tag, dsl_pool_t *dp, dsl_dataset_t *ds,
398 398 dsl_dataset_t *fromds, int outfd, vnode_t *vp, offset_t *off)
399 399 {
400 400 objset_t *os;
401 401 dmu_replay_record_t *drr;
402 402 dmu_sendarg_t *dsp;
403 403 int err;
404 404 uint64_t fromtxg = 0;
405 405
406 406 if (fromds != NULL && !dsl_dataset_is_before(ds, fromds)) {
407 407 dsl_dataset_rele(fromds, tag);
408 408 dsl_dataset_rele(ds, tag);
409 409 dsl_pool_rele(dp, tag);
410 410 return (SET_ERROR(EXDEV));
411 411 }
412 412
413 413 err = dmu_objset_from_ds(ds, &os);
414 414 if (err != 0) {
415 415 if (fromds != NULL)
416 416 dsl_dataset_rele(fromds, tag);
417 417 dsl_dataset_rele(ds, tag);
418 418 dsl_pool_rele(dp, tag);
419 419 return (err);
420 420 }
421 421
422 422 drr = kmem_zalloc(sizeof (dmu_replay_record_t), KM_SLEEP);
423 423 drr->drr_type = DRR_BEGIN;
424 424 drr->drr_u.drr_begin.drr_magic = DMU_BACKUP_MAGIC;
425 425 DMU_SET_STREAM_HDRTYPE(drr->drr_u.drr_begin.drr_versioninfo,
426 426 DMU_SUBSTREAM);
427 427
428 428 #ifdef _KERNEL
429 429 if (dmu_objset_type(os) == DMU_OST_ZFS) {
430 430 uint64_t version;
431 431 if (zfs_get_zplprop(os, ZFS_PROP_VERSION, &version) != 0) {
432 432 kmem_free(drr, sizeof (dmu_replay_record_t));
433 433 if (fromds != NULL)
434 434 dsl_dataset_rele(fromds, tag);
435 435 dsl_dataset_rele(ds, tag);
436 436 dsl_pool_rele(dp, tag);
437 437 return (SET_ERROR(EINVAL));
438 438 }
439 439 if (version >= ZPL_VERSION_SA) {
440 440 DMU_SET_FEATUREFLAGS(
441 441 drr->drr_u.drr_begin.drr_versioninfo,
442 442 DMU_BACKUP_FEATURE_SA_SPILL);
443 443 }
444 444 }
445 445 #endif
446 446
447 447 drr->drr_u.drr_begin.drr_creation_time =
448 448 ds->ds_phys->ds_creation_time;
449 449 drr->drr_u.drr_begin.drr_type = dmu_objset_type(os);
450 450 if (fromds != NULL && ds->ds_dir != fromds->ds_dir)
451 451 drr->drr_u.drr_begin.drr_flags |= DRR_FLAG_CLONE;
452 452 drr->drr_u.drr_begin.drr_toguid = ds->ds_phys->ds_guid;
453 453 if (ds->ds_phys->ds_flags & DS_FLAG_CI_DATASET)
454 454 drr->drr_u.drr_begin.drr_flags |= DRR_FLAG_CI_DATA;
455 455
456 456 if (fromds != NULL)
457 457 drr->drr_u.drr_begin.drr_fromguid = fromds->ds_phys->ds_guid;
458 458 dsl_dataset_name(ds, drr->drr_u.drr_begin.drr_toname);
459 459
460 460 if (fromds != NULL) {
461 461 fromtxg = fromds->ds_phys->ds_creation_txg;
462 462 dsl_dataset_rele(fromds, tag);
463 463 fromds = NULL;
464 464 }
465 465
466 466 dsp = kmem_zalloc(sizeof (dmu_sendarg_t), KM_SLEEP);
467 467
468 468 dsp->dsa_drr = drr;
469 469 dsp->dsa_vp = vp;
470 470 dsp->dsa_outfd = outfd;
471 471 dsp->dsa_proc = curproc;
472 472 dsp->dsa_os = os;
473 473 dsp->dsa_off = off;
474 474 dsp->dsa_toguid = ds->ds_phys->ds_guid;
475 475 ZIO_SET_CHECKSUM(&dsp->dsa_zc, 0, 0, 0, 0);
476 476 dsp->dsa_pending_op = PENDING_NONE;
477 477
478 478 mutex_enter(&ds->ds_sendstream_lock);
479 479 list_insert_head(&ds->ds_sendstreams, dsp);
480 480 mutex_exit(&ds->ds_sendstream_lock);
481 481
482 482 dsl_dataset_long_hold(ds, FTAG);
483 483 dsl_pool_rele(dp, tag);
484 484
485 485 if (dump_bytes(dsp, drr, sizeof (dmu_replay_record_t)) != 0) {
486 486 err = dsp->dsa_err;
487 487 goto out;
488 488 }
489 489
490 490 err = traverse_dataset(ds, fromtxg, TRAVERSE_PRE | TRAVERSE_PREFETCH,
491 491 backup_cb, dsp);
492 492
493 493 if (dsp->dsa_pending_op != PENDING_NONE)
494 494 if (dump_bytes(dsp, drr, sizeof (dmu_replay_record_t)) != 0)
495 495 err = SET_ERROR(EINTR);
496 496
497 497 if (err != 0) {
498 498 if (err == EINTR && dsp->dsa_err != 0)
499 499 err = dsp->dsa_err;
500 500 goto out;
501 501 }
502 502
503 503 bzero(drr, sizeof (dmu_replay_record_t));
504 504 drr->drr_type = DRR_END;
505 505 drr->drr_u.drr_end.drr_checksum = dsp->dsa_zc;
506 506 drr->drr_u.drr_end.drr_toguid = dsp->dsa_toguid;
507 507
508 508 if (dump_bytes(dsp, drr, sizeof (dmu_replay_record_t)) != 0) {
509 509 err = dsp->dsa_err;
510 510 goto out;
511 511 }
512 512
513 513 out:
514 514 mutex_enter(&ds->ds_sendstream_lock);
515 515 list_remove(&ds->ds_sendstreams, dsp);
516 516 mutex_exit(&ds->ds_sendstream_lock);
517 517
518 518 kmem_free(drr, sizeof (dmu_replay_record_t));
519 519 kmem_free(dsp, sizeof (dmu_sendarg_t));
520 520
521 521 dsl_dataset_long_rele(ds, FTAG);
522 522 dsl_dataset_rele(ds, tag);
523 523
524 524 return (err);
525 525 }
526 526
527 527 int
528 528 dmu_send_obj(const char *pool, uint64_t tosnap, uint64_t fromsnap,
529 529 int outfd, vnode_t *vp, offset_t *off)
530 530 {
531 531 dsl_pool_t *dp;
532 532 dsl_dataset_t *ds;
533 533 dsl_dataset_t *fromds = NULL;
534 534 int err;
535 535
536 536 err = dsl_pool_hold(pool, FTAG, &dp);
537 537 if (err != 0)
538 538 return (err);
539 539
540 540 err = dsl_dataset_hold_obj(dp, tosnap, FTAG, &ds);
541 541 if (err != 0) {
542 542 dsl_pool_rele(dp, FTAG);
543 543 return (err);
544 544 }
545 545
546 546 if (fromsnap != 0) {
547 547 err = dsl_dataset_hold_obj(dp, fromsnap, FTAG, &fromds);
548 548 if (err != 0) {
549 549 dsl_dataset_rele(ds, FTAG);
550 550 dsl_pool_rele(dp, FTAG);
551 551 return (err);
552 552 }
553 553 }
554 554
555 555 return (dmu_send_impl(FTAG, dp, ds, fromds, outfd, vp, off));
556 556 }
557 557
558 558 int
559 559 dmu_send(const char *tosnap, const char *fromsnap,
560 560 int outfd, vnode_t *vp, offset_t *off)
561 561 {
562 562 dsl_pool_t *dp;
563 563 dsl_dataset_t *ds;
564 564 dsl_dataset_t *fromds = NULL;
565 565 int err;
566 566
567 567 if (strchr(tosnap, '@') == NULL)
568 568 return (SET_ERROR(EINVAL));
569 569 if (fromsnap != NULL && strchr(fromsnap, '@') == NULL)
570 570 return (SET_ERROR(EINVAL));
571 571
572 572 err = dsl_pool_hold(tosnap, FTAG, &dp);
573 573 if (err != 0)
574 574 return (err);
575 575
576 576 err = dsl_dataset_hold(dp, tosnap, FTAG, &ds);
577 577 if (err != 0) {
578 578 dsl_pool_rele(dp, FTAG);
579 579 return (err);
580 580 }
581 581
582 582 if (fromsnap != NULL) {
583 583 err = dsl_dataset_hold(dp, fromsnap, FTAG, &fromds);
584 584 if (err != 0) {
585 585 dsl_dataset_rele(ds, FTAG);
586 586 dsl_pool_rele(dp, FTAG);
587 587 return (err);
588 588 }
589 589 }
590 590 return (dmu_send_impl(FTAG, dp, ds, fromds, outfd, vp, off));
591 591 }
592 592
593 593 int
594 594 dmu_send_estimate(dsl_dataset_t *ds, dsl_dataset_t *fromds, uint64_t *sizep)
595 595 {
596 596 dsl_pool_t *dp = ds->ds_dir->dd_pool;
597 597 int err;
598 598 uint64_t size;
599 599
600 600 ASSERT(dsl_pool_config_held(dp));
601 601
602 602 /* tosnap must be a snapshot */
603 603 if (!dsl_dataset_is_snapshot(ds))
604 604 return (SET_ERROR(EINVAL));
605 605
606 606 /*
607 607 * fromsnap must be an earlier snapshot from the same fs as tosnap,
608 608 * or the origin's fs.
609 609 */
610 610 if (fromds != NULL && !dsl_dataset_is_before(ds, fromds))
611 611 return (SET_ERROR(EXDEV));
612 612
613 613 /* Get uncompressed size estimate of changed data. */
614 614 if (fromds == NULL) {
615 615 size = ds->ds_phys->ds_uncompressed_bytes;
616 616 } else {
617 617 uint64_t used, comp;
618 618 err = dsl_dataset_space_written(fromds, ds,
619 619 &used, &comp, &size);
620 620 if (err != 0)
621 621 return (err);
622 622 }
623 623
624 624 /*
625 625 * Assume that space (both on-disk and in-stream) is dominated by
626 626 * data. We will adjust for indirect blocks and the copies property,
627 627 * but ignore per-object space used (eg, dnodes and DRR_OBJECT records).
628 628 */
629 629
630 630 /*
631 631 * Subtract out approximate space used by indirect blocks.
632 632 * Assume most space is used by data blocks (non-indirect, non-dnode).
633 633 * Assume all blocks are recordsize. Assume ditto blocks and
634 634 * internal fragmentation counter out compression.
635 635 *
636 636 * Therefore, space used by indirect blocks is sizeof(blkptr_t) per
637 637 * block, which we observe in practice.
638 638 */
639 639 uint64_t recordsize;
640 640 err = dsl_prop_get_int_ds(ds, "recordsize", &recordsize);
641 641 if (err != 0)
642 642 return (err);
643 643 size -= size / recordsize * sizeof (blkptr_t);
644 644
645 645 /* Add in the space for the record associated with each block. */
646 646 size += size / recordsize * sizeof (dmu_replay_record_t);
647 647
648 648 *sizep = size;
649 649
650 650 return (0);
651 651 }
652 652
653 653 typedef struct dmu_recv_begin_arg {
654 654 const char *drba_origin;
655 655 dmu_recv_cookie_t *drba_cookie;
656 656 cred_t *drba_cred;
657 + uint64_t drba_snapobj;
657 658 } dmu_recv_begin_arg_t;
658 659
659 660 static int
660 661 recv_begin_check_existing_impl(dmu_recv_begin_arg_t *drba, dsl_dataset_t *ds,
661 662 uint64_t fromguid)
662 663 {
663 664 uint64_t val;
664 665 int error;
665 666 dsl_pool_t *dp = ds->ds_dir->dd_pool;
666 667
667 - /* must not have any changes since most recent snapshot */
668 - if (!drba->drba_cookie->drc_force &&
669 - dsl_dataset_modified_since_lastsnap(ds))
670 - return (SET_ERROR(ETXTBSY));
671 -
672 668 /* temporary clone name must not exist */
673 669 error = zap_lookup(dp->dp_meta_objset,
674 670 ds->ds_dir->dd_phys->dd_child_dir_zapobj, recv_clone_name,
675 671 8, 1, &val);
676 672 if (error != ENOENT)
677 673 return (error == 0 ? EBUSY : error);
678 674
679 675 /* new snapshot name must not exist */
680 676 error = zap_lookup(dp->dp_meta_objset,
681 677 ds->ds_phys->ds_snapnames_zapobj, drba->drba_cookie->drc_tosnap,
682 678 8, 1, &val);
683 679 if (error != ENOENT)
684 680 return (error == 0 ? EEXIST : error);
685 681
686 682 if (fromguid != 0) {
687 - /* if incremental, most recent snapshot must match fromguid */
688 - if (ds->ds_prev == NULL)
683 + dsl_dataset_t *snap;
684 + uint64_t obj = ds->ds_phys->ds_prev_snap_obj;
685 +
686 + /* Find snapshot in this dir that matches fromguid. */
687 + while (obj != 0) {
688 + error = dsl_dataset_hold_obj(dp, obj, FTAG,
689 + &snap);
690 + if (error != 0)
691 + return (SET_ERROR(ENODEV));
692 + if (snap->ds_dir != ds->ds_dir) {
693 + dsl_dataset_rele(snap, FTAG);
694 + return (SET_ERROR(ENODEV));
695 + }
696 + if (snap->ds_phys->ds_guid == fromguid)
697 + break;
698 + obj = snap->ds_phys->ds_prev_snap_obj;
699 + dsl_dataset_rele(snap, FTAG);
700 + }
701 + if (obj == 0)
689 702 return (SET_ERROR(ENODEV));
690 703
691 - /*
692 - * most recent snapshot must match fromguid, or there are no
693 - * changes since the fromguid one
694 - */
695 - if (ds->ds_prev->ds_phys->ds_guid != fromguid) {
696 - uint64_t birth = ds->ds_prev->ds_phys->ds_bp.blk_birth;
697 - uint64_t obj = ds->ds_prev->ds_phys->ds_prev_snap_obj;
698 - while (obj != 0) {
699 - dsl_dataset_t *snap;
700 - error = dsl_dataset_hold_obj(dp, obj, FTAG,
701 - &snap);
702 - if (error != 0)
703 - return (SET_ERROR(ENODEV));
704 - if (snap->ds_phys->ds_creation_txg < birth) {
705 - dsl_dataset_rele(snap, FTAG);
706 - return (SET_ERROR(ENODEV));
707 - }
708 - if (snap->ds_phys->ds_guid == fromguid) {
709 - dsl_dataset_rele(snap, FTAG);
710 - break; /* it's ok */
711 - }
712 - obj = snap->ds_phys->ds_prev_snap_obj;
704 + if (drba->drba_cookie->drc_force) {
705 + drba->drba_snapobj = obj;
706 + } else {
707 + /*
708 + * If we are not forcing, there must be no
709 + * changes since fromsnap.
710 + */
711 + if (dsl_dataset_modified_since_snap(ds, snap)) {
713 712 dsl_dataset_rele(snap, FTAG);
713 + return (SET_ERROR(ETXTBSY));
714 714 }
715 - if (obj == 0)
716 - return (SET_ERROR(ENODEV));
715 + drba->drba_snapobj = ds->ds_prev->ds_object;
717 716 }
717 +
718 + dsl_dataset_rele(snap, FTAG);
718 719 } else {
719 720 /* if full, most recent snapshot must be $ORIGIN */
720 721 if (ds->ds_phys->ds_prev_snap_txg >= TXG_INITIAL)
721 722 return (SET_ERROR(ENODEV));
723 + drba->drba_snapobj = ds->ds_phys->ds_prev_snap_obj;
722 724 }
723 725
724 726 return (0);
725 727
726 728 }
727 729
728 730 static int
729 731 dmu_recv_begin_check(void *arg, dmu_tx_t *tx)
730 732 {
731 733 dmu_recv_begin_arg_t *drba = arg;
732 734 dsl_pool_t *dp = dmu_tx_pool(tx);
733 735 struct drr_begin *drrb = drba->drba_cookie->drc_drrb;
734 736 uint64_t fromguid = drrb->drr_fromguid;
735 737 int flags = drrb->drr_flags;
736 738 int error;
737 739 dsl_dataset_t *ds;
738 740 const char *tofs = drba->drba_cookie->drc_tofs;
739 741
740 742 /* already checked */
741 743 ASSERT3U(drrb->drr_magic, ==, DMU_BACKUP_MAGIC);
742 744
743 745 if (DMU_GET_STREAM_HDRTYPE(drrb->drr_versioninfo) ==
744 746 DMU_COMPOUNDSTREAM ||
745 747 drrb->drr_type >= DMU_OST_NUMTYPES ||
746 748 ((flags & DRR_FLAG_CLONE) && drba->drba_origin == NULL))
747 749 return (SET_ERROR(EINVAL));
748 750
749 751 /* Verify pool version supports SA if SA_SPILL feature set */
750 752 if ((DMU_GET_FEATUREFLAGS(drrb->drr_versioninfo) &
751 753 DMU_BACKUP_FEATURE_SA_SPILL) &&
752 754 spa_version(dp->dp_spa) < SPA_VERSION_SA) {
753 755 return (SET_ERROR(ENOTSUP));
754 756 }
755 757
756 758 error = dsl_dataset_hold(dp, tofs, FTAG, &ds);
757 759 if (error == 0) {
758 760 /* target fs already exists; recv into temp clone */
759 761
760 762 /* Can't recv a clone into an existing fs */
761 763 if (flags & DRR_FLAG_CLONE) {
762 764 dsl_dataset_rele(ds, FTAG);
763 765 return (SET_ERROR(EINVAL));
764 766 }
765 767
766 768 error = recv_begin_check_existing_impl(drba, ds, fromguid);
767 769 dsl_dataset_rele(ds, FTAG);
768 770 } else if (error == ENOENT) {
769 771 /* target fs does not exist; must be a full backup or clone */
770 772 char buf[MAXNAMELEN];
771 773
772 774 /*
773 775 * If it's a non-clone incremental, we are missing the
774 776 * target fs, so fail the recv.
775 777 */
776 778 if (fromguid != 0 && !(flags & DRR_FLAG_CLONE))
777 779 return (SET_ERROR(ENOENT));
778 780
779 781 /* Open the parent of tofs */
780 782 ASSERT3U(strlen(tofs), <, MAXNAMELEN);
781 783 (void) strlcpy(buf, tofs, strrchr(tofs, '/') - tofs + 1);
782 784 error = dsl_dataset_hold(dp, buf, FTAG, &ds);
783 785 if (error != 0)
784 786 return (error);
785 787
786 788 if (drba->drba_origin != NULL) {
787 789 dsl_dataset_t *origin;
788 790 error = dsl_dataset_hold(dp, drba->drba_origin,
789 791 FTAG, &origin);
790 792 if (error != 0) {
791 793 dsl_dataset_rele(ds, FTAG);
792 794 return (error);
793 795 }
794 796 if (!dsl_dataset_is_snapshot(origin)) {
795 797 dsl_dataset_rele(origin, FTAG);
796 798 dsl_dataset_rele(ds, FTAG);
797 799 return (SET_ERROR(EINVAL));
798 800 }
799 801 if (origin->ds_phys->ds_guid != fromguid) {
800 802 dsl_dataset_rele(origin, FTAG);
801 803 dsl_dataset_rele(ds, FTAG);
802 804 return (SET_ERROR(ENODEV));
803 805 }
804 806 dsl_dataset_rele(origin, FTAG);
805 807 }
806 808 dsl_dataset_rele(ds, FTAG);
807 809 error = 0;
808 810 }
809 811 return (error);
810 812 }
811 813
812 814 static void
813 815 dmu_recv_begin_sync(void *arg, dmu_tx_t *tx)
814 816 {
815 817 dmu_recv_begin_arg_t *drba = arg;
816 818 dsl_pool_t *dp = dmu_tx_pool(tx);
817 819 struct drr_begin *drrb = drba->drba_cookie->drc_drrb;
818 820 const char *tofs = drba->drba_cookie->drc_tofs;
819 821 dsl_dataset_t *ds, *newds;
820 822 uint64_t dsobj;
821 823 int error;
822 824 uint64_t crflags;
823 825
824 826 crflags = (drrb->drr_flags & DRR_FLAG_CI_DATA) ?
825 827 DS_FLAG_CI_DATASET : 0;
826 828
827 829 error = dsl_dataset_hold(dp, tofs, FTAG, &ds);
828 830 if (error == 0) {
829 831 /* create temporary clone */
832 + dsl_dataset_t *snap = NULL;
833 + if (drba->drba_snapobj != 0) {
834 + VERIFY0(dsl_dataset_hold_obj(dp,
835 + drba->drba_snapobj, FTAG, &snap));
836 + }
830 837 dsobj = dsl_dataset_create_sync(ds->ds_dir, recv_clone_name,
831 - ds->ds_prev, crflags, drba->drba_cred, tx);
838 + snap, crflags, drba->drba_cred, tx);
839 + dsl_dataset_rele(snap, FTAG);
832 840 dsl_dataset_rele(ds, FTAG);
833 841 } else {
834 842 dsl_dir_t *dd;
835 843 const char *tail;
836 844 dsl_dataset_t *origin = NULL;
837 845
838 846 VERIFY0(dsl_dir_hold(dp, tofs, FTAG, &dd, &tail));
839 847
840 848 if (drba->drba_origin != NULL) {
841 849 VERIFY0(dsl_dataset_hold(dp, drba->drba_origin,
842 850 FTAG, &origin));
843 851 }
844 852
845 853 /* Create new dataset. */
846 854 dsobj = dsl_dataset_create_sync(dd,
847 855 strrchr(tofs, '/') + 1,
848 856 origin, crflags, drba->drba_cred, tx);
849 857 if (origin != NULL)
850 858 dsl_dataset_rele(origin, FTAG);
851 859 dsl_dir_rele(dd, FTAG);
852 860 drba->drba_cookie->drc_newfs = B_TRUE;
853 861 }
854 862 VERIFY0(dsl_dataset_own_obj(dp, dsobj, dmu_recv_tag, &newds));
855 863
856 864 dmu_buf_will_dirty(newds->ds_dbuf, tx);
857 865 newds->ds_phys->ds_flags |= DS_FLAG_INCONSISTENT;
858 866
859 867 /*
860 868 * If we actually created a non-clone, we need to create the
861 869 * objset in our new dataset.
862 870 */
863 871 if (BP_IS_HOLE(dsl_dataset_get_blkptr(newds))) {
864 872 (void) dmu_objset_create_impl(dp->dp_spa,
865 873 newds, dsl_dataset_get_blkptr(newds), drrb->drr_type, tx);
866 874 }
867 875
868 876 drba->drba_cookie->drc_ds = newds;
869 877
870 878 spa_history_log_internal_ds(newds, "receive", tx, "");
871 879 }
872 880
873 881 /*
874 882 * NB: callers *MUST* call dmu_recv_stream() if dmu_recv_begin()
875 883 * succeeds; otherwise we will leak the holds on the datasets.
876 884 */
877 885 int
878 886 dmu_recv_begin(char *tofs, char *tosnap, struct drr_begin *drrb,
879 887 boolean_t force, char *origin, dmu_recv_cookie_t *drc)
880 888 {
881 889 dmu_recv_begin_arg_t drba = { 0 };
882 890 dmu_replay_record_t *drr;
883 891
884 892 bzero(drc, sizeof (dmu_recv_cookie_t));
885 893 drc->drc_drrb = drrb;
886 894 drc->drc_tosnap = tosnap;
887 895 drc->drc_tofs = tofs;
888 896 drc->drc_force = force;
889 897
890 898 if (drrb->drr_magic == BSWAP_64(DMU_BACKUP_MAGIC))
891 899 drc->drc_byteswap = B_TRUE;
892 900 else if (drrb->drr_magic != DMU_BACKUP_MAGIC)
893 901 return (SET_ERROR(EINVAL));
894 902
895 903 drr = kmem_zalloc(sizeof (dmu_replay_record_t), KM_SLEEP);
896 904 drr->drr_type = DRR_BEGIN;
897 905 drr->drr_u.drr_begin = *drc->drc_drrb;
898 906 if (drc->drc_byteswap) {
899 907 fletcher_4_incremental_byteswap(drr,
900 908 sizeof (dmu_replay_record_t), &drc->drc_cksum);
901 909 } else {
902 910 fletcher_4_incremental_native(drr,
903 911 sizeof (dmu_replay_record_t), &drc->drc_cksum);
904 912 }
905 913 kmem_free(drr, sizeof (dmu_replay_record_t));
906 914
907 915 if (drc->drc_byteswap) {
908 916 drrb->drr_magic = BSWAP_64(drrb->drr_magic);
909 917 drrb->drr_versioninfo = BSWAP_64(drrb->drr_versioninfo);
910 918 drrb->drr_creation_time = BSWAP_64(drrb->drr_creation_time);
911 919 drrb->drr_type = BSWAP_32(drrb->drr_type);
912 920 drrb->drr_toguid = BSWAP_64(drrb->drr_toguid);
913 921 drrb->drr_fromguid = BSWAP_64(drrb->drr_fromguid);
914 922 }
915 923
916 924 drba.drba_origin = origin;
917 925 drba.drba_cookie = drc;
918 926 drba.drba_cred = CRED();
919 927
920 928 return (dsl_sync_task(tofs, dmu_recv_begin_check, dmu_recv_begin_sync,
921 929 &drba, 5));
922 930 }
923 931
924 932 struct restorearg {
925 933 int err;
926 934 boolean_t byteswap;
927 935 vnode_t *vp;
928 936 char *buf;
929 937 uint64_t voff;
930 938 int bufsize; /* amount of memory allocated for buf */
931 939 zio_cksum_t cksum;
932 940 avl_tree_t *guid_to_ds_map;
933 941 };
934 942
935 943 typedef struct guid_map_entry {
936 944 uint64_t guid;
937 945 dsl_dataset_t *gme_ds;
938 946 avl_node_t avlnode;
939 947 } guid_map_entry_t;
940 948
941 949 static int
942 950 guid_compare(const void *arg1, const void *arg2)
943 951 {
944 952 const guid_map_entry_t *gmep1 = arg1;
945 953 const guid_map_entry_t *gmep2 = arg2;
946 954
947 955 if (gmep1->guid < gmep2->guid)
948 956 return (-1);
949 957 else if (gmep1->guid > gmep2->guid)
950 958 return (1);
951 959 return (0);
952 960 }
953 961
954 962 static void
955 963 free_guid_map_onexit(void *arg)
956 964 {
957 965 avl_tree_t *ca = arg;
958 966 void *cookie = NULL;
959 967 guid_map_entry_t *gmep;
960 968
961 969 while ((gmep = avl_destroy_nodes(ca, &cookie)) != NULL) {
962 970 dsl_dataset_long_rele(gmep->gme_ds, gmep);
963 971 dsl_dataset_rele(gmep->gme_ds, gmep);
964 972 kmem_free(gmep, sizeof (guid_map_entry_t));
965 973 }
966 974 avl_destroy(ca);
967 975 kmem_free(ca, sizeof (avl_tree_t));
968 976 }
969 977
970 978 static void *
971 979 restore_read(struct restorearg *ra, int len)
972 980 {
973 981 void *rv;
974 982 int done = 0;
975 983
976 984 /* some things will require 8-byte alignment, so everything must */
977 985 ASSERT0(len % 8);
978 986
979 987 while (done < len) {
980 988 ssize_t resid;
981 989
982 990 ra->err = vn_rdwr(UIO_READ, ra->vp,
983 991 (caddr_t)ra->buf + done, len - done,
984 992 ra->voff, UIO_SYSSPACE, FAPPEND,
985 993 RLIM64_INFINITY, CRED(), &resid);
986 994
987 995 if (resid == len - done)
988 996 ra->err = SET_ERROR(EINVAL);
989 997 ra->voff += len - done - resid;
990 998 done = len - resid;
991 999 if (ra->err != 0)
992 1000 return (NULL);
993 1001 }
994 1002
995 1003 ASSERT3U(done, ==, len);
996 1004 rv = ra->buf;
997 1005 if (ra->byteswap)
998 1006 fletcher_4_incremental_byteswap(rv, len, &ra->cksum);
999 1007 else
1000 1008 fletcher_4_incremental_native(rv, len, &ra->cksum);
1001 1009 return (rv);
1002 1010 }
1003 1011
1004 1012 static void
1005 1013 backup_byteswap(dmu_replay_record_t *drr)
1006 1014 {
1007 1015 #define DO64(X) (drr->drr_u.X = BSWAP_64(drr->drr_u.X))
1008 1016 #define DO32(X) (drr->drr_u.X = BSWAP_32(drr->drr_u.X))
1009 1017 drr->drr_type = BSWAP_32(drr->drr_type);
1010 1018 drr->drr_payloadlen = BSWAP_32(drr->drr_payloadlen);
1011 1019 switch (drr->drr_type) {
1012 1020 case DRR_BEGIN:
1013 1021 DO64(drr_begin.drr_magic);
1014 1022 DO64(drr_begin.drr_versioninfo);
1015 1023 DO64(drr_begin.drr_creation_time);
1016 1024 DO32(drr_begin.drr_type);
1017 1025 DO32(drr_begin.drr_flags);
1018 1026 DO64(drr_begin.drr_toguid);
1019 1027 DO64(drr_begin.drr_fromguid);
1020 1028 break;
1021 1029 case DRR_OBJECT:
1022 1030 DO64(drr_object.drr_object);
1023 1031 /* DO64(drr_object.drr_allocation_txg); */
1024 1032 DO32(drr_object.drr_type);
1025 1033 DO32(drr_object.drr_bonustype);
1026 1034 DO32(drr_object.drr_blksz);
1027 1035 DO32(drr_object.drr_bonuslen);
1028 1036 DO64(drr_object.drr_toguid);
1029 1037 break;
1030 1038 case DRR_FREEOBJECTS:
1031 1039 DO64(drr_freeobjects.drr_firstobj);
1032 1040 DO64(drr_freeobjects.drr_numobjs);
1033 1041 DO64(drr_freeobjects.drr_toguid);
1034 1042 break;
1035 1043 case DRR_WRITE:
1036 1044 DO64(drr_write.drr_object);
1037 1045 DO32(drr_write.drr_type);
1038 1046 DO64(drr_write.drr_offset);
1039 1047 DO64(drr_write.drr_length);
1040 1048 DO64(drr_write.drr_toguid);
1041 1049 DO64(drr_write.drr_key.ddk_cksum.zc_word[0]);
1042 1050 DO64(drr_write.drr_key.ddk_cksum.zc_word[1]);
1043 1051 DO64(drr_write.drr_key.ddk_cksum.zc_word[2]);
1044 1052 DO64(drr_write.drr_key.ddk_cksum.zc_word[3]);
1045 1053 DO64(drr_write.drr_key.ddk_prop);
1046 1054 break;
1047 1055 case DRR_WRITE_BYREF:
1048 1056 DO64(drr_write_byref.drr_object);
1049 1057 DO64(drr_write_byref.drr_offset);
1050 1058 DO64(drr_write_byref.drr_length);
1051 1059 DO64(drr_write_byref.drr_toguid);
1052 1060 DO64(drr_write_byref.drr_refguid);
1053 1061 DO64(drr_write_byref.drr_refobject);
1054 1062 DO64(drr_write_byref.drr_refoffset);
1055 1063 DO64(drr_write_byref.drr_key.ddk_cksum.zc_word[0]);
1056 1064 DO64(drr_write_byref.drr_key.ddk_cksum.zc_word[1]);
1057 1065 DO64(drr_write_byref.drr_key.ddk_cksum.zc_word[2]);
1058 1066 DO64(drr_write_byref.drr_key.ddk_cksum.zc_word[3]);
1059 1067 DO64(drr_write_byref.drr_key.ddk_prop);
1060 1068 break;
1061 1069 case DRR_FREE:
1062 1070 DO64(drr_free.drr_object);
1063 1071 DO64(drr_free.drr_offset);
1064 1072 DO64(drr_free.drr_length);
1065 1073 DO64(drr_free.drr_toguid);
1066 1074 break;
1067 1075 case DRR_SPILL:
1068 1076 DO64(drr_spill.drr_object);
1069 1077 DO64(drr_spill.drr_length);
1070 1078 DO64(drr_spill.drr_toguid);
1071 1079 break;
1072 1080 case DRR_END:
1073 1081 DO64(drr_end.drr_checksum.zc_word[0]);
1074 1082 DO64(drr_end.drr_checksum.zc_word[1]);
1075 1083 DO64(drr_end.drr_checksum.zc_word[2]);
1076 1084 DO64(drr_end.drr_checksum.zc_word[3]);
1077 1085 DO64(drr_end.drr_toguid);
1078 1086 break;
1079 1087 }
1080 1088 #undef DO64
1081 1089 #undef DO32
1082 1090 }
1083 1091
1084 1092 static int
1085 1093 restore_object(struct restorearg *ra, objset_t *os, struct drr_object *drro)
1086 1094 {
1087 1095 int err;
1088 1096 dmu_tx_t *tx;
1089 1097 void *data = NULL;
1090 1098
1091 1099 if (drro->drr_type == DMU_OT_NONE ||
1092 1100 !DMU_OT_IS_VALID(drro->drr_type) ||
1093 1101 !DMU_OT_IS_VALID(drro->drr_bonustype) ||
1094 1102 drro->drr_checksumtype >= ZIO_CHECKSUM_FUNCTIONS ||
1095 1103 drro->drr_compress >= ZIO_COMPRESS_FUNCTIONS ||
1096 1104 P2PHASE(drro->drr_blksz, SPA_MINBLOCKSIZE) ||
1097 1105 drro->drr_blksz < SPA_MINBLOCKSIZE ||
1098 1106 drro->drr_blksz > SPA_MAXBLOCKSIZE ||
1099 1107 drro->drr_bonuslen > DN_MAX_BONUSLEN) {
1100 1108 return (SET_ERROR(EINVAL));
1101 1109 }
1102 1110
1103 1111 err = dmu_object_info(os, drro->drr_object, NULL);
1104 1112
1105 1113 if (err != 0 && err != ENOENT)
1106 1114 return (SET_ERROR(EINVAL));
1107 1115
1108 1116 if (drro->drr_bonuslen) {
1109 1117 data = restore_read(ra, P2ROUNDUP(drro->drr_bonuslen, 8));
1110 1118 if (ra->err != 0)
1111 1119 return (ra->err);
1112 1120 }
1113 1121
1114 1122 if (err == ENOENT) {
1115 1123 /* currently free, want to be allocated */
1116 1124 tx = dmu_tx_create(os);
1117 1125 dmu_tx_hold_bonus(tx, DMU_NEW_OBJECT);
1118 1126 err = dmu_tx_assign(tx, TXG_WAIT);
1119 1127 if (err != 0) {
1120 1128 dmu_tx_abort(tx);
1121 1129 return (err);
1122 1130 }
1123 1131 err = dmu_object_claim(os, drro->drr_object,
1124 1132 drro->drr_type, drro->drr_blksz,
1125 1133 drro->drr_bonustype, drro->drr_bonuslen, tx);
1126 1134 dmu_tx_commit(tx);
1127 1135 } else {
1128 1136 /* currently allocated, want to be allocated */
1129 1137 err = dmu_object_reclaim(os, drro->drr_object,
1130 1138 drro->drr_type, drro->drr_blksz,
1131 1139 drro->drr_bonustype, drro->drr_bonuslen);
1132 1140 }
1133 1141 if (err != 0) {
1134 1142 return (SET_ERROR(EINVAL));
1135 1143 }
1136 1144
1137 1145 tx = dmu_tx_create(os);
1138 1146 dmu_tx_hold_bonus(tx, drro->drr_object);
1139 1147 err = dmu_tx_assign(tx, TXG_WAIT);
1140 1148 if (err != 0) {
1141 1149 dmu_tx_abort(tx);
1142 1150 return (err);
1143 1151 }
1144 1152
1145 1153 dmu_object_set_checksum(os, drro->drr_object, drro->drr_checksumtype,
1146 1154 tx);
1147 1155 dmu_object_set_compress(os, drro->drr_object, drro->drr_compress, tx);
1148 1156
1149 1157 if (data != NULL) {
1150 1158 dmu_buf_t *db;
1151 1159
1152 1160 VERIFY(0 == dmu_bonus_hold(os, drro->drr_object, FTAG, &db));
1153 1161 dmu_buf_will_dirty(db, tx);
1154 1162
1155 1163 ASSERT3U(db->db_size, >=, drro->drr_bonuslen);
1156 1164 bcopy(data, db->db_data, drro->drr_bonuslen);
1157 1165 if (ra->byteswap) {
1158 1166 dmu_object_byteswap_t byteswap =
1159 1167 DMU_OT_BYTESWAP(drro->drr_bonustype);
1160 1168 dmu_ot_byteswap[byteswap].ob_func(db->db_data,
1161 1169 drro->drr_bonuslen);
1162 1170 }
1163 1171 dmu_buf_rele(db, FTAG);
1164 1172 }
1165 1173 dmu_tx_commit(tx);
1166 1174 return (0);
1167 1175 }
1168 1176
1169 1177 /* ARGSUSED */
1170 1178 static int
1171 1179 restore_freeobjects(struct restorearg *ra, objset_t *os,
1172 1180 struct drr_freeobjects *drrfo)
1173 1181 {
1174 1182 uint64_t obj;
1175 1183
1176 1184 if (drrfo->drr_firstobj + drrfo->drr_numobjs < drrfo->drr_firstobj)
1177 1185 return (SET_ERROR(EINVAL));
1178 1186
1179 1187 for (obj = drrfo->drr_firstobj;
1180 1188 obj < drrfo->drr_firstobj + drrfo->drr_numobjs;
1181 1189 (void) dmu_object_next(os, &obj, FALSE, 0)) {
1182 1190 int err;
1183 1191
1184 1192 if (dmu_object_info(os, obj, NULL) != 0)
1185 1193 continue;
1186 1194
1187 1195 err = dmu_free_object(os, obj);
1188 1196 if (err != 0)
1189 1197 return (err);
1190 1198 }
1191 1199 return (0);
1192 1200 }
1193 1201
1194 1202 static int
1195 1203 restore_write(struct restorearg *ra, objset_t *os,
1196 1204 struct drr_write *drrw)
1197 1205 {
1198 1206 dmu_tx_t *tx;
1199 1207 void *data;
1200 1208 int err;
1201 1209
1202 1210 if (drrw->drr_offset + drrw->drr_length < drrw->drr_offset ||
1203 1211 !DMU_OT_IS_VALID(drrw->drr_type))
1204 1212 return (SET_ERROR(EINVAL));
1205 1213
1206 1214 data = restore_read(ra, drrw->drr_length);
1207 1215 if (data == NULL)
1208 1216 return (ra->err);
1209 1217
1210 1218 if (dmu_object_info(os, drrw->drr_object, NULL) != 0)
1211 1219 return (SET_ERROR(EINVAL));
1212 1220
1213 1221 tx = dmu_tx_create(os);
1214 1222
1215 1223 dmu_tx_hold_write(tx, drrw->drr_object,
1216 1224 drrw->drr_offset, drrw->drr_length);
1217 1225 err = dmu_tx_assign(tx, TXG_WAIT);
1218 1226 if (err != 0) {
1219 1227 dmu_tx_abort(tx);
1220 1228 return (err);
1221 1229 }
1222 1230 if (ra->byteswap) {
1223 1231 dmu_object_byteswap_t byteswap =
1224 1232 DMU_OT_BYTESWAP(drrw->drr_type);
1225 1233 dmu_ot_byteswap[byteswap].ob_func(data, drrw->drr_length);
1226 1234 }
1227 1235 dmu_write(os, drrw->drr_object,
1228 1236 drrw->drr_offset, drrw->drr_length, data, tx);
1229 1237 dmu_tx_commit(tx);
1230 1238 return (0);
1231 1239 }
1232 1240
1233 1241 /*
1234 1242 * Handle a DRR_WRITE_BYREF record. This record is used in dedup'ed
1235 1243 * streams to refer to a copy of the data that is already on the
1236 1244 * system because it came in earlier in the stream. This function
1237 1245 * finds the earlier copy of the data, and uses that copy instead of
1238 1246 * data from the stream to fulfill this write.
1239 1247 */
1240 1248 static int
1241 1249 restore_write_byref(struct restorearg *ra, objset_t *os,
1242 1250 struct drr_write_byref *drrwbr)
1243 1251 {
1244 1252 dmu_tx_t *tx;
1245 1253 int err;
1246 1254 guid_map_entry_t gmesrch;
1247 1255 guid_map_entry_t *gmep;
1248 1256 avl_index_t where;
1249 1257 objset_t *ref_os = NULL;
1250 1258 dmu_buf_t *dbp;
1251 1259
1252 1260 if (drrwbr->drr_offset + drrwbr->drr_length < drrwbr->drr_offset)
1253 1261 return (SET_ERROR(EINVAL));
1254 1262
1255 1263 /*
1256 1264 * If the GUID of the referenced dataset is different from the
1257 1265 * GUID of the target dataset, find the referenced dataset.
1258 1266 */
1259 1267 if (drrwbr->drr_toguid != drrwbr->drr_refguid) {
1260 1268 gmesrch.guid = drrwbr->drr_refguid;
1261 1269 if ((gmep = avl_find(ra->guid_to_ds_map, &gmesrch,
1262 1270 &where)) == NULL) {
1263 1271 return (SET_ERROR(EINVAL));
1264 1272 }
1265 1273 if (dmu_objset_from_ds(gmep->gme_ds, &ref_os))
1266 1274 return (SET_ERROR(EINVAL));
1267 1275 } else {
1268 1276 ref_os = os;
1269 1277 }
1270 1278
1271 1279 if (err = dmu_buf_hold(ref_os, drrwbr->drr_refobject,
1272 1280 drrwbr->drr_refoffset, FTAG, &dbp, DMU_READ_PREFETCH))
1273 1281 return (err);
1274 1282
1275 1283 tx = dmu_tx_create(os);
1276 1284
1277 1285 dmu_tx_hold_write(tx, drrwbr->drr_object,
1278 1286 drrwbr->drr_offset, drrwbr->drr_length);
1279 1287 err = dmu_tx_assign(tx, TXG_WAIT);
1280 1288 if (err != 0) {
1281 1289 dmu_tx_abort(tx);
1282 1290 return (err);
1283 1291 }
1284 1292 dmu_write(os, drrwbr->drr_object,
1285 1293 drrwbr->drr_offset, drrwbr->drr_length, dbp->db_data, tx);
1286 1294 dmu_buf_rele(dbp, FTAG);
1287 1295 dmu_tx_commit(tx);
1288 1296 return (0);
1289 1297 }
1290 1298
1291 1299 static int
1292 1300 restore_spill(struct restorearg *ra, objset_t *os, struct drr_spill *drrs)
1293 1301 {
1294 1302 dmu_tx_t *tx;
1295 1303 void *data;
1296 1304 dmu_buf_t *db, *db_spill;
1297 1305 int err;
1298 1306
1299 1307 if (drrs->drr_length < SPA_MINBLOCKSIZE ||
1300 1308 drrs->drr_length > SPA_MAXBLOCKSIZE)
1301 1309 return (SET_ERROR(EINVAL));
1302 1310
1303 1311 data = restore_read(ra, drrs->drr_length);
1304 1312 if (data == NULL)
1305 1313 return (ra->err);
1306 1314
1307 1315 if (dmu_object_info(os, drrs->drr_object, NULL) != 0)
1308 1316 return (SET_ERROR(EINVAL));
1309 1317
1310 1318 VERIFY(0 == dmu_bonus_hold(os, drrs->drr_object, FTAG, &db));
1311 1319 if ((err = dmu_spill_hold_by_bonus(db, FTAG, &db_spill)) != 0) {
1312 1320 dmu_buf_rele(db, FTAG);
1313 1321 return (err);
1314 1322 }
1315 1323
1316 1324 tx = dmu_tx_create(os);
1317 1325
1318 1326 dmu_tx_hold_spill(tx, db->db_object);
1319 1327
1320 1328 err = dmu_tx_assign(tx, TXG_WAIT);
1321 1329 if (err != 0) {
1322 1330 dmu_buf_rele(db, FTAG);
1323 1331 dmu_buf_rele(db_spill, FTAG);
1324 1332 dmu_tx_abort(tx);
1325 1333 return (err);
1326 1334 }
1327 1335 dmu_buf_will_dirty(db_spill, tx);
1328 1336
1329 1337 if (db_spill->db_size < drrs->drr_length)
1330 1338 VERIFY(0 == dbuf_spill_set_blksz(db_spill,
1331 1339 drrs->drr_length, tx));
1332 1340 bcopy(data, db_spill->db_data, drrs->drr_length);
1333 1341
1334 1342 dmu_buf_rele(db, FTAG);
1335 1343 dmu_buf_rele(db_spill, FTAG);
1336 1344
1337 1345 dmu_tx_commit(tx);
1338 1346 return (0);
1339 1347 }
1340 1348
1341 1349 /* ARGSUSED */
1342 1350 static int
1343 1351 restore_free(struct restorearg *ra, objset_t *os,
1344 1352 struct drr_free *drrf)
1345 1353 {
1346 1354 int err;
1347 1355
1348 1356 if (drrf->drr_length != -1ULL &&
1349 1357 drrf->drr_offset + drrf->drr_length < drrf->drr_offset)
1350 1358 return (SET_ERROR(EINVAL));
1351 1359
1352 1360 if (dmu_object_info(os, drrf->drr_object, NULL) != 0)
1353 1361 return (SET_ERROR(EINVAL));
1354 1362
1355 1363 err = dmu_free_long_range(os, drrf->drr_object,
1356 1364 drrf->drr_offset, drrf->drr_length);
1357 1365 return (err);
1358 1366 }
1359 1367
1360 1368 /* used to destroy the drc_ds on error */
1361 1369 static void
1362 1370 dmu_recv_cleanup_ds(dmu_recv_cookie_t *drc)
1363 1371 {
1364 1372 char name[MAXNAMELEN];
1365 1373 dsl_dataset_name(drc->drc_ds, name);
1366 1374 dsl_dataset_disown(drc->drc_ds, dmu_recv_tag);
1367 1375 (void) dsl_destroy_head(name);
1368 1376 }
1369 1377
1370 1378 /*
1371 1379 * NB: callers *must* call dmu_recv_end() if this succeeds.
1372 1380 */
1373 1381 int
1374 1382 dmu_recv_stream(dmu_recv_cookie_t *drc, vnode_t *vp, offset_t *voffp,
1375 1383 int cleanup_fd, uint64_t *action_handlep)
1376 1384 {
1377 1385 struct restorearg ra = { 0 };
1378 1386 dmu_replay_record_t *drr;
1379 1387 objset_t *os;
1380 1388 zio_cksum_t pcksum;
1381 1389 int featureflags;
1382 1390
1383 1391 ra.byteswap = drc->drc_byteswap;
1384 1392 ra.cksum = drc->drc_cksum;
1385 1393 ra.vp = vp;
1386 1394 ra.voff = *voffp;
1387 1395 ra.bufsize = 1<<20;
1388 1396 ra.buf = kmem_alloc(ra.bufsize, KM_SLEEP);
1389 1397
1390 1398 /* these were verified in dmu_recv_begin */
1391 1399 ASSERT3U(DMU_GET_STREAM_HDRTYPE(drc->drc_drrb->drr_versioninfo), ==,
1392 1400 DMU_SUBSTREAM);
1393 1401 ASSERT3U(drc->drc_drrb->drr_type, <, DMU_OST_NUMTYPES);
1394 1402
1395 1403 /*
1396 1404 * Open the objset we are modifying.
1397 1405 */
1398 1406 VERIFY0(dmu_objset_from_ds(drc->drc_ds, &os));
1399 1407
1400 1408 ASSERT(drc->drc_ds->ds_phys->ds_flags & DS_FLAG_INCONSISTENT);
1401 1409
1402 1410 featureflags = DMU_GET_FEATUREFLAGS(drc->drc_drrb->drr_versioninfo);
1403 1411
1404 1412 /* if this stream is dedup'ed, set up the avl tree for guid mapping */
1405 1413 if (featureflags & DMU_BACKUP_FEATURE_DEDUP) {
1406 1414 minor_t minor;
1407 1415
1408 1416 if (cleanup_fd == -1) {
1409 1417 ra.err = SET_ERROR(EBADF);
1410 1418 goto out;
1411 1419 }
1412 1420 ra.err = zfs_onexit_fd_hold(cleanup_fd, &minor);
1413 1421 if (ra.err != 0) {
1414 1422 cleanup_fd = -1;
1415 1423 goto out;
1416 1424 }
1417 1425
1418 1426 if (*action_handlep == 0) {
1419 1427 ra.guid_to_ds_map =
1420 1428 kmem_alloc(sizeof (avl_tree_t), KM_SLEEP);
1421 1429 avl_create(ra.guid_to_ds_map, guid_compare,
1422 1430 sizeof (guid_map_entry_t),
1423 1431 offsetof(guid_map_entry_t, avlnode));
1424 1432 ra.err = zfs_onexit_add_cb(minor,
1425 1433 free_guid_map_onexit, ra.guid_to_ds_map,
1426 1434 action_handlep);
1427 1435 if (ra.err != 0)
1428 1436 goto out;
1429 1437 } else {
1430 1438 ra.err = zfs_onexit_cb_data(minor, *action_handlep,
1431 1439 (void **)&ra.guid_to_ds_map);
1432 1440 if (ra.err != 0)
1433 1441 goto out;
1434 1442 }
1435 1443
1436 1444 drc->drc_guid_to_ds_map = ra.guid_to_ds_map;
1437 1445 }
1438 1446
1439 1447 /*
1440 1448 * Read records and process them.
1441 1449 */
1442 1450 pcksum = ra.cksum;
1443 1451 while (ra.err == 0 &&
1444 1452 NULL != (drr = restore_read(&ra, sizeof (*drr)))) {
1445 1453 if (issig(JUSTLOOKING) && issig(FORREAL)) {
1446 1454 ra.err = SET_ERROR(EINTR);
1447 1455 goto out;
1448 1456 }
1449 1457
1450 1458 if (ra.byteswap)
1451 1459 backup_byteswap(drr);
1452 1460
1453 1461 switch (drr->drr_type) {
1454 1462 case DRR_OBJECT:
1455 1463 {
1456 1464 /*
1457 1465 * We need to make a copy of the record header,
1458 1466 * because restore_{object,write} may need to
1459 1467 * restore_read(), which will invalidate drr.
1460 1468 */
1461 1469 struct drr_object drro = drr->drr_u.drr_object;
1462 1470 ra.err = restore_object(&ra, os, &drro);
1463 1471 break;
1464 1472 }
1465 1473 case DRR_FREEOBJECTS:
1466 1474 {
1467 1475 struct drr_freeobjects drrfo =
1468 1476 drr->drr_u.drr_freeobjects;
1469 1477 ra.err = restore_freeobjects(&ra, os, &drrfo);
1470 1478 break;
1471 1479 }
1472 1480 case DRR_WRITE:
1473 1481 {
1474 1482 struct drr_write drrw = drr->drr_u.drr_write;
1475 1483 ra.err = restore_write(&ra, os, &drrw);
1476 1484 break;
1477 1485 }
1478 1486 case DRR_WRITE_BYREF:
1479 1487 {
1480 1488 struct drr_write_byref drrwbr =
1481 1489 drr->drr_u.drr_write_byref;
1482 1490 ra.err = restore_write_byref(&ra, os, &drrwbr);
1483 1491 break;
1484 1492 }
1485 1493 case DRR_FREE:
1486 1494 {
1487 1495 struct drr_free drrf = drr->drr_u.drr_free;
1488 1496 ra.err = restore_free(&ra, os, &drrf);
1489 1497 break;
1490 1498 }
1491 1499 case DRR_END:
1492 1500 {
1493 1501 struct drr_end drre = drr->drr_u.drr_end;
1494 1502 /*
1495 1503 * We compare against the *previous* checksum
1496 1504 * value, because the stored checksum is of
1497 1505 * everything before the DRR_END record.
1498 1506 */
1499 1507 if (!ZIO_CHECKSUM_EQUAL(drre.drr_checksum, pcksum))
1500 1508 ra.err = SET_ERROR(ECKSUM);
1501 1509 goto out;
1502 1510 }
1503 1511 case DRR_SPILL:
1504 1512 {
1505 1513 struct drr_spill drrs = drr->drr_u.drr_spill;
1506 1514 ra.err = restore_spill(&ra, os, &drrs);
1507 1515 break;
1508 1516 }
1509 1517 default:
1510 1518 ra.err = SET_ERROR(EINVAL);
1511 1519 goto out;
1512 1520 }
1513 1521 pcksum = ra.cksum;
1514 1522 }
1515 1523 ASSERT(ra.err != 0);
1516 1524
1517 1525 out:
1518 1526 if ((featureflags & DMU_BACKUP_FEATURE_DEDUP) && (cleanup_fd != -1))
1519 1527 zfs_onexit_fd_rele(cleanup_fd);
1520 1528
1521 1529 if (ra.err != 0) {
1522 1530 /*
1523 1531 * destroy what we created, so we don't leave it in the
1524 1532 * inconsistent restoring state.
1525 1533 */
1526 1534 dmu_recv_cleanup_ds(drc);
1527 1535 }
1528 1536
1529 1537 kmem_free(ra.buf, ra.bufsize);
1530 1538 *voffp = ra.voff;
1531 1539 return (ra.err);
1532 1540 }
1533 1541
1534 1542 static int
1535 1543 dmu_recv_end_check(void *arg, dmu_tx_t *tx)
1536 1544 {
1537 1545 dmu_recv_cookie_t *drc = arg;
1538 1546 dsl_pool_t *dp = dmu_tx_pool(tx);
1539 1547 int error;
1540 1548
1541 1549 ASSERT3P(drc->drc_ds->ds_owner, ==, dmu_recv_tag);
1542 1550
1543 1551 if (!drc->drc_newfs) {
1544 1552 dsl_dataset_t *origin_head;
1545 1553
1546 1554 error = dsl_dataset_hold(dp, drc->drc_tofs, FTAG, &origin_head);
1547 1555 if (error != 0)
1548 1556 return (error);
1557 + if (drc->drc_force) {
1558 + /*
1559 + * We will destroy any snapshots in tofs (i.e. before
1560 + * origin_head) that are after the origin (which is
1561 + * the snap before drc_ds, because drc_ds can not
1562 + * have any snaps of its own).
1563 + */
1564 + uint64_t obj = origin_head->ds_phys->ds_prev_snap_obj;
1565 + while (obj != drc->drc_ds->ds_phys->ds_prev_snap_obj) {
1566 + dsl_dataset_t *snap;
1567 + error = dsl_dataset_hold_obj(dp, obj, FTAG,
1568 + &snap);
1569 + if (error != 0)
1570 + return (error);
1571 + if (snap->ds_dir != origin_head->ds_dir)
1572 + error = SET_ERROR(EINVAL);
1573 + if (error == 0) {
1574 + error = dsl_destroy_snapshot_check_impl(
1575 + snap, B_FALSE);
1576 + }
1577 + obj = snap->ds_phys->ds_prev_snap_obj;
1578 + dsl_dataset_rele(snap, FTAG);
1579 + if (error != 0)
1580 + return (error);
1581 + }
1582 + }
1549 1583 error = dsl_dataset_clone_swap_check_impl(drc->drc_ds,
1550 1584 origin_head, drc->drc_force, drc->drc_owner, tx);
1551 1585 if (error != 0) {
1552 1586 dsl_dataset_rele(origin_head, FTAG);
1553 1587 return (error);
1554 1588 }
1555 1589 error = dsl_dataset_snapshot_check_impl(origin_head,
1556 1590 drc->drc_tosnap, tx, B_TRUE);
1557 1591 dsl_dataset_rele(origin_head, FTAG);
1558 1592 if (error != 0)
1559 1593 return (error);
1560 1594
1561 1595 error = dsl_destroy_head_check_impl(drc->drc_ds, 1);
1562 1596 } else {
1563 1597 error = dsl_dataset_snapshot_check_impl(drc->drc_ds,
1564 1598 drc->drc_tosnap, tx, B_TRUE);
1565 1599 }
1566 1600 return (error);
1567 1601 }
1568 1602
1569 1603 static void
1570 1604 dmu_recv_end_sync(void *arg, dmu_tx_t *tx)
1571 1605 {
1572 1606 dmu_recv_cookie_t *drc = arg;
1573 1607 dsl_pool_t *dp = dmu_tx_pool(tx);
1574 1608
1575 1609 spa_history_log_internal_ds(drc->drc_ds, "finish receiving",
1576 1610 tx, "snap=%s", drc->drc_tosnap);
1577 1611
1578 1612 if (!drc->drc_newfs) {
1579 1613 dsl_dataset_t *origin_head;
1580 1614
1581 1615 VERIFY0(dsl_dataset_hold(dp, drc->drc_tofs, FTAG,
1582 1616 &origin_head));
1617 +
1618 + if (drc->drc_force) {
1619 + /*
1620 + * Destroy any snapshots of drc_tofs (origin_head)
1621 + * after the origin (the snap before drc_ds).
1622 + */
1623 + uint64_t obj = origin_head->ds_phys->ds_prev_snap_obj;
1624 + while (obj != drc->drc_ds->ds_phys->ds_prev_snap_obj) {
1625 + dsl_dataset_t *snap;
1626 + VERIFY0(dsl_dataset_hold_obj(dp, obj, FTAG,
1627 + &snap));
1628 + ASSERT3P(snap->ds_dir, ==, origin_head->ds_dir);
1629 + obj = snap->ds_phys->ds_prev_snap_obj;
1630 + dsl_destroy_snapshot_sync_impl(snap,
1631 + B_FALSE, tx);
1632 + dsl_dataset_rele(snap, FTAG);
1633 + }
1634 + }
1635 + VERIFY3P(drc->drc_ds->ds_prev, ==,
1636 + origin_head->ds_prev);
1637 +
1583 1638 dsl_dataset_clone_swap_sync_impl(drc->drc_ds,
1584 1639 origin_head, tx);
1585 1640 dsl_dataset_snapshot_sync_impl(origin_head,
1586 1641 drc->drc_tosnap, tx);
1587 1642
1588 1643 /* set snapshot's creation time and guid */
1589 1644 dmu_buf_will_dirty(origin_head->ds_prev->ds_dbuf, tx);
1590 1645 origin_head->ds_prev->ds_phys->ds_creation_time =
1591 1646 drc->drc_drrb->drr_creation_time;
1592 1647 origin_head->ds_prev->ds_phys->ds_guid =
1593 1648 drc->drc_drrb->drr_toguid;
1594 1649 origin_head->ds_prev->ds_phys->ds_flags &=
1595 1650 ~DS_FLAG_INCONSISTENT;
1596 1651
1597 1652 dmu_buf_will_dirty(origin_head->ds_dbuf, tx);
1598 1653 origin_head->ds_phys->ds_flags &= ~DS_FLAG_INCONSISTENT;
1599 1654
1600 1655 dsl_dataset_rele(origin_head, FTAG);
1601 1656 dsl_destroy_head_sync_impl(drc->drc_ds, tx);
1602 1657
1603 1658 if (drc->drc_owner != NULL)
1604 1659 VERIFY3P(origin_head->ds_owner, ==, drc->drc_owner);
1605 1660 } else {
1606 1661 dsl_dataset_t *ds = drc->drc_ds;
1607 1662
1608 1663 dsl_dataset_snapshot_sync_impl(ds, drc->drc_tosnap, tx);
1609 1664
1610 1665 /* set snapshot's creation time and guid */
1611 1666 dmu_buf_will_dirty(ds->ds_prev->ds_dbuf, tx);
1612 1667 ds->ds_prev->ds_phys->ds_creation_time =
1613 1668 drc->drc_drrb->drr_creation_time;
1614 1669 ds->ds_prev->ds_phys->ds_guid = drc->drc_drrb->drr_toguid;
1615 1670 ds->ds_prev->ds_phys->ds_flags &= ~DS_FLAG_INCONSISTENT;
1616 1671
1617 1672 dmu_buf_will_dirty(ds->ds_dbuf, tx);
1618 1673 ds->ds_phys->ds_flags &= ~DS_FLAG_INCONSISTENT;
1619 1674 }
1620 1675 drc->drc_newsnapobj = drc->drc_ds->ds_phys->ds_prev_snap_obj;
1621 1676 /*
1622 1677 * Release the hold from dmu_recv_begin. This must be done before
1623 1678 * we return to open context, so that when we free the dataset's dnode,
1624 1679 * we can evict its bonus buffer.
1625 1680 */
1626 1681 dsl_dataset_disown(drc->drc_ds, dmu_recv_tag);
1627 1682 drc->drc_ds = NULL;
1628 1683 }
1629 1684
1630 1685 static int
1631 1686 add_ds_to_guidmap(const char *name, avl_tree_t *guid_map, uint64_t snapobj)
1632 1687 {
1633 1688 dsl_pool_t *dp;
1634 1689 dsl_dataset_t *snapds;
1635 1690 guid_map_entry_t *gmep;
1636 1691 int err;
1637 1692
1638 1693 ASSERT(guid_map != NULL);
1639 1694
1640 1695 err = dsl_pool_hold(name, FTAG, &dp);
1641 1696 if (err != 0)
1642 1697 return (err);
1643 1698 gmep = kmem_alloc(sizeof (*gmep), KM_SLEEP);
1644 1699 err = dsl_dataset_hold_obj(dp, snapobj, gmep, &snapds);
1645 1700 if (err == 0) {
1646 1701 gmep->guid = snapds->ds_phys->ds_guid;
1647 1702 gmep->gme_ds = snapds;
1648 1703 avl_add(guid_map, gmep);
1649 1704 dsl_dataset_long_hold(snapds, gmep);
1650 1705 } else {
1651 1706 kmem_free(gmep, sizeof (*gmep));
1652 1707 }
1653 1708
1654 1709 dsl_pool_rele(dp, FTAG);
1655 1710 return (err);
1656 1711 }
1657 1712
1658 1713 static int dmu_recv_end_modified_blocks = 3;
1659 1714
1660 1715 static int
1661 1716 dmu_recv_existing_end(dmu_recv_cookie_t *drc)
1662 1717 {
1663 1718 int error;
1664 1719 char name[MAXNAMELEN];
1665 1720
1666 1721 #ifdef _KERNEL
1667 1722 /*
1668 1723 * We will be destroying the ds; make sure its origin is unmounted if
1669 1724 * necessary.
1670 1725 */
1671 1726 dsl_dataset_name(drc->drc_ds, name);
1672 1727 zfs_destroy_unmount_origin(name);
1673 1728 #endif
1674 1729
1675 1730 error = dsl_sync_task(drc->drc_tofs,
1676 1731 dmu_recv_end_check, dmu_recv_end_sync, drc,
1677 1732 dmu_recv_end_modified_blocks);
1678 1733
1679 1734 if (error != 0)
1680 1735 dmu_recv_cleanup_ds(drc);
1681 1736 return (error);
1682 1737 }
1683 1738
1684 1739 static int
1685 1740 dmu_recv_new_end(dmu_recv_cookie_t *drc)
1686 1741 {
1687 1742 int error;
1688 1743
1689 1744 error = dsl_sync_task(drc->drc_tofs,
1690 1745 dmu_recv_end_check, dmu_recv_end_sync, drc,
1691 1746 dmu_recv_end_modified_blocks);
1692 1747
1693 1748 if (error != 0) {
1694 1749 dmu_recv_cleanup_ds(drc);
1695 1750 } else if (drc->drc_guid_to_ds_map != NULL) {
1696 1751 (void) add_ds_to_guidmap(drc->drc_tofs,
1697 1752 drc->drc_guid_to_ds_map,
1698 1753 drc->drc_newsnapobj);
1699 1754 }
1700 1755 return (error);
1701 1756 }
1702 1757
1703 1758 int
1704 1759 dmu_recv_end(dmu_recv_cookie_t *drc, void *owner)
1705 1760 {
1706 1761 drc->drc_owner = owner;
1707 1762
1708 1763 if (drc->drc_newfs)
1709 1764 return (dmu_recv_new_end(drc));
1710 1765 else
1711 1766 return (dmu_recv_existing_end(drc));
1712 1767 }