4047 panic from dbuf_free_range() from dmu_free_object() while doing zfs receive
Reviewed by: Adam Leventhal <ahl@delphix.com>
Reviewed by: George Wilson <george.wilson@delphix.com>
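The functional change under review is confined to restore_freeobjects() (new-side line 1235): when a DRR_FREEOBJECTS record is replayed, the object is now freed with dmu_free_long_object() instead of dmu_free_object(). As a rough illustration of the difference, the sketch below frees an object's data in open context via dmu_free_long_range() (which can spread the work across transaction groups) before destroying the dnode in its own small transaction. It only reuses DMU interfaces that already appear in this file plus the well-known dmu_tx_hold_free()/dmu_object_free() calls, and it is an illustrative sketch, not the committed dmu_free_long_object() implementation.

	/*
	 * Illustrative sketch only: free a (possibly large) object's data
	 * incrementally, then free the now-empty dnode in its own tx.
	 * Assumes the <sys/dmu.h> and <sys/dmu_tx.h> interfaces used
	 * elsewhere in dmu_send.c.
	 */
	static int
	free_object_incrementally(objset_t *os, uint64_t object)
	{
		dmu_tx_t *tx;
		int err;

		/* Free all data blocks; the work may span many txgs. */
		err = dmu_free_long_range(os, object, 0, DMU_OBJECT_END);
		if (err != 0)
			return (err);

		/* The object is now empty; free the dnode itself. */
		tx = dmu_tx_create(os);
		dmu_tx_hold_bonus(tx, object);
		dmu_tx_hold_free(tx, object, 0, DMU_OBJECT_END);
		err = dmu_tx_assign(tx, TXG_WAIT);
		if (err != 0) {
			dmu_tx_abort(tx);
			return (err);
		}
		err = dmu_object_free(os, object, tx);
		dmu_tx_commit(tx);
		return (err);
	}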
--- old/usr/src/uts/common/fs/zfs/dmu_send.c
+++ new/usr/src/uts/common/fs/zfs/dmu_send.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21 /*
22 22 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
23 23 * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
24 24 * Copyright (c) 2013 by Delphix. All rights reserved.
25 25 * Copyright (c) 2012, Joyent, Inc. All rights reserved.
26 26 */
27 27
28 28 #include <sys/dmu.h>
29 29 #include <sys/dmu_impl.h>
30 30 #include <sys/dmu_tx.h>
31 31 #include <sys/dbuf.h>
32 32 #include <sys/dnode.h>
33 33 #include <sys/zfs_context.h>
34 34 #include <sys/dmu_objset.h>
35 35 #include <sys/dmu_traverse.h>
36 36 #include <sys/dsl_dataset.h>
37 37 #include <sys/dsl_dir.h>
38 38 #include <sys/dsl_prop.h>
39 39 #include <sys/dsl_pool.h>
40 40 #include <sys/dsl_synctask.h>
41 41 #include <sys/zfs_ioctl.h>
42 42 #include <sys/zap.h>
43 43 #include <sys/zio_checksum.h>
44 44 #include <sys/zfs_znode.h>
45 45 #include <zfs_fletcher.h>
46 46 #include <sys/avl.h>
47 47 #include <sys/ddt.h>
48 48 #include <sys/zfs_onexit.h>
49 49 #include <sys/dmu_send.h>
50 50 #include <sys/dsl_destroy.h>
51 51
52 52 /* Set this tunable to TRUE to replace corrupt data with 0x2f5baddb10c */
53 53 int zfs_send_corrupt_data = B_FALSE;
54 54
55 55 static char *dmu_recv_tag = "dmu_recv_tag";
56 56 static const char *recv_clone_name = "%recv";
57 57
58 58 static int
59 59 dump_bytes(dmu_sendarg_t *dsp, void *buf, int len)
60 60 {
61 61 dsl_dataset_t *ds = dsp->dsa_os->os_dsl_dataset;
62 62 ssize_t resid; /* have to get resid to get detailed errno */
63 63 ASSERT0(len % 8);
64 64
65 65 fletcher_4_incremental_native(buf, len, &dsp->dsa_zc);
66 66 dsp->dsa_err = vn_rdwr(UIO_WRITE, dsp->dsa_vp,
67 67 (caddr_t)buf, len,
68 68 0, UIO_SYSSPACE, FAPPEND, RLIM64_INFINITY, CRED(), &resid);
69 69
70 70 mutex_enter(&ds->ds_sendstream_lock);
71 71 *dsp->dsa_off += len;
72 72 mutex_exit(&ds->ds_sendstream_lock);
73 73
74 74 return (dsp->dsa_err);
75 75 }
76 76
77 77 static int
78 78 dump_free(dmu_sendarg_t *dsp, uint64_t object, uint64_t offset,
79 79 uint64_t length)
80 80 {
81 81 struct drr_free *drrf = &(dsp->dsa_drr->drr_u.drr_free);
82 82
83 83 /*
84 84 * When we receive a free record, dbuf_free_range() assumes
85 85 * that the receiving system doesn't have any dbufs in the range
86 86 * being freed. This is always true because there is a one-record
87 87 * constraint: we only send one WRITE record for any given
88 88 * object+offset. We know that the one-record constraint is
89 89 * true because we always send data in increasing order by
90 90 * object,offset.
91 91 *
92 92 * If the increasing-order constraint ever changes, we should find
93 93 * another way to assert that the one-record constraint is still
94 94 * satisfied.
95 95 */
96 96 ASSERT(object > dsp->dsa_last_data_object ||
97 97 (object == dsp->dsa_last_data_object &&
98 98 offset > dsp->dsa_last_data_offset));
99 99
100 100 /*
101 101 * If we are doing a non-incremental send, then there can't
102 102 * be any data in the dataset we're receiving into. Therefore
103 103 * a free record would simply be a no-op. Save space by not
104 104 * sending it to begin with.
105 105 */
106 106 if (!dsp->dsa_incremental)
107 107 return (0);
108 108
109 109 if (length != -1ULL && offset + length < offset)
110 110 length = -1ULL;
111 111
112 112 /*
113 113 * If there is a pending op, but it's not PENDING_FREE, push it out,
114 114 * since free block aggregation can only be done for blocks of the
115 115 * same type (i.e., DRR_FREE records can only be aggregated with
116 116 * other DRR_FREE records. DRR_FREEOBJECTS records can only be
117 117 * aggregated with other DRR_FREEOBJECTS records.
118 118 */
119 119 if (dsp->dsa_pending_op != PENDING_NONE &&
120 120 dsp->dsa_pending_op != PENDING_FREE) {
121 121 if (dump_bytes(dsp, dsp->dsa_drr,
122 122 sizeof (dmu_replay_record_t)) != 0)
123 123 return (SET_ERROR(EINTR));
124 124 dsp->dsa_pending_op = PENDING_NONE;
125 125 }
126 126
127 127 if (dsp->dsa_pending_op == PENDING_FREE) {
128 128 /*
129 129 * There should never be a PENDING_FREE if length is -1
130 130 * (because dump_dnode is the only place where this
131 131 * function is called with a -1, and only after flushing
132 132 * any pending record).
133 133 */
134 134 ASSERT(length != -1ULL);
135 135 /*
136 136 * Check to see whether this free block can be aggregated
137 137 * with pending one.
138 138 */
139 139 if (drrf->drr_object == object && drrf->drr_offset +
140 140 drrf->drr_length == offset) {
141 141 drrf->drr_length += length;
142 142 return (0);
143 143 } else {
144 144 /* not a continuation. Push out pending record */
145 145 if (dump_bytes(dsp, dsp->dsa_drr,
146 146 sizeof (dmu_replay_record_t)) != 0)
147 147 return (SET_ERROR(EINTR));
148 148 dsp->dsa_pending_op = PENDING_NONE;
149 149 }
150 150 }
151 151 /* create a FREE record and make it pending */
152 152 bzero(dsp->dsa_drr, sizeof (dmu_replay_record_t));
153 153 dsp->dsa_drr->drr_type = DRR_FREE;
154 154 drrf->drr_object = object;
155 155 drrf->drr_offset = offset;
156 156 drrf->drr_length = length;
157 157 drrf->drr_toguid = dsp->dsa_toguid;
158 158 if (length == -1ULL) {
159 159 if (dump_bytes(dsp, dsp->dsa_drr,
160 160 sizeof (dmu_replay_record_t)) != 0)
161 161 return (SET_ERROR(EINTR));
162 162 } else {
163 163 dsp->dsa_pending_op = PENDING_FREE;
164 164 }
165 165
166 166 return (0);
167 167 }
168 168
169 169 static int
170 170 dump_data(dmu_sendarg_t *dsp, dmu_object_type_t type,
171 171 uint64_t object, uint64_t offset, int blksz, const blkptr_t *bp, void *data)
172 172 {
173 173 struct drr_write *drrw = &(dsp->dsa_drr->drr_u.drr_write);
174 174
175 175 /*
176 176 * We send data in increasing object, offset order.
177 177 * See comment in dump_free() for details.
178 178 */
179 179 ASSERT(object > dsp->dsa_last_data_object ||
180 180 (object == dsp->dsa_last_data_object &&
181 181 offset > dsp->dsa_last_data_offset));
182 182 dsp->dsa_last_data_object = object;
183 183 dsp->dsa_last_data_offset = offset + blksz - 1;
184 184
185 185 /*
186 186 * If there is any kind of pending aggregation (currently either
187 187 * a grouping of free objects or free blocks), push it out to
188 188 * the stream, since aggregation can't be done across operations
189 189 * of different types.
190 190 */
191 191 if (dsp->dsa_pending_op != PENDING_NONE) {
192 192 if (dump_bytes(dsp, dsp->dsa_drr,
193 193 sizeof (dmu_replay_record_t)) != 0)
194 194 return (SET_ERROR(EINTR));
195 195 dsp->dsa_pending_op = PENDING_NONE;
196 196 }
197 197 /* write a DATA record */
198 198 bzero(dsp->dsa_drr, sizeof (dmu_replay_record_t));
199 199 dsp->dsa_drr->drr_type = DRR_WRITE;
200 200 drrw->drr_object = object;
201 201 drrw->drr_type = type;
202 202 drrw->drr_offset = offset;
203 203 drrw->drr_length = blksz;
204 204 drrw->drr_toguid = dsp->dsa_toguid;
205 205 drrw->drr_checksumtype = BP_GET_CHECKSUM(bp);
206 206 if (zio_checksum_table[drrw->drr_checksumtype].ci_dedup)
207 207 drrw->drr_checksumflags |= DRR_CHECKSUM_DEDUP;
208 208 DDK_SET_LSIZE(&drrw->drr_key, BP_GET_LSIZE(bp));
209 209 DDK_SET_PSIZE(&drrw->drr_key, BP_GET_PSIZE(bp));
210 210 DDK_SET_COMPRESS(&drrw->drr_key, BP_GET_COMPRESS(bp));
211 211 drrw->drr_key.ddk_cksum = bp->blk_cksum;
212 212
213 213 if (dump_bytes(dsp, dsp->dsa_drr, sizeof (dmu_replay_record_t)) != 0)
214 214 return (SET_ERROR(EINTR));
215 215 if (dump_bytes(dsp, data, blksz) != 0)
216 216 return (SET_ERROR(EINTR));
217 217 return (0);
218 218 }
219 219
220 220 static int
221 221 dump_spill(dmu_sendarg_t *dsp, uint64_t object, int blksz, void *data)
222 222 {
223 223 struct drr_spill *drrs = &(dsp->dsa_drr->drr_u.drr_spill);
224 224
225 225 if (dsp->dsa_pending_op != PENDING_NONE) {
226 226 if (dump_bytes(dsp, dsp->dsa_drr,
227 227 sizeof (dmu_replay_record_t)) != 0)
228 228 return (SET_ERROR(EINTR));
229 229 dsp->dsa_pending_op = PENDING_NONE;
230 230 }
231 231
232 232 /* write a SPILL record */
233 233 bzero(dsp->dsa_drr, sizeof (dmu_replay_record_t));
234 234 dsp->dsa_drr->drr_type = DRR_SPILL;
235 235 drrs->drr_object = object;
236 236 drrs->drr_length = blksz;
237 237 drrs->drr_toguid = dsp->dsa_toguid;
238 238
239 239 if (dump_bytes(dsp, dsp->dsa_drr, sizeof (dmu_replay_record_t)))
240 240 return (SET_ERROR(EINTR));
241 241 if (dump_bytes(dsp, data, blksz))
242 242 return (SET_ERROR(EINTR));
243 243 return (0);
244 244 }
245 245
246 246 static int
247 247 dump_freeobjects(dmu_sendarg_t *dsp, uint64_t firstobj, uint64_t numobjs)
248 248 {
249 249 struct drr_freeobjects *drrfo = &(dsp->dsa_drr->drr_u.drr_freeobjects);
250 250
251 251 /* See comment in dump_free(). */
252 252 if (!dsp->dsa_incremental)
253 253 return (0);
254 254
255 255 /*
256 256 * If there is a pending op, but it's not PENDING_FREEOBJECTS,
257 257 * push it out, since free block aggregation can only be done for
258 258 * blocks of the same type (i.e., DRR_FREE records can only be
259 259 * aggregated with other DRR_FREE records. DRR_FREEOBJECTS records
260 260 * can only be aggregated with other DRR_FREEOBJECTS records.
261 261 */
262 262 if (dsp->dsa_pending_op != PENDING_NONE &&
263 263 dsp->dsa_pending_op != PENDING_FREEOBJECTS) {
264 264 if (dump_bytes(dsp, dsp->dsa_drr,
265 265 sizeof (dmu_replay_record_t)) != 0)
266 266 return (SET_ERROR(EINTR));
267 267 dsp->dsa_pending_op = PENDING_NONE;
268 268 }
269 269 if (dsp->dsa_pending_op == PENDING_FREEOBJECTS) {
270 270 /*
271 271 * See whether this free object array can be aggregated
272 272 * with pending one
273 273 */
274 274 if (drrfo->drr_firstobj + drrfo->drr_numobjs == firstobj) {
275 275 drrfo->drr_numobjs += numobjs;
276 276 return (0);
277 277 } else {
278 278 /* can't be aggregated. Push out pending record */
279 279 if (dump_bytes(dsp, dsp->dsa_drr,
280 280 sizeof (dmu_replay_record_t)) != 0)
281 281 return (SET_ERROR(EINTR));
282 282 dsp->dsa_pending_op = PENDING_NONE;
283 283 }
284 284 }
285 285
286 286 /* write a FREEOBJECTS record */
287 287 bzero(dsp->dsa_drr, sizeof (dmu_replay_record_t));
288 288 dsp->dsa_drr->drr_type = DRR_FREEOBJECTS;
289 289 drrfo->drr_firstobj = firstobj;
290 290 drrfo->drr_numobjs = numobjs;
291 291 drrfo->drr_toguid = dsp->dsa_toguid;
292 292
293 293 dsp->dsa_pending_op = PENDING_FREEOBJECTS;
294 294
295 295 return (0);
296 296 }
297 297
298 298 static int
299 299 dump_dnode(dmu_sendarg_t *dsp, uint64_t object, dnode_phys_t *dnp)
300 300 {
301 301 struct drr_object *drro = &(dsp->dsa_drr->drr_u.drr_object);
302 302
303 303 if (dnp == NULL || dnp->dn_type == DMU_OT_NONE)
304 304 return (dump_freeobjects(dsp, object, 1));
305 305
306 306 if (dsp->dsa_pending_op != PENDING_NONE) {
307 307 if (dump_bytes(dsp, dsp->dsa_drr,
308 308 sizeof (dmu_replay_record_t)) != 0)
309 309 return (SET_ERROR(EINTR));
310 310 dsp->dsa_pending_op = PENDING_NONE;
311 311 }
312 312
313 313 /* write an OBJECT record */
314 314 bzero(dsp->dsa_drr, sizeof (dmu_replay_record_t));
315 315 dsp->dsa_drr->drr_type = DRR_OBJECT;
316 316 drro->drr_object = object;
317 317 drro->drr_type = dnp->dn_type;
318 318 drro->drr_bonustype = dnp->dn_bonustype;
319 319 drro->drr_blksz = dnp->dn_datablkszsec << SPA_MINBLOCKSHIFT;
320 320 drro->drr_bonuslen = dnp->dn_bonuslen;
321 321 drro->drr_checksumtype = dnp->dn_checksum;
322 322 drro->drr_compress = dnp->dn_compress;
323 323 drro->drr_toguid = dsp->dsa_toguid;
324 324
325 325 if (dump_bytes(dsp, dsp->dsa_drr, sizeof (dmu_replay_record_t)) != 0)
326 326 return (SET_ERROR(EINTR));
327 327
328 328 if (dump_bytes(dsp, DN_BONUS(dnp), P2ROUNDUP(dnp->dn_bonuslen, 8)) != 0)
329 329 return (SET_ERROR(EINTR));
330 330
331 331 /* Free anything past the end of the file. */
332 332 if (dump_free(dsp, object, (dnp->dn_maxblkid + 1) *
333 333 (dnp->dn_datablkszsec << SPA_MINBLOCKSHIFT), -1ULL) != 0)
334 334 return (SET_ERROR(EINTR));
335 335 if (dsp->dsa_err != 0)
336 336 return (SET_ERROR(EINTR));
337 337 return (0);
338 338 }
339 339
340 340 #define BP_SPAN(dnp, level) \
341 341 (((uint64_t)dnp->dn_datablkszsec) << (SPA_MINBLOCKSHIFT + \
342 342 (level) * (dnp->dn_indblkshift - SPA_BLKPTRSHIFT)))
343 343
344 344 /* ARGSUSED */
345 345 static int
346 346 backup_cb(spa_t *spa, zilog_t *zilog, const blkptr_t *bp,
347 347 const zbookmark_t *zb, const dnode_phys_t *dnp, void *arg)
348 348 {
349 349 dmu_sendarg_t *dsp = arg;
350 350 dmu_object_type_t type = bp ? BP_GET_TYPE(bp) : DMU_OT_NONE;
351 351 int err = 0;
352 352
353 353 if (issig(JUSTLOOKING) && issig(FORREAL))
354 354 return (SET_ERROR(EINTR));
355 355
356 356 if (zb->zb_object != DMU_META_DNODE_OBJECT &&
357 357 DMU_OBJECT_IS_SPECIAL(zb->zb_object)) {
358 358 return (0);
359 359 } else if (bp == NULL && zb->zb_object == DMU_META_DNODE_OBJECT) {
360 360 uint64_t span = BP_SPAN(dnp, zb->zb_level);
361 361 uint64_t dnobj = (zb->zb_blkid * span) >> DNODE_SHIFT;
362 362 err = dump_freeobjects(dsp, dnobj, span >> DNODE_SHIFT);
363 363 } else if (bp == NULL) {
364 364 uint64_t span = BP_SPAN(dnp, zb->zb_level);
365 365 err = dump_free(dsp, zb->zb_object, zb->zb_blkid * span, span);
366 366 } else if (zb->zb_level > 0 || type == DMU_OT_OBJSET) {
367 367 return (0);
368 368 } else if (type == DMU_OT_DNODE) {
369 369 dnode_phys_t *blk;
370 370 int i;
371 371 int blksz = BP_GET_LSIZE(bp);
372 372 uint32_t aflags = ARC_WAIT;
373 373 arc_buf_t *abuf;
374 374
375 375 if (arc_read(NULL, spa, bp, arc_getbuf_func, &abuf,
376 376 ZIO_PRIORITY_ASYNC_READ, ZIO_FLAG_CANFAIL,
377 377 &aflags, zb) != 0)
378 378 return (SET_ERROR(EIO));
379 379
380 380 blk = abuf->b_data;
381 381 for (i = 0; i < blksz >> DNODE_SHIFT; i++) {
382 382 uint64_t dnobj = (zb->zb_blkid <<
383 383 (DNODE_BLOCK_SHIFT - DNODE_SHIFT)) + i;
384 384 err = dump_dnode(dsp, dnobj, blk+i);
385 385 if (err != 0)
386 386 break;
387 387 }
388 388 (void) arc_buf_remove_ref(abuf, &abuf);
389 389 } else if (type == DMU_OT_SA) {
390 390 uint32_t aflags = ARC_WAIT;
391 391 arc_buf_t *abuf;
392 392 int blksz = BP_GET_LSIZE(bp);
393 393
394 394 if (arc_read(NULL, spa, bp, arc_getbuf_func, &abuf,
395 395 ZIO_PRIORITY_ASYNC_READ, ZIO_FLAG_CANFAIL,
396 396 &aflags, zb) != 0)
397 397 return (SET_ERROR(EIO));
398 398
399 399 err = dump_spill(dsp, zb->zb_object, blksz, abuf->b_data);
400 400 (void) arc_buf_remove_ref(abuf, &abuf);
401 401 } else { /* it's a level-0 block of a regular object */
402 402 uint32_t aflags = ARC_WAIT;
403 403 arc_buf_t *abuf;
404 404 int blksz = BP_GET_LSIZE(bp);
405 405
406 406 if (arc_read(NULL, spa, bp, arc_getbuf_func, &abuf,
407 407 ZIO_PRIORITY_ASYNC_READ, ZIO_FLAG_CANFAIL,
408 408 &aflags, zb) != 0) {
409 409 if (zfs_send_corrupt_data) {
410 410 /* Send a block filled with 0x"zfs badd bloc" */
411 411 abuf = arc_buf_alloc(spa, blksz, &abuf,
412 412 ARC_BUFC_DATA);
413 413 uint64_t *ptr;
414 414 for (ptr = abuf->b_data;
415 415 (char *)ptr < (char *)abuf->b_data + blksz;
416 416 ptr++)
417 417 *ptr = 0x2f5baddb10c;
418 418 } else {
419 419 return (SET_ERROR(EIO));
420 420 }
421 421 }
422 422
423 423 err = dump_data(dsp, type, zb->zb_object, zb->zb_blkid * blksz,
424 424 blksz, bp, abuf->b_data);
425 425 (void) arc_buf_remove_ref(abuf, &abuf);
426 426 }
427 427
428 428 ASSERT(err == 0 || err == EINTR);
429 429 return (err);
430 430 }
431 431
432 432 /*
433 433 * Releases dp, ds, and fromds, using the specified tag.
434 434 */
435 435 static int
436 436 dmu_send_impl(void *tag, dsl_pool_t *dp, dsl_dataset_t *ds,
437 437 dsl_dataset_t *fromds, int outfd, vnode_t *vp, offset_t *off)
438 438 {
439 439 objset_t *os;
440 440 dmu_replay_record_t *drr;
441 441 dmu_sendarg_t *dsp;
442 442 int err;
443 443 uint64_t fromtxg = 0;
444 444
445 445 if (fromds != NULL && !dsl_dataset_is_before(ds, fromds)) {
446 446 dsl_dataset_rele(fromds, tag);
447 447 dsl_dataset_rele(ds, tag);
448 448 dsl_pool_rele(dp, tag);
449 449 return (SET_ERROR(EXDEV));
450 450 }
451 451
452 452 err = dmu_objset_from_ds(ds, &os);
453 453 if (err != 0) {
454 454 if (fromds != NULL)
455 455 dsl_dataset_rele(fromds, tag);
456 456 dsl_dataset_rele(ds, tag);
457 457 dsl_pool_rele(dp, tag);
458 458 return (err);
459 459 }
460 460
461 461 drr = kmem_zalloc(sizeof (dmu_replay_record_t), KM_SLEEP);
462 462 drr->drr_type = DRR_BEGIN;
463 463 drr->drr_u.drr_begin.drr_magic = DMU_BACKUP_MAGIC;
464 464 DMU_SET_STREAM_HDRTYPE(drr->drr_u.drr_begin.drr_versioninfo,
465 465 DMU_SUBSTREAM);
466 466
467 467 #ifdef _KERNEL
468 468 if (dmu_objset_type(os) == DMU_OST_ZFS) {
469 469 uint64_t version;
470 470 if (zfs_get_zplprop(os, ZFS_PROP_VERSION, &version) != 0) {
471 471 kmem_free(drr, sizeof (dmu_replay_record_t));
472 472 if (fromds != NULL)
473 473 dsl_dataset_rele(fromds, tag);
474 474 dsl_dataset_rele(ds, tag);
475 475 dsl_pool_rele(dp, tag);
476 476 return (SET_ERROR(EINVAL));
477 477 }
478 478 if (version >= ZPL_VERSION_SA) {
479 479 DMU_SET_FEATUREFLAGS(
480 480 drr->drr_u.drr_begin.drr_versioninfo,
481 481 DMU_BACKUP_FEATURE_SA_SPILL);
482 482 }
483 483 }
484 484 #endif
485 485
486 486 drr->drr_u.drr_begin.drr_creation_time =
487 487 ds->ds_phys->ds_creation_time;
488 488 drr->drr_u.drr_begin.drr_type = dmu_objset_type(os);
489 489 if (fromds != NULL && ds->ds_dir != fromds->ds_dir)
490 490 drr->drr_u.drr_begin.drr_flags |= DRR_FLAG_CLONE;
491 491 drr->drr_u.drr_begin.drr_toguid = ds->ds_phys->ds_guid;
492 492 if (ds->ds_phys->ds_flags & DS_FLAG_CI_DATASET)
493 493 drr->drr_u.drr_begin.drr_flags |= DRR_FLAG_CI_DATA;
494 494
495 495 if (fromds != NULL)
496 496 drr->drr_u.drr_begin.drr_fromguid = fromds->ds_phys->ds_guid;
497 497 dsl_dataset_name(ds, drr->drr_u.drr_begin.drr_toname);
498 498
499 499 if (fromds != NULL) {
500 500 fromtxg = fromds->ds_phys->ds_creation_txg;
501 501 dsl_dataset_rele(fromds, tag);
502 502 fromds = NULL;
503 503 }
504 504
505 505 dsp = kmem_zalloc(sizeof (dmu_sendarg_t), KM_SLEEP);
506 506
507 507 dsp->dsa_drr = drr;
508 508 dsp->dsa_vp = vp;
509 509 dsp->dsa_outfd = outfd;
510 510 dsp->dsa_proc = curproc;
511 511 dsp->dsa_os = os;
512 512 dsp->dsa_off = off;
513 513 dsp->dsa_toguid = ds->ds_phys->ds_guid;
514 514 ZIO_SET_CHECKSUM(&dsp->dsa_zc, 0, 0, 0, 0);
515 515 dsp->dsa_pending_op = PENDING_NONE;
516 516 dsp->dsa_incremental = (fromtxg != 0);
517 517
518 518 mutex_enter(&ds->ds_sendstream_lock);
519 519 list_insert_head(&ds->ds_sendstreams, dsp);
520 520 mutex_exit(&ds->ds_sendstream_lock);
521 521
522 522 dsl_dataset_long_hold(ds, FTAG);
523 523 dsl_pool_rele(dp, tag);
524 524
525 525 if (dump_bytes(dsp, drr, sizeof (dmu_replay_record_t)) != 0) {
526 526 err = dsp->dsa_err;
527 527 goto out;
528 528 }
529 529
530 530 err = traverse_dataset(ds, fromtxg, TRAVERSE_PRE | TRAVERSE_PREFETCH,
531 531 backup_cb, dsp);
532 532
533 533 if (dsp->dsa_pending_op != PENDING_NONE)
534 534 if (dump_bytes(dsp, drr, sizeof (dmu_replay_record_t)) != 0)
535 535 err = SET_ERROR(EINTR);
536 536
537 537 if (err != 0) {
538 538 if (err == EINTR && dsp->dsa_err != 0)
539 539 err = dsp->dsa_err;
540 540 goto out;
541 541 }
542 542
543 543 bzero(drr, sizeof (dmu_replay_record_t));
544 544 drr->drr_type = DRR_END;
545 545 drr->drr_u.drr_end.drr_checksum = dsp->dsa_zc;
546 546 drr->drr_u.drr_end.drr_toguid = dsp->dsa_toguid;
547 547
548 548 if (dump_bytes(dsp, drr, sizeof (dmu_replay_record_t)) != 0) {
549 549 err = dsp->dsa_err;
550 550 goto out;
551 551 }
552 552
553 553 out:
554 554 mutex_enter(&ds->ds_sendstream_lock);
555 555 list_remove(&ds->ds_sendstreams, dsp);
556 556 mutex_exit(&ds->ds_sendstream_lock);
557 557
558 558 kmem_free(drr, sizeof (dmu_replay_record_t));
559 559 kmem_free(dsp, sizeof (dmu_sendarg_t));
560 560
561 561 dsl_dataset_long_rele(ds, FTAG);
562 562 dsl_dataset_rele(ds, tag);
563 563
564 564 return (err);
565 565 }
566 566
567 567 int
568 568 dmu_send_obj(const char *pool, uint64_t tosnap, uint64_t fromsnap,
569 569 int outfd, vnode_t *vp, offset_t *off)
570 570 {
571 571 dsl_pool_t *dp;
572 572 dsl_dataset_t *ds;
573 573 dsl_dataset_t *fromds = NULL;
574 574 int err;
575 575
576 576 err = dsl_pool_hold(pool, FTAG, &dp);
577 577 if (err != 0)
578 578 return (err);
579 579
580 580 err = dsl_dataset_hold_obj(dp, tosnap, FTAG, &ds);
581 581 if (err != 0) {
582 582 dsl_pool_rele(dp, FTAG);
583 583 return (err);
584 584 }
585 585
586 586 if (fromsnap != 0) {
587 587 err = dsl_dataset_hold_obj(dp, fromsnap, FTAG, &fromds);
588 588 if (err != 0) {
589 589 dsl_dataset_rele(ds, FTAG);
590 590 dsl_pool_rele(dp, FTAG);
591 591 return (err);
592 592 }
593 593 }
594 594
595 595 return (dmu_send_impl(FTAG, dp, ds, fromds, outfd, vp, off));
596 596 }
597 597
598 598 int
599 599 dmu_send(const char *tosnap, const char *fromsnap,
600 600 int outfd, vnode_t *vp, offset_t *off)
601 601 {
602 602 dsl_pool_t *dp;
603 603 dsl_dataset_t *ds;
604 604 dsl_dataset_t *fromds = NULL;
605 605 int err;
606 606
607 607 if (strchr(tosnap, '@') == NULL)
608 608 return (SET_ERROR(EINVAL));
609 609 if (fromsnap != NULL && strchr(fromsnap, '@') == NULL)
610 610 return (SET_ERROR(EINVAL));
611 611
612 612 err = dsl_pool_hold(tosnap, FTAG, &dp);
613 613 if (err != 0)
614 614 return (err);
615 615
616 616 err = dsl_dataset_hold(dp, tosnap, FTAG, &ds);
617 617 if (err != 0) {
618 618 dsl_pool_rele(dp, FTAG);
619 619 return (err);
620 620 }
621 621
622 622 if (fromsnap != NULL) {
623 623 err = dsl_dataset_hold(dp, fromsnap, FTAG, &fromds);
624 624 if (err != 0) {
625 625 dsl_dataset_rele(ds, FTAG);
626 626 dsl_pool_rele(dp, FTAG);
627 627 return (err);
628 628 }
629 629 }
630 630 return (dmu_send_impl(FTAG, dp, ds, fromds, outfd, vp, off));
631 631 }
632 632
633 633 int
634 634 dmu_send_estimate(dsl_dataset_t *ds, dsl_dataset_t *fromds, uint64_t *sizep)
635 635 {
636 636 dsl_pool_t *dp = ds->ds_dir->dd_pool;
637 637 int err;
638 638 uint64_t size;
639 639
640 640 ASSERT(dsl_pool_config_held(dp));
641 641
642 642 /* tosnap must be a snapshot */
643 643 if (!dsl_dataset_is_snapshot(ds))
644 644 return (SET_ERROR(EINVAL));
645 645
646 646 /*
647 647 * fromsnap must be an earlier snapshot from the same fs as tosnap,
648 648 * or the origin's fs.
649 649 */
650 650 if (fromds != NULL && !dsl_dataset_is_before(ds, fromds))
651 651 return (SET_ERROR(EXDEV));
652 652
653 653 /* Get uncompressed size estimate of changed data. */
654 654 if (fromds == NULL) {
655 655 size = ds->ds_phys->ds_uncompressed_bytes;
656 656 } else {
657 657 uint64_t used, comp;
658 658 err = dsl_dataset_space_written(fromds, ds,
659 659 &used, &comp, &size);
660 660 if (err != 0)
661 661 return (err);
662 662 }
663 663
664 664 /*
665 665 * Assume that space (both on-disk and in-stream) is dominated by
666 666 * data. We will adjust for indirect blocks and the copies property,
667 667 * but ignore per-object space used (eg, dnodes and DRR_OBJECT records).
668 668 */
669 669
670 670 /*
671 671 * Subtract out approximate space used by indirect blocks.
672 672 * Assume most space is used by data blocks (non-indirect, non-dnode).
673 673 * Assume all blocks are recordsize. Assume ditto blocks and
674 674 * internal fragmentation counter out compression.
675 675 *
676 676 * Therefore, space used by indirect blocks is sizeof(blkptr_t) per
677 677 * block, which we observe in practice.
678 678 */
679 679 uint64_t recordsize;
680 680 err = dsl_prop_get_int_ds(ds, "recordsize", &recordsize);
681 681 if (err != 0)
682 682 return (err);
683 683 size -= size / recordsize * sizeof (blkptr_t);
684 684
685 685 /* Add in the space for the record associated with each block. */
686 686 size += size / recordsize * sizeof (dmu_replay_record_t);
687 687
688 688 *sizep = size;
689 689
690 690 return (0);
691 691 }
692 692
693 693 typedef struct dmu_recv_begin_arg {
694 694 const char *drba_origin;
695 695 dmu_recv_cookie_t *drba_cookie;
696 696 cred_t *drba_cred;
697 697 uint64_t drba_snapobj;
698 698 } dmu_recv_begin_arg_t;
699 699
700 700 static int
701 701 recv_begin_check_existing_impl(dmu_recv_begin_arg_t *drba, dsl_dataset_t *ds,
702 702 uint64_t fromguid)
703 703 {
704 704 uint64_t val;
705 705 int error;
706 706 dsl_pool_t *dp = ds->ds_dir->dd_pool;
707 707
708 708 /* temporary clone name must not exist */
709 709 error = zap_lookup(dp->dp_meta_objset,
710 710 ds->ds_dir->dd_phys->dd_child_dir_zapobj, recv_clone_name,
711 711 8, 1, &val);
712 712 if (error != ENOENT)
713 713 return (error == 0 ? EBUSY : error);
714 714
715 715 /* new snapshot name must not exist */
716 716 error = zap_lookup(dp->dp_meta_objset,
717 717 ds->ds_phys->ds_snapnames_zapobj, drba->drba_cookie->drc_tosnap,
718 718 8, 1, &val);
719 719 if (error != ENOENT)
720 720 return (error == 0 ? EEXIST : error);
721 721
722 722 if (fromguid != 0) {
723 723 dsl_dataset_t *snap;
724 724 uint64_t obj = ds->ds_phys->ds_prev_snap_obj;
725 725
726 726 /* Find snapshot in this dir that matches fromguid. */
727 727 while (obj != 0) {
728 728 error = dsl_dataset_hold_obj(dp, obj, FTAG,
729 729 &snap);
730 730 if (error != 0)
731 731 return (SET_ERROR(ENODEV));
732 732 if (snap->ds_dir != ds->ds_dir) {
733 733 dsl_dataset_rele(snap, FTAG);
734 734 return (SET_ERROR(ENODEV));
735 735 }
736 736 if (snap->ds_phys->ds_guid == fromguid)
737 737 break;
738 738 obj = snap->ds_phys->ds_prev_snap_obj;
739 739 dsl_dataset_rele(snap, FTAG);
740 740 }
741 741 if (obj == 0)
742 742 return (SET_ERROR(ENODEV));
743 743
744 744 if (drba->drba_cookie->drc_force) {
745 745 drba->drba_snapobj = obj;
746 746 } else {
747 747 /*
748 748 * If we are not forcing, there must be no
749 749 * changes since fromsnap.
750 750 */
751 751 if (dsl_dataset_modified_since_snap(ds, snap)) {
752 752 dsl_dataset_rele(snap, FTAG);
753 753 return (SET_ERROR(ETXTBSY));
754 754 }
755 755 drba->drba_snapobj = ds->ds_prev->ds_object;
756 756 }
757 757
758 758 dsl_dataset_rele(snap, FTAG);
759 759 } else {
760 760 /* if full, most recent snapshot must be $ORIGIN */
761 761 if (ds->ds_phys->ds_prev_snap_txg >= TXG_INITIAL)
762 762 return (SET_ERROR(ENODEV));
763 763 drba->drba_snapobj = ds->ds_phys->ds_prev_snap_obj;
764 764 }
765 765
766 766 return (0);
767 767
768 768 }
769 769
770 770 static int
771 771 dmu_recv_begin_check(void *arg, dmu_tx_t *tx)
772 772 {
773 773 dmu_recv_begin_arg_t *drba = arg;
774 774 dsl_pool_t *dp = dmu_tx_pool(tx);
775 775 struct drr_begin *drrb = drba->drba_cookie->drc_drrb;
776 776 uint64_t fromguid = drrb->drr_fromguid;
777 777 int flags = drrb->drr_flags;
778 778 int error;
779 779 dsl_dataset_t *ds;
780 780 const char *tofs = drba->drba_cookie->drc_tofs;
781 781
782 782 /* already checked */
783 783 ASSERT3U(drrb->drr_magic, ==, DMU_BACKUP_MAGIC);
784 784
785 785 if (DMU_GET_STREAM_HDRTYPE(drrb->drr_versioninfo) ==
786 786 DMU_COMPOUNDSTREAM ||
787 787 drrb->drr_type >= DMU_OST_NUMTYPES ||
788 788 ((flags & DRR_FLAG_CLONE) && drba->drba_origin == NULL))
789 789 return (SET_ERROR(EINVAL));
790 790
791 791 /* Verify pool version supports SA if SA_SPILL feature set */
792 792 if ((DMU_GET_FEATUREFLAGS(drrb->drr_versioninfo) &
793 793 DMU_BACKUP_FEATURE_SA_SPILL) &&
794 794 spa_version(dp->dp_spa) < SPA_VERSION_SA) {
795 795 return (SET_ERROR(ENOTSUP));
796 796 }
797 797
798 798 error = dsl_dataset_hold(dp, tofs, FTAG, &ds);
799 799 if (error == 0) {
800 800 /* target fs already exists; recv into temp clone */
801 801
802 802 /* Can't recv a clone into an existing fs */
803 803 if (flags & DRR_FLAG_CLONE) {
804 804 dsl_dataset_rele(ds, FTAG);
805 805 return (SET_ERROR(EINVAL));
806 806 }
807 807
808 808 error = recv_begin_check_existing_impl(drba, ds, fromguid);
809 809 dsl_dataset_rele(ds, FTAG);
810 810 } else if (error == ENOENT) {
811 811 /* target fs does not exist; must be a full backup or clone */
812 812 char buf[MAXNAMELEN];
813 813
814 814 /*
815 815 * If it's a non-clone incremental, we are missing the
816 816 * target fs, so fail the recv.
817 817 */
818 818 if (fromguid != 0 && !(flags & DRR_FLAG_CLONE))
819 819 return (SET_ERROR(ENOENT));
820 820
821 821 /* Open the parent of tofs */
822 822 ASSERT3U(strlen(tofs), <, MAXNAMELEN);
823 823 (void) strlcpy(buf, tofs, strrchr(tofs, '/') - tofs + 1);
824 824 error = dsl_dataset_hold(dp, buf, FTAG, &ds);
825 825 if (error != 0)
826 826 return (error);
827 827
828 828 if (drba->drba_origin != NULL) {
829 829 dsl_dataset_t *origin;
830 830 error = dsl_dataset_hold(dp, drba->drba_origin,
831 831 FTAG, &origin);
832 832 if (error != 0) {
833 833 dsl_dataset_rele(ds, FTAG);
834 834 return (error);
835 835 }
836 836 if (!dsl_dataset_is_snapshot(origin)) {
837 837 dsl_dataset_rele(origin, FTAG);
838 838 dsl_dataset_rele(ds, FTAG);
839 839 return (SET_ERROR(EINVAL));
840 840 }
841 841 if (origin->ds_phys->ds_guid != fromguid) {
842 842 dsl_dataset_rele(origin, FTAG);
843 843 dsl_dataset_rele(ds, FTAG);
844 844 return (SET_ERROR(ENODEV));
845 845 }
846 846 dsl_dataset_rele(origin, FTAG);
847 847 }
848 848 dsl_dataset_rele(ds, FTAG);
849 849 error = 0;
850 850 }
851 851 return (error);
852 852 }
853 853
854 854 static void
855 855 dmu_recv_begin_sync(void *arg, dmu_tx_t *tx)
856 856 {
857 857 dmu_recv_begin_arg_t *drba = arg;
858 858 dsl_pool_t *dp = dmu_tx_pool(tx);
859 859 struct drr_begin *drrb = drba->drba_cookie->drc_drrb;
860 860 const char *tofs = drba->drba_cookie->drc_tofs;
861 861 dsl_dataset_t *ds, *newds;
862 862 uint64_t dsobj;
863 863 int error;
864 864 uint64_t crflags;
865 865
866 866 crflags = (drrb->drr_flags & DRR_FLAG_CI_DATA) ?
867 867 DS_FLAG_CI_DATASET : 0;
868 868
869 869 error = dsl_dataset_hold(dp, tofs, FTAG, &ds);
870 870 if (error == 0) {
871 871 /* create temporary clone */
872 872 dsl_dataset_t *snap = NULL;
873 873 if (drba->drba_snapobj != 0) {
874 874 VERIFY0(dsl_dataset_hold_obj(dp,
875 875 drba->drba_snapobj, FTAG, &snap));
876 876 }
877 877 dsobj = dsl_dataset_create_sync(ds->ds_dir, recv_clone_name,
878 878 snap, crflags, drba->drba_cred, tx);
879 879 dsl_dataset_rele(snap, FTAG);
880 880 dsl_dataset_rele(ds, FTAG);
881 881 } else {
882 882 dsl_dir_t *dd;
883 883 const char *tail;
884 884 dsl_dataset_t *origin = NULL;
885 885
886 886 VERIFY0(dsl_dir_hold(dp, tofs, FTAG, &dd, &tail));
887 887
888 888 if (drba->drba_origin != NULL) {
889 889 VERIFY0(dsl_dataset_hold(dp, drba->drba_origin,
890 890 FTAG, &origin));
891 891 }
892 892
893 893 /* Create new dataset. */
894 894 dsobj = dsl_dataset_create_sync(dd,
895 895 strrchr(tofs, '/') + 1,
896 896 origin, crflags, drba->drba_cred, tx);
897 897 if (origin != NULL)
898 898 dsl_dataset_rele(origin, FTAG);
899 899 dsl_dir_rele(dd, FTAG);
900 900 drba->drba_cookie->drc_newfs = B_TRUE;
901 901 }
902 902 VERIFY0(dsl_dataset_own_obj(dp, dsobj, dmu_recv_tag, &newds));
903 903
904 904 dmu_buf_will_dirty(newds->ds_dbuf, tx);
905 905 newds->ds_phys->ds_flags |= DS_FLAG_INCONSISTENT;
906 906
907 907 /*
908 908 * If we actually created a non-clone, we need to create the
909 909 * objset in our new dataset.
910 910 */
911 911 if (BP_IS_HOLE(dsl_dataset_get_blkptr(newds))) {
912 912 (void) dmu_objset_create_impl(dp->dp_spa,
913 913 newds, dsl_dataset_get_blkptr(newds), drrb->drr_type, tx);
914 914 }
915 915
916 916 drba->drba_cookie->drc_ds = newds;
917 917
918 918 spa_history_log_internal_ds(newds, "receive", tx, "");
919 919 }
920 920
921 921 /*
922 922 * NB: callers *MUST* call dmu_recv_stream() if dmu_recv_begin()
923 923 * succeeds; otherwise we will leak the holds on the datasets.
924 924 */
925 925 int
926 926 dmu_recv_begin(char *tofs, char *tosnap, struct drr_begin *drrb,
927 927 boolean_t force, char *origin, dmu_recv_cookie_t *drc)
928 928 {
929 929 dmu_recv_begin_arg_t drba = { 0 };
930 930 dmu_replay_record_t *drr;
931 931
932 932 bzero(drc, sizeof (dmu_recv_cookie_t));
933 933 drc->drc_drrb = drrb;
934 934 drc->drc_tosnap = tosnap;
935 935 drc->drc_tofs = tofs;
936 936 drc->drc_force = force;
937 937
938 938 if (drrb->drr_magic == BSWAP_64(DMU_BACKUP_MAGIC))
939 939 drc->drc_byteswap = B_TRUE;
940 940 else if (drrb->drr_magic != DMU_BACKUP_MAGIC)
941 941 return (SET_ERROR(EINVAL));
942 942
943 943 drr = kmem_zalloc(sizeof (dmu_replay_record_t), KM_SLEEP);
944 944 drr->drr_type = DRR_BEGIN;
945 945 drr->drr_u.drr_begin = *drc->drc_drrb;
946 946 if (drc->drc_byteswap) {
947 947 fletcher_4_incremental_byteswap(drr,
948 948 sizeof (dmu_replay_record_t), &drc->drc_cksum);
949 949 } else {
950 950 fletcher_4_incremental_native(drr,
951 951 sizeof (dmu_replay_record_t), &drc->drc_cksum);
952 952 }
953 953 kmem_free(drr, sizeof (dmu_replay_record_t));
954 954
955 955 if (drc->drc_byteswap) {
956 956 drrb->drr_magic = BSWAP_64(drrb->drr_magic);
957 957 drrb->drr_versioninfo = BSWAP_64(drrb->drr_versioninfo);
958 958 drrb->drr_creation_time = BSWAP_64(drrb->drr_creation_time);
959 959 drrb->drr_type = BSWAP_32(drrb->drr_type);
960 960 drrb->drr_toguid = BSWAP_64(drrb->drr_toguid);
961 961 drrb->drr_fromguid = BSWAP_64(drrb->drr_fromguid);
962 962 }
963 963
964 964 drba.drba_origin = origin;
965 965 drba.drba_cookie = drc;
966 966 drba.drba_cred = CRED();
967 967
968 968 return (dsl_sync_task(tofs, dmu_recv_begin_check, dmu_recv_begin_sync,
969 969 &drba, 5));
970 970 }
971 971
972 972 struct restorearg {
973 973 int err;
974 974 boolean_t byteswap;
975 975 vnode_t *vp;
976 976 char *buf;
977 977 uint64_t voff;
978 978 int bufsize; /* amount of memory allocated for buf */
979 979 zio_cksum_t cksum;
980 980 avl_tree_t *guid_to_ds_map;
981 981 };
982 982
983 983 typedef struct guid_map_entry {
984 984 uint64_t guid;
985 985 dsl_dataset_t *gme_ds;
986 986 avl_node_t avlnode;
987 987 } guid_map_entry_t;
988 988
989 989 static int
990 990 guid_compare(const void *arg1, const void *arg2)
991 991 {
992 992 const guid_map_entry_t *gmep1 = arg1;
993 993 const guid_map_entry_t *gmep2 = arg2;
994 994
995 995 if (gmep1->guid < gmep2->guid)
996 996 return (-1);
997 997 else if (gmep1->guid > gmep2->guid)
998 998 return (1);
999 999 return (0);
1000 1000 }
1001 1001
1002 1002 static void
1003 1003 free_guid_map_onexit(void *arg)
1004 1004 {
1005 1005 avl_tree_t *ca = arg;
1006 1006 void *cookie = NULL;
1007 1007 guid_map_entry_t *gmep;
1008 1008
1009 1009 while ((gmep = avl_destroy_nodes(ca, &cookie)) != NULL) {
1010 1010 dsl_dataset_long_rele(gmep->gme_ds, gmep);
1011 1011 dsl_dataset_rele(gmep->gme_ds, gmep);
1012 1012 kmem_free(gmep, sizeof (guid_map_entry_t));
1013 1013 }
1014 1014 avl_destroy(ca);
1015 1015 kmem_free(ca, sizeof (avl_tree_t));
1016 1016 }
1017 1017
1018 1018 static void *
1019 1019 restore_read(struct restorearg *ra, int len)
1020 1020 {
1021 1021 void *rv;
1022 1022 int done = 0;
1023 1023
1024 1024 /* some things will require 8-byte alignment, so everything must */
1025 1025 ASSERT0(len % 8);
1026 1026
1027 1027 while (done < len) {
1028 1028 ssize_t resid;
1029 1029
1030 1030 ra->err = vn_rdwr(UIO_READ, ra->vp,
1031 1031 (caddr_t)ra->buf + done, len - done,
1032 1032 ra->voff, UIO_SYSSPACE, FAPPEND,
1033 1033 RLIM64_INFINITY, CRED(), &resid);
1034 1034
1035 1035 if (resid == len - done)
1036 1036 ra->err = SET_ERROR(EINVAL);
1037 1037 ra->voff += len - done - resid;
1038 1038 done = len - resid;
1039 1039 if (ra->err != 0)
1040 1040 return (NULL);
1041 1041 }
1042 1042
1043 1043 ASSERT3U(done, ==, len);
1044 1044 rv = ra->buf;
1045 1045 if (ra->byteswap)
1046 1046 fletcher_4_incremental_byteswap(rv, len, &ra->cksum);
1047 1047 else
1048 1048 fletcher_4_incremental_native(rv, len, &ra->cksum);
1049 1049 return (rv);
1050 1050 }
1051 1051
1052 1052 static void
1053 1053 backup_byteswap(dmu_replay_record_t *drr)
1054 1054 {
1055 1055 #define DO64(X) (drr->drr_u.X = BSWAP_64(drr->drr_u.X))
1056 1056 #define DO32(X) (drr->drr_u.X = BSWAP_32(drr->drr_u.X))
1057 1057 drr->drr_type = BSWAP_32(drr->drr_type);
1058 1058 drr->drr_payloadlen = BSWAP_32(drr->drr_payloadlen);
1059 1059 switch (drr->drr_type) {
1060 1060 case DRR_BEGIN:
1061 1061 DO64(drr_begin.drr_magic);
1062 1062 DO64(drr_begin.drr_versioninfo);
1063 1063 DO64(drr_begin.drr_creation_time);
1064 1064 DO32(drr_begin.drr_type);
1065 1065 DO32(drr_begin.drr_flags);
1066 1066 DO64(drr_begin.drr_toguid);
1067 1067 DO64(drr_begin.drr_fromguid);
1068 1068 break;
1069 1069 case DRR_OBJECT:
1070 1070 DO64(drr_object.drr_object);
1071 1071 /* DO64(drr_object.drr_allocation_txg); */
1072 1072 DO32(drr_object.drr_type);
1073 1073 DO32(drr_object.drr_bonustype);
1074 1074 DO32(drr_object.drr_blksz);
1075 1075 DO32(drr_object.drr_bonuslen);
1076 1076 DO64(drr_object.drr_toguid);
1077 1077 break;
1078 1078 case DRR_FREEOBJECTS:
1079 1079 DO64(drr_freeobjects.drr_firstobj);
1080 1080 DO64(drr_freeobjects.drr_numobjs);
1081 1081 DO64(drr_freeobjects.drr_toguid);
1082 1082 break;
1083 1083 case DRR_WRITE:
1084 1084 DO64(drr_write.drr_object);
1085 1085 DO32(drr_write.drr_type);
1086 1086 DO64(drr_write.drr_offset);
1087 1087 DO64(drr_write.drr_length);
1088 1088 DO64(drr_write.drr_toguid);
1089 1089 DO64(drr_write.drr_key.ddk_cksum.zc_word[0]);
1090 1090 DO64(drr_write.drr_key.ddk_cksum.zc_word[1]);
1091 1091 DO64(drr_write.drr_key.ddk_cksum.zc_word[2]);
1092 1092 DO64(drr_write.drr_key.ddk_cksum.zc_word[3]);
1093 1093 DO64(drr_write.drr_key.ddk_prop);
1094 1094 break;
1095 1095 case DRR_WRITE_BYREF:
1096 1096 DO64(drr_write_byref.drr_object);
1097 1097 DO64(drr_write_byref.drr_offset);
1098 1098 DO64(drr_write_byref.drr_length);
1099 1099 DO64(drr_write_byref.drr_toguid);
1100 1100 DO64(drr_write_byref.drr_refguid);
1101 1101 DO64(drr_write_byref.drr_refobject);
1102 1102 DO64(drr_write_byref.drr_refoffset);
1103 1103 DO64(drr_write_byref.drr_key.ddk_cksum.zc_word[0]);
1104 1104 DO64(drr_write_byref.drr_key.ddk_cksum.zc_word[1]);
1105 1105 DO64(drr_write_byref.drr_key.ddk_cksum.zc_word[2]);
1106 1106 DO64(drr_write_byref.drr_key.ddk_cksum.zc_word[3]);
1107 1107 DO64(drr_write_byref.drr_key.ddk_prop);
1108 1108 break;
1109 1109 case DRR_FREE:
1110 1110 DO64(drr_free.drr_object);
1111 1111 DO64(drr_free.drr_offset);
1112 1112 DO64(drr_free.drr_length);
1113 1113 DO64(drr_free.drr_toguid);
1114 1114 break;
1115 1115 case DRR_SPILL:
1116 1116 DO64(drr_spill.drr_object);
1117 1117 DO64(drr_spill.drr_length);
1118 1118 DO64(drr_spill.drr_toguid);
1119 1119 break;
1120 1120 case DRR_END:
1121 1121 DO64(drr_end.drr_checksum.zc_word[0]);
1122 1122 DO64(drr_end.drr_checksum.zc_word[1]);
1123 1123 DO64(drr_end.drr_checksum.zc_word[2]);
1124 1124 DO64(drr_end.drr_checksum.zc_word[3]);
1125 1125 DO64(drr_end.drr_toguid);
1126 1126 break;
1127 1127 }
1128 1128 #undef DO64
1129 1129 #undef DO32
1130 1130 }
1131 1131
1132 1132 static int
1133 1133 restore_object(struct restorearg *ra, objset_t *os, struct drr_object *drro)
1134 1134 {
1135 1135 int err;
1136 1136 dmu_tx_t *tx;
1137 1137 void *data = NULL;
1138 1138
1139 1139 if (drro->drr_type == DMU_OT_NONE ||
1140 1140 !DMU_OT_IS_VALID(drro->drr_type) ||
1141 1141 !DMU_OT_IS_VALID(drro->drr_bonustype) ||
1142 1142 drro->drr_checksumtype >= ZIO_CHECKSUM_FUNCTIONS ||
1143 1143 drro->drr_compress >= ZIO_COMPRESS_FUNCTIONS ||
1144 1144 P2PHASE(drro->drr_blksz, SPA_MINBLOCKSIZE) ||
1145 1145 drro->drr_blksz < SPA_MINBLOCKSIZE ||
1146 1146 drro->drr_blksz > SPA_MAXBLOCKSIZE ||
1147 1147 drro->drr_bonuslen > DN_MAX_BONUSLEN) {
1148 1148 return (SET_ERROR(EINVAL));
1149 1149 }
1150 1150
1151 1151 err = dmu_object_info(os, drro->drr_object, NULL);
1152 1152
1153 1153 if (err != 0 && err != ENOENT)
1154 1154 return (SET_ERROR(EINVAL));
1155 1155
1156 1156 if (drro->drr_bonuslen) {
1157 1157 data = restore_read(ra, P2ROUNDUP(drro->drr_bonuslen, 8));
1158 1158 if (ra->err != 0)
1159 1159 return (ra->err);
1160 1160 }
1161 1161
1162 1162 if (err == ENOENT) {
1163 1163 /* currently free, want to be allocated */
1164 1164 tx = dmu_tx_create(os);
1165 1165 dmu_tx_hold_bonus(tx, DMU_NEW_OBJECT);
1166 1166 err = dmu_tx_assign(tx, TXG_WAIT);
1167 1167 if (err != 0) {
1168 1168 dmu_tx_abort(tx);
1169 1169 return (err);
1170 1170 }
1171 1171 err = dmu_object_claim(os, drro->drr_object,
1172 1172 drro->drr_type, drro->drr_blksz,
1173 1173 drro->drr_bonustype, drro->drr_bonuslen, tx);
1174 1174 dmu_tx_commit(tx);
1175 1175 } else {
1176 1176 /* currently allocated, want to be allocated */
1177 1177 err = dmu_object_reclaim(os, drro->drr_object,
1178 1178 drro->drr_type, drro->drr_blksz,
1179 1179 drro->drr_bonustype, drro->drr_bonuslen);
1180 1180 }
1181 1181 if (err != 0) {
1182 1182 return (SET_ERROR(EINVAL));
1183 1183 }
1184 1184
1185 1185 tx = dmu_tx_create(os);
1186 1186 dmu_tx_hold_bonus(tx, drro->drr_object);
1187 1187 err = dmu_tx_assign(tx, TXG_WAIT);
1188 1188 if (err != 0) {
1189 1189 dmu_tx_abort(tx);
1190 1190 return (err);
1191 1191 }
1192 1192
1193 1193 dmu_object_set_checksum(os, drro->drr_object, drro->drr_checksumtype,
1194 1194 tx);
1195 1195 dmu_object_set_compress(os, drro->drr_object, drro->drr_compress, tx);
1196 1196
1197 1197 if (data != NULL) {
1198 1198 dmu_buf_t *db;
1199 1199
1200 1200 VERIFY(0 == dmu_bonus_hold(os, drro->drr_object, FTAG, &db));
1201 1201 dmu_buf_will_dirty(db, tx);
1202 1202
1203 1203 ASSERT3U(db->db_size, >=, drro->drr_bonuslen);
1204 1204 bcopy(data, db->db_data, drro->drr_bonuslen);
1205 1205 if (ra->byteswap) {
1206 1206 dmu_object_byteswap_t byteswap =
1207 1207 DMU_OT_BYTESWAP(drro->drr_bonustype);
1208 1208 dmu_ot_byteswap[byteswap].ob_func(db->db_data,
1209 1209 drro->drr_bonuslen);
1210 1210 }
1211 1211 dmu_buf_rele(db, FTAG);
1212 1212 }
1213 1213 dmu_tx_commit(tx);
1214 1214 return (0);
1215 1215 }
1216 1216
1217 1217 /* ARGSUSED */
1218 1218 static int
1219 1219 restore_freeobjects(struct restorearg *ra, objset_t *os,
1220 1220 struct drr_freeobjects *drrfo)
1221 1221 {
1222 1222 uint64_t obj;
1223 1223
1224 1224 if (drrfo->drr_firstobj + drrfo->drr_numobjs < drrfo->drr_firstobj)
1225 1225 return (SET_ERROR(EINVAL));
1226 1226
1227 1227 for (obj = drrfo->drr_firstobj;
1228 1228 obj < drrfo->drr_firstobj + drrfo->drr_numobjs;
1229 1229 (void) dmu_object_next(os, &obj, FALSE, 0)) {
1230 1230 int err;
1231 1231
1232 1232 if (dmu_object_info(os, obj, NULL) != 0)
1233 1233 continue;
1234 1234
1235 - err = dmu_free_object(os, obj);
1235 + err = dmu_free_long_object(os, obj);
1236 1236 if (err != 0)
1237 1237 return (err);
1238 1238 }
1239 1239 return (0);
1240 1240 }
1241 1241
1242 1242 static int
1243 1243 restore_write(struct restorearg *ra, objset_t *os,
1244 1244 struct drr_write *drrw)
1245 1245 {
1246 1246 dmu_tx_t *tx;
1247 1247 void *data;
1248 1248 int err;
1249 1249
1250 1250 if (drrw->drr_offset + drrw->drr_length < drrw->drr_offset ||
1251 1251 !DMU_OT_IS_VALID(drrw->drr_type))
1252 1252 return (SET_ERROR(EINVAL));
1253 1253
1254 1254 data = restore_read(ra, drrw->drr_length);
1255 1255 if (data == NULL)
1256 1256 return (ra->err);
1257 1257
1258 1258 if (dmu_object_info(os, drrw->drr_object, NULL) != 0)
1259 1259 return (SET_ERROR(EINVAL));
1260 1260
1261 1261 tx = dmu_tx_create(os);
1262 1262
1263 1263 dmu_tx_hold_write(tx, drrw->drr_object,
1264 1264 drrw->drr_offset, drrw->drr_length);
1265 1265 err = dmu_tx_assign(tx, TXG_WAIT);
1266 1266 if (err != 0) {
1267 1267 dmu_tx_abort(tx);
1268 1268 return (err);
1269 1269 }
1270 1270 if (ra->byteswap) {
1271 1271 dmu_object_byteswap_t byteswap =
1272 1272 DMU_OT_BYTESWAP(drrw->drr_type);
1273 1273 dmu_ot_byteswap[byteswap].ob_func(data, drrw->drr_length);
1274 1274 }
1275 1275 dmu_write(os, drrw->drr_object,
1276 1276 drrw->drr_offset, drrw->drr_length, data, tx);
1277 1277 dmu_tx_commit(tx);
1278 1278 return (0);
1279 1279 }
1280 1280
1281 1281 /*
1282 1282 * Handle a DRR_WRITE_BYREF record. This record is used in dedup'ed
1283 1283 * streams to refer to a copy of the data that is already on the
1284 1284 * system because it came in earlier in the stream. This function
1285 1285 * finds the earlier copy of the data, and uses that copy instead of
1286 1286 * data from the stream to fulfill this write.
1287 1287 */
1288 1288 static int
1289 1289 restore_write_byref(struct restorearg *ra, objset_t *os,
1290 1290 struct drr_write_byref *drrwbr)
1291 1291 {
1292 1292 dmu_tx_t *tx;
1293 1293 int err;
1294 1294 guid_map_entry_t gmesrch;
1295 1295 guid_map_entry_t *gmep;
1296 1296 avl_index_t where;
1297 1297 objset_t *ref_os = NULL;
1298 1298 dmu_buf_t *dbp;
1299 1299
1300 1300 if (drrwbr->drr_offset + drrwbr->drr_length < drrwbr->drr_offset)
1301 1301 return (SET_ERROR(EINVAL));
1302 1302
1303 1303 /*
1304 1304 * If the GUID of the referenced dataset is different from the
1305 1305 * GUID of the target dataset, find the referenced dataset.
1306 1306 */
1307 1307 if (drrwbr->drr_toguid != drrwbr->drr_refguid) {
1308 1308 gmesrch.guid = drrwbr->drr_refguid;
1309 1309 if ((gmep = avl_find(ra->guid_to_ds_map, &gmesrch,
1310 1310 &where)) == NULL) {
1311 1311 return (SET_ERROR(EINVAL));
1312 1312 }
1313 1313 if (dmu_objset_from_ds(gmep->gme_ds, &ref_os))
1314 1314 return (SET_ERROR(EINVAL));
1315 1315 } else {
1316 1316 ref_os = os;
1317 1317 }
1318 1318
1319 1319 if (err = dmu_buf_hold(ref_os, drrwbr->drr_refobject,
1320 1320 drrwbr->drr_refoffset, FTAG, &dbp, DMU_READ_PREFETCH))
1321 1321 return (err);
1322 1322
1323 1323 tx = dmu_tx_create(os);
1324 1324
1325 1325 dmu_tx_hold_write(tx, drrwbr->drr_object,
1326 1326 drrwbr->drr_offset, drrwbr->drr_length);
1327 1327 err = dmu_tx_assign(tx, TXG_WAIT);
1328 1328 if (err != 0) {
1329 1329 dmu_tx_abort(tx);
1330 1330 return (err);
1331 1331 }
1332 1332 dmu_write(os, drrwbr->drr_object,
1333 1333 drrwbr->drr_offset, drrwbr->drr_length, dbp->db_data, tx);
1334 1334 dmu_buf_rele(dbp, FTAG);
1335 1335 dmu_tx_commit(tx);
1336 1336 return (0);
1337 1337 }
1338 1338
1339 1339 static int
1340 1340 restore_spill(struct restorearg *ra, objset_t *os, struct drr_spill *drrs)
1341 1341 {
1342 1342 dmu_tx_t *tx;
1343 1343 void *data;
1344 1344 dmu_buf_t *db, *db_spill;
1345 1345 int err;
1346 1346
1347 1347 if (drrs->drr_length < SPA_MINBLOCKSIZE ||
1348 1348 drrs->drr_length > SPA_MAXBLOCKSIZE)
1349 1349 return (SET_ERROR(EINVAL));
1350 1350
1351 1351 data = restore_read(ra, drrs->drr_length);
1352 1352 if (data == NULL)
1353 1353 return (ra->err);
1354 1354
1355 1355 if (dmu_object_info(os, drrs->drr_object, NULL) != 0)
1356 1356 return (SET_ERROR(EINVAL));
1357 1357
1358 1358 VERIFY(0 == dmu_bonus_hold(os, drrs->drr_object, FTAG, &db));
1359 1359 if ((err = dmu_spill_hold_by_bonus(db, FTAG, &db_spill)) != 0) {
1360 1360 dmu_buf_rele(db, FTAG);
1361 1361 return (err);
1362 1362 }
1363 1363
1364 1364 tx = dmu_tx_create(os);
1365 1365
1366 1366 dmu_tx_hold_spill(tx, db->db_object);
1367 1367
1368 1368 err = dmu_tx_assign(tx, TXG_WAIT);
1369 1369 if (err != 0) {
1370 1370 dmu_buf_rele(db, FTAG);
1371 1371 dmu_buf_rele(db_spill, FTAG);
1372 1372 dmu_tx_abort(tx);
1373 1373 return (err);
1374 1374 }
1375 1375 dmu_buf_will_dirty(db_spill, tx);
1376 1376
1377 1377 if (db_spill->db_size < drrs->drr_length)
1378 1378 VERIFY(0 == dbuf_spill_set_blksz(db_spill,
1379 1379 drrs->drr_length, tx));
1380 1380 bcopy(data, db_spill->db_data, drrs->drr_length);
1381 1381
1382 1382 dmu_buf_rele(db, FTAG);
1383 1383 dmu_buf_rele(db_spill, FTAG);
1384 1384
1385 1385 dmu_tx_commit(tx);
1386 1386 return (0);
1387 1387 }
1388 1388
1389 1389 /* ARGSUSED */
1390 1390 static int
1391 1391 restore_free(struct restorearg *ra, objset_t *os,
1392 1392 struct drr_free *drrf)
1393 1393 {
1394 1394 int err;
1395 1395
1396 1396 if (drrf->drr_length != -1ULL &&
1397 1397 drrf->drr_offset + drrf->drr_length < drrf->drr_offset)
1398 1398 return (SET_ERROR(EINVAL));
1399 1399
1400 1400 if (dmu_object_info(os, drrf->drr_object, NULL) != 0)
1401 1401 return (SET_ERROR(EINVAL));
1402 1402
1403 1403 err = dmu_free_long_range(os, drrf->drr_object,
1404 1404 drrf->drr_offset, drrf->drr_length);
1405 1405 return (err);
1406 1406 }
1407 1407
1408 1408 /* used to destroy the drc_ds on error */
1409 1409 static void
1410 1410 dmu_recv_cleanup_ds(dmu_recv_cookie_t *drc)
1411 1411 {
1412 1412 char name[MAXNAMELEN];
1413 1413 dsl_dataset_name(drc->drc_ds, name);
1414 1414 dsl_dataset_disown(drc->drc_ds, dmu_recv_tag);
1415 1415 (void) dsl_destroy_head(name);
1416 1416 }
1417 1417
1418 1418 /*
1419 1419 * NB: callers *must* call dmu_recv_end() if this succeeds.
1420 1420 */
1421 1421 int
1422 1422 dmu_recv_stream(dmu_recv_cookie_t *drc, vnode_t *vp, offset_t *voffp,
1423 1423 int cleanup_fd, uint64_t *action_handlep)
1424 1424 {
1425 1425 struct restorearg ra = { 0 };
1426 1426 dmu_replay_record_t *drr;
1427 1427 objset_t *os;
1428 1428 zio_cksum_t pcksum;
1429 1429 int featureflags;
1430 1430
1431 1431 ra.byteswap = drc->drc_byteswap;
1432 1432 ra.cksum = drc->drc_cksum;
1433 1433 ra.vp = vp;
1434 1434 ra.voff = *voffp;
1435 1435 ra.bufsize = 1<<20;
1436 1436 ra.buf = kmem_alloc(ra.bufsize, KM_SLEEP);
1437 1437
1438 1438 /* these were verified in dmu_recv_begin */
1439 1439 ASSERT3U(DMU_GET_STREAM_HDRTYPE(drc->drc_drrb->drr_versioninfo), ==,
1440 1440 DMU_SUBSTREAM);
1441 1441 ASSERT3U(drc->drc_drrb->drr_type, <, DMU_OST_NUMTYPES);
1442 1442
1443 1443 /*
1444 1444 * Open the objset we are modifying.
1445 1445 */
1446 1446 VERIFY0(dmu_objset_from_ds(drc->drc_ds, &os));
1447 1447
1448 1448 ASSERT(drc->drc_ds->ds_phys->ds_flags & DS_FLAG_INCONSISTENT);
1449 1449
1450 1450 featureflags = DMU_GET_FEATUREFLAGS(drc->drc_drrb->drr_versioninfo);
1451 1451
1452 1452 /* if this stream is dedup'ed, set up the avl tree for guid mapping */
1453 1453 if (featureflags & DMU_BACKUP_FEATURE_DEDUP) {
1454 1454 minor_t minor;
1455 1455
1456 1456 if (cleanup_fd == -1) {
1457 1457 ra.err = SET_ERROR(EBADF);
1458 1458 goto out;
1459 1459 }
1460 1460 ra.err = zfs_onexit_fd_hold(cleanup_fd, &minor);
1461 1461 if (ra.err != 0) {
1462 1462 cleanup_fd = -1;
1463 1463 goto out;
1464 1464 }
1465 1465
1466 1466 if (*action_handlep == 0) {
1467 1467 ra.guid_to_ds_map =
1468 1468 kmem_alloc(sizeof (avl_tree_t), KM_SLEEP);
1469 1469 avl_create(ra.guid_to_ds_map, guid_compare,
1470 1470 sizeof (guid_map_entry_t),
1471 1471 offsetof(guid_map_entry_t, avlnode));
1472 1472 ra.err = zfs_onexit_add_cb(minor,
1473 1473 free_guid_map_onexit, ra.guid_to_ds_map,
1474 1474 action_handlep);
1475 1475 if (ra.err != 0)
1476 1476 goto out;
1477 1477 } else {
1478 1478 ra.err = zfs_onexit_cb_data(minor, *action_handlep,
1479 1479 (void **)&ra.guid_to_ds_map);
1480 1480 if (ra.err != 0)
1481 1481 goto out;
1482 1482 }
1483 1483
1484 1484 drc->drc_guid_to_ds_map = ra.guid_to_ds_map;
1485 1485 }
1486 1486
1487 1487 /*
1488 1488 * Read records and process them.
1489 1489 */
1490 1490 pcksum = ra.cksum;
1491 1491 while (ra.err == 0 &&
1492 1492 NULL != (drr = restore_read(&ra, sizeof (*drr)))) {
1493 1493 if (issig(JUSTLOOKING) && issig(FORREAL)) {
1494 1494 ra.err = SET_ERROR(EINTR);
1495 1495 goto out;
1496 1496 }
1497 1497
1498 1498 if (ra.byteswap)
1499 1499 backup_byteswap(drr);
1500 1500
1501 1501 switch (drr->drr_type) {
1502 1502 case DRR_OBJECT:
1503 1503 {
1504 1504 /*
1505 1505 * We need to make a copy of the record header,
1506 1506 * because restore_{object,write} may need to
1507 1507 * restore_read(), which will invalidate drr.
1508 1508 */
1509 1509 struct drr_object drro = drr->drr_u.drr_object;
1510 1510 ra.err = restore_object(&ra, os, &drro);
1511 1511 break;
1512 1512 }
1513 1513 case DRR_FREEOBJECTS:
1514 1514 {
1515 1515 struct drr_freeobjects drrfo =
1516 1516 drr->drr_u.drr_freeobjects;
1517 1517 ra.err = restore_freeobjects(&ra, os, &drrfo);
1518 1518 break;
1519 1519 }
1520 1520 case DRR_WRITE:
1521 1521 {
1522 1522 struct drr_write drrw = drr->drr_u.drr_write;
1523 1523 ra.err = restore_write(&ra, os, &drrw);
1524 1524 break;
1525 1525 }
1526 1526 case DRR_WRITE_BYREF:
1527 1527 {
1528 1528 struct drr_write_byref drrwbr =
1529 1529 drr->drr_u.drr_write_byref;
1530 1530 ra.err = restore_write_byref(&ra, os, &drrwbr);
1531 1531 break;
1532 1532 }
1533 1533 case DRR_FREE:
1534 1534 {
1535 1535 struct drr_free drrf = drr->drr_u.drr_free;
1536 1536 ra.err = restore_free(&ra, os, &drrf);
1537 1537 break;
1538 1538 }
1539 1539 case DRR_END:
1540 1540 {
1541 1541 struct drr_end drre = drr->drr_u.drr_end;
1542 1542 /*
1543 1543 * We compare against the *previous* checksum
1544 1544 * value, because the stored checksum is of
1545 1545 * everything before the DRR_END record.
1546 1546 */
1547 1547 if (!ZIO_CHECKSUM_EQUAL(drre.drr_checksum, pcksum))
1548 1548 ra.err = SET_ERROR(ECKSUM);
1549 1549 goto out;
1550 1550 }
1551 1551 case DRR_SPILL:
1552 1552 {
1553 1553 struct drr_spill drrs = drr->drr_u.drr_spill;
1554 1554 ra.err = restore_spill(&ra, os, &drrs);
1555 1555 break;
1556 1556 }
1557 1557 default:
1558 1558 ra.err = SET_ERROR(EINVAL);
1559 1559 goto out;
1560 1560 }
1561 1561 pcksum = ra.cksum;
1562 1562 }
1563 1563 ASSERT(ra.err != 0);
1564 1564
1565 1565 out:
1566 1566 if ((featureflags & DMU_BACKUP_FEATURE_DEDUP) && (cleanup_fd != -1))
1567 1567 zfs_onexit_fd_rele(cleanup_fd);
1568 1568
1569 1569 if (ra.err != 0) {
1570 1570 /*
1571 1571 * destroy what we created, so we don't leave it in the
1572 1572 * inconsistent restoring state.
1573 1573 */
1574 1574 dmu_recv_cleanup_ds(drc);
1575 1575 }
1576 1576
1577 1577 kmem_free(ra.buf, ra.bufsize);
1578 1578 *voffp = ra.voff;
1579 1579 return (ra.err);
1580 1580 }
1581 1581
1582 1582 static int
1583 1583 dmu_recv_end_check(void *arg, dmu_tx_t *tx)
1584 1584 {
1585 1585 dmu_recv_cookie_t *drc = arg;
1586 1586 dsl_pool_t *dp = dmu_tx_pool(tx);
1587 1587 int error;
1588 1588
1589 1589 ASSERT3P(drc->drc_ds->ds_owner, ==, dmu_recv_tag);
1590 1590
1591 1591 if (!drc->drc_newfs) {
1592 1592 dsl_dataset_t *origin_head;
1593 1593
1594 1594 error = dsl_dataset_hold(dp, drc->drc_tofs, FTAG, &origin_head);
1595 1595 if (error != 0)
1596 1596 return (error);
1597 1597 if (drc->drc_force) {
1598 1598 /*
1599 1599 * We will destroy any snapshots in tofs (i.e. before
1600 1600 * origin_head) that are after the origin (which is
1601 1601 * the snap before drc_ds, because drc_ds can not
1602 1602 * have any snaps of its own).
1603 1603 */
1604 1604 uint64_t obj = origin_head->ds_phys->ds_prev_snap_obj;
1605 1605 while (obj != drc->drc_ds->ds_phys->ds_prev_snap_obj) {
1606 1606 dsl_dataset_t *snap;
1607 1607 error = dsl_dataset_hold_obj(dp, obj, FTAG,
1608 1608 &snap);
1609 1609 if (error != 0)
1610 1610 return (error);
1611 1611 if (snap->ds_dir != origin_head->ds_dir)
1612 1612 error = SET_ERROR(EINVAL);
1613 1613 if (error == 0) {
1614 1614 error = dsl_destroy_snapshot_check_impl(
1615 1615 snap, B_FALSE);
1616 1616 }
1617 1617 obj = snap->ds_phys->ds_prev_snap_obj;
1618 1618 dsl_dataset_rele(snap, FTAG);
1619 1619 if (error != 0)
1620 1620 return (error);
1621 1621 }
1622 1622 }
1623 1623 error = dsl_dataset_clone_swap_check_impl(drc->drc_ds,
1624 1624 origin_head, drc->drc_force, drc->drc_owner, tx);
1625 1625 if (error != 0) {
1626 1626 dsl_dataset_rele(origin_head, FTAG);
1627 1627 return (error);
1628 1628 }
1629 1629 error = dsl_dataset_snapshot_check_impl(origin_head,
1630 1630 drc->drc_tosnap, tx, B_TRUE);
1631 1631 dsl_dataset_rele(origin_head, FTAG);
1632 1632 if (error != 0)
1633 1633 return (error);
1634 1634
1635 1635 error = dsl_destroy_head_check_impl(drc->drc_ds, 1);
1636 1636 } else {
1637 1637 error = dsl_dataset_snapshot_check_impl(drc->drc_ds,
1638 1638 drc->drc_tosnap, tx, B_TRUE);
1639 1639 }
1640 1640 return (error);
1641 1641 }
1642 1642
1643 1643 static void
1644 1644 dmu_recv_end_sync(void *arg, dmu_tx_t *tx)
1645 1645 {
1646 1646 dmu_recv_cookie_t *drc = arg;
1647 1647 dsl_pool_t *dp = dmu_tx_pool(tx);
1648 1648
1649 1649 spa_history_log_internal_ds(drc->drc_ds, "finish receiving",
1650 1650 tx, "snap=%s", drc->drc_tosnap);
1651 1651
1652 1652 if (!drc->drc_newfs) {
1653 1653 dsl_dataset_t *origin_head;
1654 1654
1655 1655 VERIFY0(dsl_dataset_hold(dp, drc->drc_tofs, FTAG,
1656 1656 &origin_head));
1657 1657
1658 1658 if (drc->drc_force) {
1659 1659 /*
1660 1660 * Destroy any snapshots of drc_tofs (origin_head)
1661 1661 * after the origin (the snap before drc_ds).
1662 1662 */
1663 1663 uint64_t obj = origin_head->ds_phys->ds_prev_snap_obj;
1664 1664 while (obj != drc->drc_ds->ds_phys->ds_prev_snap_obj) {
1665 1665 dsl_dataset_t *snap;
1666 1666 VERIFY0(dsl_dataset_hold_obj(dp, obj, FTAG,
1667 1667 &snap));
1668 1668 ASSERT3P(snap->ds_dir, ==, origin_head->ds_dir);
1669 1669 obj = snap->ds_phys->ds_prev_snap_obj;
1670 1670 dsl_destroy_snapshot_sync_impl(snap,
1671 1671 B_FALSE, tx);
1672 1672 dsl_dataset_rele(snap, FTAG);
1673 1673 }
1674 1674 }
1675 1675 VERIFY3P(drc->drc_ds->ds_prev, ==,
1676 1676 origin_head->ds_prev);
1677 1677
1678 1678 dsl_dataset_clone_swap_sync_impl(drc->drc_ds,
1679 1679 origin_head, tx);
1680 1680 dsl_dataset_snapshot_sync_impl(origin_head,
1681 1681 drc->drc_tosnap, tx);
1682 1682
1683 1683 /* set snapshot's creation time and guid */
1684 1684 dmu_buf_will_dirty(origin_head->ds_prev->ds_dbuf, tx);
1685 1685 origin_head->ds_prev->ds_phys->ds_creation_time =
1686 1686 drc->drc_drrb->drr_creation_time;
1687 1687 origin_head->ds_prev->ds_phys->ds_guid =
1688 1688 drc->drc_drrb->drr_toguid;
1689 1689 origin_head->ds_prev->ds_phys->ds_flags &=
1690 1690 ~DS_FLAG_INCONSISTENT;
1691 1691
1692 1692 dmu_buf_will_dirty(origin_head->ds_dbuf, tx);
1693 1693 origin_head->ds_phys->ds_flags &= ~DS_FLAG_INCONSISTENT;
1694 1694
1695 1695 dsl_dataset_rele(origin_head, FTAG);
1696 1696 dsl_destroy_head_sync_impl(drc->drc_ds, tx);
1697 1697
1698 1698 if (drc->drc_owner != NULL)
1699 1699 VERIFY3P(origin_head->ds_owner, ==, drc->drc_owner);
1700 1700 } else {
1701 1701 dsl_dataset_t *ds = drc->drc_ds;
1702 1702
1703 1703 dsl_dataset_snapshot_sync_impl(ds, drc->drc_tosnap, tx);
1704 1704
1705 1705 /* set snapshot's creation time and guid */
1706 1706 dmu_buf_will_dirty(ds->ds_prev->ds_dbuf, tx);
1707 1707 ds->ds_prev->ds_phys->ds_creation_time =
1708 1708 drc->drc_drrb->drr_creation_time;
1709 1709 ds->ds_prev->ds_phys->ds_guid = drc->drc_drrb->drr_toguid;
1710 1710 ds->ds_prev->ds_phys->ds_flags &= ~DS_FLAG_INCONSISTENT;
1711 1711
1712 1712 dmu_buf_will_dirty(ds->ds_dbuf, tx);
1713 1713 ds->ds_phys->ds_flags &= ~DS_FLAG_INCONSISTENT;
1714 1714 }
1715 1715 drc->drc_newsnapobj = drc->drc_ds->ds_phys->ds_prev_snap_obj;
1716 1716 /*
1717 1717 * Release the hold from dmu_recv_begin. This must be done before
1718 1718 * we return to open context, so that when we free the dataset's dnode,
1719 1719 * we can evict its bonus buffer.
1720 1720 */
1721 1721 dsl_dataset_disown(drc->drc_ds, dmu_recv_tag);
1722 1722 drc->drc_ds = NULL;
1723 1723 }
1724 1724
1725 1725 static int
1726 1726 add_ds_to_guidmap(const char *name, avl_tree_t *guid_map, uint64_t snapobj)
1727 1727 {
1728 1728 dsl_pool_t *dp;
1729 1729 dsl_dataset_t *snapds;
1730 1730 guid_map_entry_t *gmep;
1731 1731 int err;
1732 1732
1733 1733 ASSERT(guid_map != NULL);
1734 1734
1735 1735 err = dsl_pool_hold(name, FTAG, &dp);
1736 1736 if (err != 0)
1737 1737 return (err);
1738 1738 gmep = kmem_alloc(sizeof (*gmep), KM_SLEEP);
1739 1739 err = dsl_dataset_hold_obj(dp, snapobj, gmep, &snapds);
1740 1740 if (err == 0) {
1741 1741 gmep->guid = snapds->ds_phys->ds_guid;
1742 1742 gmep->gme_ds = snapds;
1743 1743 avl_add(guid_map, gmep);
1744 1744 dsl_dataset_long_hold(snapds, gmep);
1745 1745 } else {
1746 1746 kmem_free(gmep, sizeof (*gmep));
1747 1747 }
1748 1748
1749 1749 dsl_pool_rele(dp, FTAG);
1750 1750 return (err);
1751 1751 }
1752 1752
1753 1753 static int dmu_recv_end_modified_blocks = 3;
1754 1754
1755 1755 static int
1756 1756 dmu_recv_existing_end(dmu_recv_cookie_t *drc)
1757 1757 {
1758 1758 int error;
1759 1759 char name[MAXNAMELEN];
1760 1760
1761 1761 #ifdef _KERNEL
1762 1762 /*
1763 1763 * We will be destroying the ds; make sure its origin is unmounted if
1764 1764 * necessary.
1765 1765 */
1766 1766 dsl_dataset_name(drc->drc_ds, name);
1767 1767 zfs_destroy_unmount_origin(name);
1768 1768 #endif
1769 1769
1770 1770 error = dsl_sync_task(drc->drc_tofs,
1771 1771 dmu_recv_end_check, dmu_recv_end_sync, drc,
1772 1772 dmu_recv_end_modified_blocks);
1773 1773
1774 1774 if (error != 0)
1775 1775 dmu_recv_cleanup_ds(drc);
1776 1776 return (error);
1777 1777 }
1778 1778
1779 1779 static int
1780 1780 dmu_recv_new_end(dmu_recv_cookie_t *drc)
1781 1781 {
1782 1782 int error;
1783 1783
1784 1784 error = dsl_sync_task(drc->drc_tofs,
1785 1785 dmu_recv_end_check, dmu_recv_end_sync, drc,
1786 1786 dmu_recv_end_modified_blocks);
1787 1787
1788 1788 if (error != 0) {
1789 1789 dmu_recv_cleanup_ds(drc);
1790 1790 } else if (drc->drc_guid_to_ds_map != NULL) {
1791 1791 (void) add_ds_to_guidmap(drc->drc_tofs,
1792 1792 drc->drc_guid_to_ds_map,
1793 1793 drc->drc_newsnapobj);
1794 1794 }
1795 1795 return (error);
1796 1796 }
1797 1797
1798 1798 int
1799 1799 dmu_recv_end(dmu_recv_cookie_t *drc, void *owner)
1800 1800 {
1801 1801 drc->drc_owner = owner;
1802 1802
1803 1803 if (drc->drc_newfs)
1804 1804 return (dmu_recv_new_end(drc));
1805 1805 else
1806 1806 return (dmu_recv_existing_end(drc));
1807 1807 }
1808 1808
1809 1809 /*
1810 1810 * Return TRUE if this objset is currently being received into.
1811 1811 */
1812 1812 boolean_t
1813 1813 dmu_objset_is_receiving(objset_t *os)
1814 1814 {
1815 1815 return (os->os_dsl_dataset != NULL &&
1816 1816 os->os_dsl_dataset->ds_owner == dmu_recv_tag);
1817 1817 }