Print this page
3740 Poor ZFS send / receive performance due to snapshot hold / release processing
Submitted by: Steven Hartland <steven.hartland@multiplay.co.uk>
Reviewed by: Matthew Ahrens <mahrens@delphix.com>
Split |
Close |
Expand all |
Collapse all |
--- old/usr/src/lib/libzfs_core/common/libzfs_core.c
+++ new/usr/src/lib/libzfs_core/common/libzfs_core.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
↓ open down ↓ |
13 lines elided |
↑ open up ↑ |
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21
22 22 /*
23 23 * Copyright (c) 2012 by Delphix. All rights reserved.
24 + * Copyright (c) 2013 Steven Hartland. All rights reserved.
24 25 */
25 26
26 27 /*
27 28 * LibZFS_Core (lzc) is intended to replace most functionality in libzfs.
28 29 * It has the following characteristics:
29 30 *
30 31 * - Thread Safe. libzfs_core is accessible concurrently from multiple
31 32 * threads. This is accomplished primarily by avoiding global data
32 33 * (e.g. caching). Since it's thread-safe, there is no reason for a
33 34 * process to have multiple libzfs "instances". Therefore, we store
34 35 * our few pieces of data (e.g. the file descriptor) in global
35 36 * variables. The fd is reference-counted so that the libzfs_core
36 37 * library can be "initialized" multiple times (e.g. by different
37 38 * consumers within the same process).
38 39 *
39 40 * - Committed Interface. The libzfs_core interface will be committed,
40 41 * therefore consumers can compile against it and be confident that
41 42 * their code will continue to work on future releases of this code.
42 43 * Currently, the interface is Evolving (not Committed), but we intend
43 44 * to commit to it once it is more complete and we determine that it
44 45 * meets the needs of all consumers.
45 46 *
46 47 * - Programmatic Error Handling. libzfs_core communicates errors with
47 48 * defined error numbers, and doesn't print anything to stdout/stderr.
48 49 *
49 50 * - Thin Layer. libzfs_core is a thin layer, marshaling arguments
50 51 * to/from the kernel ioctls. There is generally a 1:1 correspondence
51 52 * between libzfs_core functions and ioctls to /dev/zfs.
52 53 *
53 54 * - Clear Atomicity. Because libzfs_core functions are generally 1:1
54 55 * with kernel ioctls, and kernel ioctls are generally atomic, each
55 56 * libzfs_core function is atomic. For example, creating multiple
56 57 * snapshots with a single call to lzc_snapshot() is atomic -- it
57 58 * can't fail with only some of the requested snapshots created, even
58 59 * in the event of power loss or system crash.
59 60 *
60 61 * - Continued libzfs Support. Some higher-level operations (e.g.
61 62 * support for "zfs send -R") are too complicated to fit the scope of
62 63 * libzfs_core. This functionality will continue to live in libzfs.
63 64 * Where appropriate, libzfs will use the underlying atomic operations
64 65 * of libzfs_core. For example, libzfs may implement "zfs send -R |
65 66 * zfs receive" by using individual "send one snapshot", rename,
66 67 * destroy, and "receive one snapshot" operations in libzfs_core.
67 68 * /sbin/zfs and /sbin/zpool will link with both libzfs and
68 69 * libzfs_core. Other consumers should aim to use only libzfs_core,
69 70 * since that will be the supported, stable interface going forwards.
70 71 */
71 72
72 73 #include <libzfs_core.h>
73 74 #include <ctype.h>
74 75 #include <unistd.h>
75 76 #include <stdlib.h>
76 77 #include <string.h>
77 78 #include <errno.h>
78 79 #include <fcntl.h>
79 80 #include <pthread.h>
80 81 #include <sys/nvpair.h>
81 82 #include <sys/param.h>
82 83 #include <sys/types.h>
83 84 #include <sys/stat.h>
84 85 #include <sys/zfs_ioctl.h>
85 86
86 87 static int g_fd;
87 88 static pthread_mutex_t g_lock = PTHREAD_MUTEX_INITIALIZER;
88 89 static int g_refcount;
89 90
90 91 int
91 92 libzfs_core_init(void)
92 93 {
93 94 (void) pthread_mutex_lock(&g_lock);
94 95 if (g_refcount == 0) {
95 96 g_fd = open("/dev/zfs", O_RDWR);
96 97 if (g_fd < 0) {
97 98 (void) pthread_mutex_unlock(&g_lock);
98 99 return (errno);
99 100 }
100 101 }
101 102 g_refcount++;
102 103 (void) pthread_mutex_unlock(&g_lock);
103 104 return (0);
104 105 }
105 106
106 107 void
107 108 libzfs_core_fini(void)
108 109 {
109 110 (void) pthread_mutex_lock(&g_lock);
110 111 ASSERT3S(g_refcount, >, 0);
111 112 g_refcount--;
112 113 if (g_refcount == 0)
113 114 (void) close(g_fd);
114 115 (void) pthread_mutex_unlock(&g_lock);
115 116 }
116 117
/*
 * Pack 'source' and issue ioctl 'ioc' against dataset/pool 'name'.
 *
 * If 'resultp' is non-NULL, a destination buffer is supplied to the
 * kernel and on success the unpacked result nvlist is returned in
 * *resultp (caller must free it).  The buffer is doubled and the ioctl
 * retried for as long as the kernel reports ENOMEM (result too large).
 *
 * Returns 0 on success or an errno on failure.
 */
static int
lzc_ioctl(zfs_ioc_t ioc, const char *name,
    nvlist_t *source, nvlist_t **resultp)
{
	zfs_cmd_t zc = { 0 };
	int error = 0;
	char *packed;
	size_t size;

	/* the library must be initialized so that g_fd is open */
	ASSERT3S(g_refcount, >, 0);

	(void) strlcpy(zc.zc_name, name, sizeof (zc.zc_name));

	/* marshal the input nvlist into the command's source buffer */
	packed = fnvlist_pack(source, &size);
	zc.zc_nvlist_src = (uint64_t)(uintptr_t)packed;
	zc.zc_nvlist_src_size = size;

	if (resultp != NULL) {
		*resultp = NULL;
		/* initial result-buffer guess; doubled on ENOMEM below */
		zc.zc_nvlist_dst_size = MAX(size * 2, 128 * 1024);
		zc.zc_nvlist_dst = (uint64_t)(uintptr_t)
		    malloc(zc.zc_nvlist_dst_size);
		if (zc.zc_nvlist_dst == NULL) {
			error = ENOMEM;
			goto out;
		}
	}

	/*
	 * Retry while the kernel reports the destination buffer is too
	 * small (ENOMEM), growing it each time; any other failure ends
	 * the loop with 'error' set to the ioctl's errno.
	 */
	while (ioctl(g_fd, ioc, &zc) != 0) {
		if (errno == ENOMEM && resultp != NULL) {
			free((void *)(uintptr_t)zc.zc_nvlist_dst);
			zc.zc_nvlist_dst_size *= 2;
			zc.zc_nvlist_dst = (uint64_t)(uintptr_t)
			    malloc(zc.zc_nvlist_dst_size);
			if (zc.zc_nvlist_dst == NULL) {
				error = ENOMEM;
				goto out;
			}
		} else {
			error = errno;
			break;
		}
	}
	/*
	 * NOTE(review): this dereferences *resultp whenever the kernel set
	 * zc_nvlist_dst_filled — assumes the kernel never sets that flag
	 * when no destination buffer was supplied (resultp == NULL, so
	 * zc_nvlist_dst stayed 0 from the zero-initializer).  Confirm
	 * against the zfs ioctl handler.
	 */
	if (zc.zc_nvlist_dst_filled) {
		*resultp = fnvlist_unpack((void *)(uintptr_t)zc.zc_nvlist_dst,
		    zc.zc_nvlist_dst_size);
	}

out:
	fnvlist_pack_free(packed, size);
	free((void *)(uintptr_t)zc.zc_nvlist_dst);
	return (error);
}
170 171
171 172 int
172 173 lzc_create(const char *fsname, dmu_objset_type_t type, nvlist_t *props)
173 174 {
174 175 int error;
175 176 nvlist_t *args = fnvlist_alloc();
176 177 fnvlist_add_int32(args, "type", type);
177 178 if (props != NULL)
178 179 fnvlist_add_nvlist(args, "props", props);
179 180 error = lzc_ioctl(ZFS_IOC_CREATE, fsname, args, NULL);
180 181 nvlist_free(args);
181 182 return (error);
182 183 }
183 184
184 185 int
185 186 lzc_clone(const char *fsname, const char *origin,
186 187 nvlist_t *props)
187 188 {
188 189 int error;
189 190 nvlist_t *args = fnvlist_alloc();
190 191 fnvlist_add_string(args, "origin", origin);
191 192 if (props != NULL)
192 193 fnvlist_add_nvlist(args, "props", props);
193 194 error = lzc_ioctl(ZFS_IOC_CLONE, fsname, args, NULL);
194 195 nvlist_free(args);
195 196 return (error);
196 197 }
197 198
198 199 /*
199 200 * Creates snapshots.
200 201 *
201 202 * The keys in the snaps nvlist are the snapshots to be created.
202 203 * They must all be in the same pool.
203 204 *
204 205 * The props nvlist is properties to set. Currently only user properties
205 206 * are supported. { user:prop_name -> string value }
206 207 *
207 208 * The returned results nvlist will have an entry for each snapshot that failed.
208 209 * The value will be the (int32) error code.
209 210 *
210 211 * The return value will be 0 if all snapshots were created, otherwise it will
211 212 * be the errno of a (unspecified) snapshot that failed.
212 213 */
213 214 int
214 215 lzc_snapshot(nvlist_t *snaps, nvlist_t *props, nvlist_t **errlist)
215 216 {
216 217 nvpair_t *elem;
217 218 nvlist_t *args;
218 219 int error;
219 220 char pool[MAXNAMELEN];
220 221
221 222 *errlist = NULL;
222 223
223 224 /* determine the pool name */
224 225 elem = nvlist_next_nvpair(snaps, NULL);
225 226 if (elem == NULL)
226 227 return (0);
227 228 (void) strlcpy(pool, nvpair_name(elem), sizeof (pool));
228 229 pool[strcspn(pool, "/@")] = '\0';
229 230
230 231 args = fnvlist_alloc();
231 232 fnvlist_add_nvlist(args, "snaps", snaps);
232 233 if (props != NULL)
233 234 fnvlist_add_nvlist(args, "props", props);
234 235
235 236 error = lzc_ioctl(ZFS_IOC_SNAPSHOT, pool, args, errlist);
236 237 nvlist_free(args);
237 238
238 239 return (error);
239 240 }
240 241
241 242 /*
242 243 * Destroys snapshots.
243 244 *
244 245 * The keys in the snaps nvlist are the snapshots to be destroyed.
245 246 * They must all be in the same pool.
246 247 *
↓ open down ↓ |
213 lines elided |
↑ open up ↑ |
247 248 * Snapshots that do not exist will be silently ignored.
248 249 *
249 250 * If 'defer' is not set, and a snapshot has user holds or clones, the
250 251 * destroy operation will fail and none of the snapshots will be
251 252 * destroyed.
252 253 *
253 254 * If 'defer' is set, and a snapshot has user holds or clones, it will be
254 255 * marked for deferred destruction, and will be destroyed when the last hold
255 256 * or clone is removed/destroyed.
256 257 *
258 + * The return value will be ENOENT if none of the snapshots existed.
259 + *
257 260 * The return value will be 0 if all snapshots were destroyed (or marked for
258 - * later destruction if 'defer' is set) or didn't exist to begin with.
261 + * later destruction if 'defer' is set) or didn't exist to begin with and
262 + * at least one snapshot was destroyed.
259 263 *
260 264 * Otherwise the return value will be the errno of a (unspecified) snapshot
261 265 * that failed, no snapshots will be destroyed, and the errlist will have an
262 266 * entry for each snapshot that failed. The value in the errlist will be
263 267 * the (int32) error code.
264 268 */
265 269 int
266 270 lzc_destroy_snaps(nvlist_t *snaps, boolean_t defer, nvlist_t **errlist)
267 271 {
268 272 nvpair_t *elem;
269 273 nvlist_t *args;
270 274 int error;
271 275 char pool[MAXNAMELEN];
272 276
273 277 /* determine the pool name */
274 278 elem = nvlist_next_nvpair(snaps, NULL);
275 279 if (elem == NULL)
276 280 return (0);
277 281 (void) strlcpy(pool, nvpair_name(elem), sizeof (pool));
278 282 pool[strcspn(pool, "/@")] = '\0';
↓ open down ↓ |
10 lines elided |
↑ open up ↑ |
279 283
280 284 args = fnvlist_alloc();
281 285 fnvlist_add_nvlist(args, "snaps", snaps);
282 286 if (defer)
283 287 fnvlist_add_boolean(args, "defer");
284 288
285 289 error = lzc_ioctl(ZFS_IOC_DESTROY_SNAPS, pool, args, errlist);
286 290 nvlist_free(args);
287 291
288 292 return (error);
289 -
290 293 }
291 294
292 295 int
293 296 lzc_snaprange_space(const char *firstsnap, const char *lastsnap,
294 297 uint64_t *usedp)
295 298 {
296 299 nvlist_t *args;
297 300 nvlist_t *result;
298 301 int err;
299 302 char fs[MAXNAMELEN];
300 303 char *atp;
301 304
302 305 /* determine the fs name */
303 306 (void) strlcpy(fs, firstsnap, sizeof (fs));
304 307 atp = strchr(fs, '@');
305 308 if (atp == NULL)
306 309 return (EINVAL);
307 310 *atp = '\0';
308 311
309 312 args = fnvlist_alloc();
310 313 fnvlist_add_string(args, "firstsnap", firstsnap);
311 314
312 315 err = lzc_ioctl(ZFS_IOC_SPACE_SNAPS, lastsnap, args, &result);
313 316 nvlist_free(args);
314 317 if (err == 0)
315 318 *usedp = fnvlist_lookup_uint64(result, "used");
316 319 fnvlist_free(result);
317 320
318 321 return (err);
319 322 }
320 323
321 324 boolean_t
322 325 lzc_exists(const char *dataset)
323 326 {
324 327 /*
325 328 * The objset_stats ioctl is still legacy, so we need to construct our
326 329 * own zfs_cmd_t rather than using zfsc_ioctl().
327 330 */
328 331 zfs_cmd_t zc = { 0 };
329 332
330 333 (void) strlcpy(zc.zc_name, dataset, sizeof (zc.zc_name));
331 334 return (ioctl(g_fd, ZFS_IOC_OBJSET_STATS, &zc) == 0);
332 335 }
333 336
334 337 /*
335 338 * Create "user holds" on snapshots. If there is a hold on a snapshot,
336 339 * the snapshot can not be destroyed. (However, it can be marked for deletion
337 340 * by lzc_destroy_snaps(defer=B_TRUE).)
338 341 *
↓ open down ↓ |
39 lines elided |
↑ open up ↑ |
339 342 * The keys in the nvlist are snapshot names.
340 343 * The snapshots must all be in the same pool.
341 344 * The value is the name of the hold (string type).
342 345 *
343 346 * If cleanup_fd is not -1, it must be the result of open("/dev/zfs", O_EXCL).
344 347 * In this case, when the cleanup_fd is closed (including on process
345 348 * termination), the holds will be released. If the system is shut down
346 349 * uncleanly, the holds will be released when the pool is next opened
347 350 * or imported.
348 351 *
349 - * The return value will be 0 if all holds were created. Otherwise the return
350 - * value will be the errno of a (unspecified) hold that failed, no holds will
351 - * be created, and the errlist will have an entry for each hold that
352 - * failed (name = snapshot). The value in the errlist will be the error
353 - * code (int32).
352 + * Holds for snapshots which don't exist will be skipped and have an entry
353 + * added to errlist, but will not cause an overall failure, except in the
354 + * case that all holds where skipped.
355 + *
356 + * The return value will be ENOENT if none of the snapshots for the requested
357 + * holds existed.
358 + *
359 + * The return value will be 0 if the nvl holds was empty or all holds, for
360 + * snapshots that existed, were succesfully created and at least one hold
361 + * was created.
362 + *
363 + * Otherwise the return value will be the errno of a (unspecified) hold that
364 + * failed and no holds will be created.
365 + *
366 + * In all cases the errlist will have an entry for each hold that failed
367 + * (name = snapshot), with its value being the error code (int32).
354 368 */
355 369 int
356 370 lzc_hold(nvlist_t *holds, int cleanup_fd, nvlist_t **errlist)
357 371 {
358 372 char pool[MAXNAMELEN];
359 373 nvlist_t *args;
360 374 nvpair_t *elem;
361 375 int error;
362 376
363 377 /* determine the pool name */
364 378 elem = nvlist_next_nvpair(holds, NULL);
365 379 if (elem == NULL)
366 380 return (0);
367 381 (void) strlcpy(pool, nvpair_name(elem), sizeof (pool));
368 382 pool[strcspn(pool, "/@")] = '\0';
369 383
370 384 args = fnvlist_alloc();
371 385 fnvlist_add_nvlist(args, "holds", holds);
372 386 if (cleanup_fd != -1)
373 387 fnvlist_add_int32(args, "cleanup_fd", cleanup_fd);
374 388
375 389 error = lzc_ioctl(ZFS_IOC_HOLD, pool, args, errlist);
376 390 nvlist_free(args);
377 391 return (error);
378 392 }
379 393
↓ open down ↓ |
16 lines elided |
↑ open up ↑ |
380 394 /*
381 395 * Release "user holds" on snapshots. If the snapshot has been marked for
382 396 * deferred destroy (by lzc_destroy_snaps(defer=B_TRUE)), it does not have
383 397 * any clones, and all the user holds are removed, then the snapshot will be
384 398 * destroyed.
385 399 *
386 400 * The keys in the nvlist are snapshot names.
387 401 * The snapshots must all be in the same pool.
388 402 * The value is a nvlist whose keys are the holds to remove.
389 403 *
390 - * The return value will be 0 if all holds were removed.
391 - * Otherwise the return value will be the errno of a (unspecified) release
392 - * that failed, no holds will be released, and the errlist will have an
393 - * entry for each snapshot that has failed releases (name = snapshot).
394 - * The value in the errlist will be the error code (int32) of a failed release.
404 + * Holds which failed to release because they didn't exist will have an entry
405 + * added to errlist, but will not cause an overall failure, except in the
406 + * case that all releases where skipped.
407 + *
408 + * The return value will be ENOENT if none of the specified holds existed.
409 + *
410 + * The return value will be 0 if the nvl holds was empty or all holds that
411 + * existed, were successfully removed and at least one hold was removed.
412 + *
413 + * Otherwise the return value will be the errno of a (unspecified) hold that
414 + * failed to release and no holds will be released.
415 + *
416 + * In all cases the errlist will have an entry for each hold that failed to
417 + * to release.
395 418 */
396 419 int
397 420 lzc_release(nvlist_t *holds, nvlist_t **errlist)
398 421 {
399 422 char pool[MAXNAMELEN];
400 423 nvpair_t *elem;
401 424
402 425 /* determine the pool name */
403 426 elem = nvlist_next_nvpair(holds, NULL);
404 427 if (elem == NULL)
405 428 return (0);
406 429 (void) strlcpy(pool, nvpair_name(elem), sizeof (pool));
407 430 pool[strcspn(pool, "/@")] = '\0';
408 431
409 432 return (lzc_ioctl(ZFS_IOC_RELEASE, pool, holds, errlist));
410 433 }
411 434
412 435 /*
413 436 * Retrieve list of user holds on the specified snapshot.
414 437 *
415 438 * On success, *holdsp will be set to a nvlist which the caller must free.
416 439 * The keys are the names of the holds, and the value is the creation time
417 440 * of the hold (uint64) in seconds since the epoch.
418 441 */
419 442 int
420 443 lzc_get_holds(const char *snapname, nvlist_t **holdsp)
421 444 {
422 445 int error;
423 446 nvlist_t *innvl = fnvlist_alloc();
424 447 error = lzc_ioctl(ZFS_IOC_GET_HOLDS, snapname, innvl, holdsp);
425 448 fnvlist_free(innvl);
426 449 return (error);
427 450 }
428 451
429 452 /*
430 453 * If fromsnap is NULL, a full (non-incremental) stream will be sent.
431 454 */
432 455 int
433 456 lzc_send(const char *snapname, const char *fromsnap, int fd)
434 457 {
435 458 nvlist_t *args;
436 459 int err;
437 460
438 461 args = fnvlist_alloc();
439 462 fnvlist_add_int32(args, "fd", fd);
440 463 if (fromsnap != NULL)
441 464 fnvlist_add_string(args, "fromsnap", fromsnap);
442 465 err = lzc_ioctl(ZFS_IOC_SEND_NEW, snapname, args, NULL);
443 466 nvlist_free(args);
444 467 return (err);
445 468 }
446 469
447 470 /*
448 471 * If fromsnap is NULL, a full (non-incremental) stream will be estimated.
449 472 */
450 473 int
451 474 lzc_send_space(const char *snapname, const char *fromsnap, uint64_t *spacep)
452 475 {
453 476 nvlist_t *args;
454 477 nvlist_t *result;
455 478 int err;
456 479
457 480 args = fnvlist_alloc();
458 481 if (fromsnap != NULL)
459 482 fnvlist_add_string(args, "fromsnap", fromsnap);
460 483 err = lzc_ioctl(ZFS_IOC_SEND_SPACE, snapname, args, &result);
461 484 nvlist_free(args);
462 485 if (err == 0)
463 486 *spacep = fnvlist_lookup_uint64(result, "space");
464 487 nvlist_free(result);
465 488 return (err);
466 489 }
467 490
/*
 * Read exactly 'ilen' bytes from 'fd' into 'buf'.
 *
 * Fixes over the previous version: the cursor is only advanced on a
 * successful read (the old code added a negative rv to cp/len on error),
 * and reads interrupted by a signal (EINTR) are retried instead of
 * aborting the whole receive.
 *
 * Returns 0 on success, or EIO on read error or premature EOF.
 */
static int
recv_read(int fd, void *buf, int ilen)
{
	char *cp = buf;
	int len = ilen;

	while (len > 0) {
		ssize_t rv = read(fd, cp, len);
		if (rv < 0) {
			if (errno == EINTR)
				continue;	/* interrupted; retry */
			return (EIO);
		}
		if (rv == 0)
			return (EIO);	/* premature EOF */
		cp += rv;
		len -= rv;
	}
	return (0);
}
486 509
/*
 * The simplest receive case: receive from the specified fd, creating the
 * specified snapshot.  Apply the specified properties as "received"
 * properties (which can be overridden by locally-set properties).  If the
 * stream is a clone, its origin snapshot must be specified by 'origin'.
 * The 'force' flag will cause the target filesystem to be rolled back or
 * destroyed if necessary to receive.
 *
 * Return 0 on success or an errno on failure.
 *
 * Note: this interface does not work on dedup'd streams
 * (those with DMU_BACKUP_FEATURE_DEDUP).
 */
int
lzc_receive(const char *snapname, nvlist_t *props, const char *origin,
    boolean_t force, int fd)
{
	/*
	 * The receive ioctl is still legacy, so we need to construct our own
	 * zfs_cmd_t rather than using lzc_ioctl().
	 */
	zfs_cmd_t zc = { 0 };
	char *atp;
	char *packed = NULL;
	size_t size;
	dmu_replay_record_t drr;
	int error;

	/* the library must be initialized so that g_fd is open */
	ASSERT3S(g_refcount, >, 0);

	/* zc_name is name of containing filesystem */
	(void) strlcpy(zc.zc_name, snapname, sizeof (zc.zc_name));
	atp = strchr(zc.zc_name, '@');
	if (atp == NULL)
		return (EINVAL);
	*atp = '\0';

	/* if the fs does not exist, try its parent (receive can create it) */
	if (!lzc_exists(zc.zc_name)) {
		char *slashp = strrchr(zc.zc_name, '/');
		if (slashp == NULL)
			return (ENOENT);
		*slashp = '\0';

	}

	/* zc_value is full name of the snapshot to create */
	(void) strlcpy(zc.zc_value, snapname, sizeof (zc.zc_value));

	if (props != NULL) {
		/* zc_nvlist_src is props to set */
		packed = fnvlist_pack(props, &size);
		zc.zc_nvlist_src = (uint64_t)(uintptr_t)packed;
		zc.zc_nvlist_src_size = size;
	}

	/* zc_string is name of clone origin (if DRR_FLAG_CLONE) */
	if (origin != NULL)
		(void) strlcpy(zc.zc_string, origin, sizeof (zc.zc_string));

	/* zc_begin_record is non-byteswapped BEGIN record */
	error = recv_read(fd, &drr, sizeof (drr));
	if (error != 0)
		goto out;
	zc.zc_begin_record = drr.drr_u.drr_begin;

	/* zc_cookie is fd to read from */
	zc.zc_cookie = fd;

	/* zc guid is force flag */
	zc.zc_guid = force;

	/* zc_cleanup_fd is unused */
	zc.zc_cleanup_fd = -1;

	error = ioctl(g_fd, ZFS_IOC_RECV, &zc);
	if (error != 0)
		error = errno;

out:
	if (packed != NULL)
		fnvlist_pack_free(packed, size);
	/*
	 * zc_nvlist_dst is never allocated in this function (zc is
	 * zero-initialized), so this free is a no-op kept for symmetry
	 * with lzc_ioctl()'s cleanup.
	 */
	free((void*)(uintptr_t)zc.zc_nvlist_dst);
	return (error);
}
↓ open down ↓ |
167 lines elided |
↑ open up ↑ |
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX