Print this page
3740 Poor ZFS send / receive performance due to snapshot hold / release processing
Submitted by: Steven Hartland <steven.hartland@multiplay.co.uk>
Split |
Close |
Expand all |
Collapse all |
--- old/usr/src/lib/libzfs_core/common/libzfs_core.c
+++ new/usr/src/lib/libzfs_core/common/libzfs_core.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21
22 22 /*
23 23 * Copyright (c) 2012 by Delphix. All rights reserved.
24 24 */
25 25
26 26 /*
27 27 * LibZFS_Core (lzc) is intended to replace most functionality in libzfs.
28 28 * It has the following characteristics:
29 29 *
30 30 * - Thread Safe. libzfs_core is accessible concurrently from multiple
31 31 * threads. This is accomplished primarily by avoiding global data
32 32 * (e.g. caching). Since it's thread-safe, there is no reason for a
33 33 * process to have multiple libzfs "instances". Therefore, we store
34 34 * our few pieces of data (e.g. the file descriptor) in global
35 35 * variables. The fd is reference-counted so that the libzfs_core
36 36 * library can be "initialized" multiple times (e.g. by different
37 37 * consumers within the same process).
38 38 *
39 39 * - Committed Interface. The libzfs_core interface will be committed,
40 40 * therefore consumers can compile against it and be confident that
41 41 * their code will continue to work on future releases of this code.
42 42 * Currently, the interface is Evolving (not Committed), but we intend
43 43 * to commit to it once it is more complete and we determine that it
44 44 * meets the needs of all consumers.
45 45 *
46 46 * - Programatic Error Handling. libzfs_core communicates errors with
47 47 * defined error numbers, and doesn't print anything to stdout/stderr.
48 48 *
49 49 * - Thin Layer. libzfs_core is a thin layer, marshaling arguments
50 50 * to/from the kernel ioctls. There is generally a 1:1 correspondence
51 51 * between libzfs_core functions and ioctls to /dev/zfs.
52 52 *
53 53 * - Clear Atomicity. Because libzfs_core functions are generally 1:1
54 54 * with kernel ioctls, and kernel ioctls are general atomic, each
55 55 * libzfs_core function is atomic. For example, creating multiple
56 56 * snapshots with a single call to lzc_snapshot() is atomic -- it
57 57 * can't fail with only some of the requested snapshots created, even
58 58 * in the event of power loss or system crash.
59 59 *
60 60 * - Continued libzfs Support. Some higher-level operations (e.g.
61 61 * support for "zfs send -R") are too complicated to fit the scope of
62 62 * libzfs_core. This functionality will continue to live in libzfs.
63 63 * Where appropriate, libzfs will use the underlying atomic operations
64 64 * of libzfs_core. For example, libzfs may implement "zfs send -R |
65 65 * zfs receive" by using individual "send one snapshot", rename,
66 66 * destroy, and "receive one snapshot" operations in libzfs_core.
67 67 * /sbin/zfs and /zbin/zpool will link with both libzfs and
68 68 * libzfs_core. Other consumers should aim to use only libzfs_core,
69 69 * since that will be the supported, stable interface going forwards.
70 70 */
71 71
72 72 #include <libzfs_core.h>
73 73 #include <ctype.h>
74 74 #include <unistd.h>
75 75 #include <stdlib.h>
76 76 #include <string.h>
77 77 #include <errno.h>
78 78 #include <fcntl.h>
79 79 #include <pthread.h>
80 80 #include <sys/nvpair.h>
81 81 #include <sys/param.h>
82 82 #include <sys/types.h>
83 83 #include <sys/stat.h>
84 84 #include <sys/zfs_ioctl.h>
85 85
86 86 static int g_fd;
87 87 static pthread_mutex_t g_lock = PTHREAD_MUTEX_INITIALIZER;
88 88 static int g_refcount;
89 89
90 90 int
91 91 libzfs_core_init(void)
92 92 {
93 93 (void) pthread_mutex_lock(&g_lock);
94 94 if (g_refcount == 0) {
95 95 g_fd = open("/dev/zfs", O_RDWR);
96 96 if (g_fd < 0) {
97 97 (void) pthread_mutex_unlock(&g_lock);
98 98 return (errno);
99 99 }
100 100 }
101 101 g_refcount++;
102 102 (void) pthread_mutex_unlock(&g_lock);
103 103 return (0);
104 104 }
105 105
106 106 void
107 107 libzfs_core_fini(void)
108 108 {
109 109 (void) pthread_mutex_lock(&g_lock);
110 110 ASSERT3S(g_refcount, >, 0);
111 111 g_refcount--;
112 112 if (g_refcount == 0)
113 113 (void) close(g_fd);
114 114 (void) pthread_mutex_unlock(&g_lock);
115 115 }
116 116
117 117 static int
118 118 lzc_ioctl(zfs_ioc_t ioc, const char *name,
119 119 nvlist_t *source, nvlist_t **resultp)
120 120 {
121 121 zfs_cmd_t zc = { 0 };
122 122 int error = 0;
123 123 char *packed;
124 124 size_t size;
125 125
126 126 ASSERT3S(g_refcount, >, 0);
127 127
128 128 (void) strlcpy(zc.zc_name, name, sizeof (zc.zc_name));
129 129
130 130 packed = fnvlist_pack(source, &size);
131 131 zc.zc_nvlist_src = (uint64_t)(uintptr_t)packed;
132 132 zc.zc_nvlist_src_size = size;
133 133
134 134 if (resultp != NULL) {
135 135 *resultp = NULL;
136 136 zc.zc_nvlist_dst_size = MAX(size * 2, 128 * 1024);
137 137 zc.zc_nvlist_dst = (uint64_t)(uintptr_t)
138 138 malloc(zc.zc_nvlist_dst_size);
139 139 if (zc.zc_nvlist_dst == NULL) {
140 140 error = ENOMEM;
141 141 goto out;
142 142 }
143 143 }
144 144
145 145 while (ioctl(g_fd, ioc, &zc) != 0) {
146 146 if (errno == ENOMEM && resultp != NULL) {
147 147 free((void *)(uintptr_t)zc.zc_nvlist_dst);
148 148 zc.zc_nvlist_dst_size *= 2;
149 149 zc.zc_nvlist_dst = (uint64_t)(uintptr_t)
150 150 malloc(zc.zc_nvlist_dst_size);
151 151 if (zc.zc_nvlist_dst == NULL) {
152 152 error = ENOMEM;
153 153 goto out;
154 154 }
155 155 } else {
156 156 error = errno;
157 157 break;
158 158 }
159 159 }
160 160 if (zc.zc_nvlist_dst_filled) {
161 161 *resultp = fnvlist_unpack((void *)(uintptr_t)zc.zc_nvlist_dst,
162 162 zc.zc_nvlist_dst_size);
163 163 }
164 164
165 165 out:
166 166 fnvlist_pack_free(packed, size);
167 167 free((void *)(uintptr_t)zc.zc_nvlist_dst);
168 168 return (error);
169 169 }
170 170
171 171 int
172 172 lzc_create(const char *fsname, dmu_objset_type_t type, nvlist_t *props)
173 173 {
174 174 int error;
175 175 nvlist_t *args = fnvlist_alloc();
176 176 fnvlist_add_int32(args, "type", type);
177 177 if (props != NULL)
178 178 fnvlist_add_nvlist(args, "props", props);
179 179 error = lzc_ioctl(ZFS_IOC_CREATE, fsname, args, NULL);
180 180 nvlist_free(args);
181 181 return (error);
182 182 }
183 183
184 184 int
185 185 lzc_clone(const char *fsname, const char *origin,
186 186 nvlist_t *props)
187 187 {
188 188 int error;
189 189 nvlist_t *args = fnvlist_alloc();
190 190 fnvlist_add_string(args, "origin", origin);
191 191 if (props != NULL)
192 192 fnvlist_add_nvlist(args, "props", props);
193 193 error = lzc_ioctl(ZFS_IOC_CLONE, fsname, args, NULL);
194 194 nvlist_free(args);
195 195 return (error);
196 196 }
197 197
198 198 /*
199 199 * Creates snapshots.
200 200 *
201 201 * The keys in the snaps nvlist are the snapshots to be created.
202 202 * They must all be in the same pool.
203 203 *
204 204 * The props nvlist is properties to set. Currently only user properties
205 205 * are supported. { user:prop_name -> string value }
206 206 *
207 207 * The returned results nvlist will have an entry for each snapshot that failed.
208 208 * The value will be the (int32) error code.
209 209 *
210 210 * The return value will be 0 if all snapshots were created, otherwise it will
211 211 * be the errno of a (unspecified) snapshot that failed.
212 212 */
213 213 int
214 214 lzc_snapshot(nvlist_t *snaps, nvlist_t *props, nvlist_t **errlist)
215 215 {
216 216 nvpair_t *elem;
217 217 nvlist_t *args;
218 218 int error;
219 219 char pool[MAXNAMELEN];
220 220
221 221 *errlist = NULL;
222 222
223 223 /* determine the pool name */
224 224 elem = nvlist_next_nvpair(snaps, NULL);
225 225 if (elem == NULL)
226 226 return (0);
227 227 (void) strlcpy(pool, nvpair_name(elem), sizeof (pool));
228 228 pool[strcspn(pool, "/@")] = '\0';
229 229
230 230 args = fnvlist_alloc();
231 231 fnvlist_add_nvlist(args, "snaps", snaps);
232 232 if (props != NULL)
233 233 fnvlist_add_nvlist(args, "props", props);
234 234
235 235 error = lzc_ioctl(ZFS_IOC_SNAPSHOT, pool, args, errlist);
236 236 nvlist_free(args);
237 237
238 238 return (error);
239 239 }
240 240
241 241 /*
242 242 * Destroys snapshots.
243 243 *
244 244 * The keys in the snaps nvlist are the snapshots to be destroyed.
245 245 * They must all be in the same pool.
246 246 *
↓ open down ↓ |
246 lines elided |
↑ open up ↑ |
247 247 * Snapshots that do not exist will be silently ignored.
248 248 *
249 249 * If 'defer' is not set, and a snapshot has user holds or clones, the
250 250 * destroy operation will fail and none of the snapshots will be
251 251 * destroyed.
252 252 *
253 253 * If 'defer' is set, and a snapshot has user holds or clones, it will be
254 254 * marked for deferred destruction, and will be destroyed when the last hold
255 255 * or clone is removed/destroyed.
256 256 *
257 + * The return value will be ENOENT if none of the snapshots existed.
258 + *
257 259 * The return value will be 0 if all snapshots were destroyed (or marked for
258 - * later destruction if 'defer' is set) or didn't exist to begin with.
260 + * later destruction if 'defer' is set) or didn't exist to begin with, and
261 + * at least one snapshot was destroyed.
259 262 *
260 263 * Otherwise the return value will be the errno of a (unspecified) snapshot
261 264 * that failed, no snapshots will be destroyed, and the errlist will have an
262 265 * entry for each snapshot that failed. The value in the errlist will be
263 266 * the (int32) error code.
264 267 */
265 268 int
266 269 lzc_destroy_snaps(nvlist_t *snaps, boolean_t defer, nvlist_t **errlist)
267 270 {
268 271 nvpair_t *elem;
269 272 nvlist_t *args;
270 273 int error;
271 274 char pool[MAXNAMELEN];
272 275
273 276 /* determine the pool name */
274 277 elem = nvlist_next_nvpair(snaps, NULL);
275 278 if (elem == NULL)
276 279 return (0);
277 280 (void) strlcpy(pool, nvpair_name(elem), sizeof (pool));
278 281 pool[strcspn(pool, "/@")] = '\0';
↓ open down ↓ |
10 lines elided |
↑ open up ↑ |
279 282
280 283 args = fnvlist_alloc();
281 284 fnvlist_add_nvlist(args, "snaps", snaps);
282 285 if (defer)
283 286 fnvlist_add_boolean(args, "defer");
284 287
285 288 error = lzc_ioctl(ZFS_IOC_DESTROY_SNAPS, pool, args, errlist);
286 289 nvlist_free(args);
287 290
288 291 return (error);
289 -
290 292 }
291 293
292 294 int
293 295 lzc_snaprange_space(const char *firstsnap, const char *lastsnap,
294 296 uint64_t *usedp)
295 297 {
296 298 nvlist_t *args;
297 299 nvlist_t *result;
298 300 int err;
299 301 char fs[MAXNAMELEN];
300 302 char *atp;
301 303
302 304 /* determine the fs name */
303 305 (void) strlcpy(fs, firstsnap, sizeof (fs));
304 306 atp = strchr(fs, '@');
305 307 if (atp == NULL)
306 308 return (EINVAL);
307 309 *atp = '\0';
308 310
309 311 args = fnvlist_alloc();
310 312 fnvlist_add_string(args, "firstsnap", firstsnap);
311 313
312 314 err = lzc_ioctl(ZFS_IOC_SPACE_SNAPS, lastsnap, args, &result);
313 315 nvlist_free(args);
314 316 if (err == 0)
315 317 *usedp = fnvlist_lookup_uint64(result, "used");
316 318 fnvlist_free(result);
317 319
318 320 return (err);
319 321 }
320 322
321 323 boolean_t
322 324 lzc_exists(const char *dataset)
323 325 {
324 326 /*
325 327 * The objset_stats ioctl is still legacy, so we need to construct our
326 328 * own zfs_cmd_t rather than using zfsc_ioctl().
327 329 */
328 330 zfs_cmd_t zc = { 0 };
329 331
330 332 (void) strlcpy(zc.zc_name, dataset, sizeof (zc.zc_name));
331 333 return (ioctl(g_fd, ZFS_IOC_OBJSET_STATS, &zc) == 0);
332 334 }
333 335
334 336 /*
335 337 * Create "user holds" on snapshots. If there is a hold on a snapshot,
336 338 * the snapshot can not be destroyed. (However, it can be marked for deletion
337 339 * by lzc_destroy_snaps(defer=B_TRUE).)
338 340 *
↓ open down ↓ |
39 lines elided |
↑ open up ↑ |
339 341 * The keys in the nvlist are snapshot names.
340 342 * The snapshots must all be in the same pool.
341 343 * The value is the name of the hold (string type).
342 344 *
343 345 * If cleanup_fd is not -1, it must be the result of open("/dev/zfs", O_EXCL).
344 346 * In this case, when the cleanup_fd is closed (including on process
345 347 * termination), the holds will be released. If the system is shut down
346 348 * uncleanly, the holds will be released when the pool is next opened
347 349 * or imported.
348 350 *
349 - * The return value will be 0 if all holds were created. Otherwise the return
350 - * value will be the errno of a (unspecified) hold that failed, no holds will
351 - * be created, and the errlist will have an entry for each hold that
352 - * failed (name = snapshot). The value in the errlist will be the error
353 - * code (int32).
351 + * Holds for snapshots which don't exist will be skipped and have an entry
352 + * added to errlist, but will not cause an overall failure, except in the
353 + * case that all holds were skipped.
354 + *
355 + * The return value will be ENOENT if none of the snapshots for the requested
356 + * holds existed.
357 + *
358 + * The return value will be 0 if the nvl holds was empty or all holds, for
359 + * snapshots that existed, were successfully created and at least one hold
360 + * was created.
361 + *
362 + * Otherwise the return value will be the errno of a (unspecified) hold that
363 + * failed and no holds will be created.
364 + *
365 + * In all cases the errlist will have an entry for each hold that failed
366 + * (name = snapshot), with its value being the error code (int32).
354 367 */
355 368 int
356 369 lzc_hold(nvlist_t *holds, int cleanup_fd, nvlist_t **errlist)
357 370 {
358 371 char pool[MAXNAMELEN];
359 372 nvlist_t *args;
360 373 nvpair_t *elem;
361 374 int error;
362 375
363 376 /* determine the pool name */
364 377 elem = nvlist_next_nvpair(holds, NULL);
365 378 if (elem == NULL)
366 379 return (0);
367 380 (void) strlcpy(pool, nvpair_name(elem), sizeof (pool));
368 381 pool[strcspn(pool, "/@")] = '\0';
369 382
370 383 args = fnvlist_alloc();
371 384 fnvlist_add_nvlist(args, "holds", holds);
372 385 if (cleanup_fd != -1)
373 386 fnvlist_add_int32(args, "cleanup_fd", cleanup_fd);
374 387
375 388 error = lzc_ioctl(ZFS_IOC_HOLD, pool, args, errlist);
376 389 nvlist_free(args);
377 390 return (error);
378 391 }
379 392
↓ open down ↓ |
16 lines elided |
↑ open up ↑ |
380 393 /*
381 394 * Release "user holds" on snapshots. If the snapshot has been marked for
382 395 * deferred destroy (by lzc_destroy_snaps(defer=B_TRUE)), it does not have
383 396 * any clones, and all the user holds are removed, then the snapshot will be
384 397 * destroyed.
385 398 *
386 399 * The keys in the nvlist are snapshot names.
387 400 * The snapshots must all be in the same pool.
388 401 * The value is a nvlist whose keys are the holds to remove.
389 402 *
390 - * The return value will be 0 if all holds were removed.
391 - * Otherwise the return value will be the errno of a (unspecified) release
392 - * that failed, no holds will be released, and the errlist will have an
393 - * entry for each snapshot that has failed releases (name = snapshot).
394 - * The value in the errlist will be the error code (int32) of a failed release.
403 + * Holds which failed to release because they didn't exist will have an entry
404 + * added to errlist, but will not cause an overall failure, except in the
405 + * case that all releases were skipped.
406 + *
407 + * The return value will be ENOENT if none of the specified holds existed.
408 + *
409 + * The return value will be 0 if the nvl holds was empty or all holds that
410 + * existed were successfully removed and at least one hold was removed.
411 + *
412 + * Otherwise the return value will be the errno of a (unspecified) hold that
413 + * failed to release and no holds will be released.
414 + *
415 + * In all cases the errlist will have an entry for each hold that failed
416 + * to release.
395 417 */
396 418 int
397 419 lzc_release(nvlist_t *holds, nvlist_t **errlist)
398 420 {
399 421 char pool[MAXNAMELEN];
400 422 nvpair_t *elem;
401 423
402 424 /* determine the pool name */
403 425 elem = nvlist_next_nvpair(holds, NULL);
404 426 if (elem == NULL)
405 427 return (0);
406 428 (void) strlcpy(pool, nvpair_name(elem), sizeof (pool));
407 429 pool[strcspn(pool, "/@")] = '\0';
408 430
409 431 return (lzc_ioctl(ZFS_IOC_RELEASE, pool, holds, errlist));
410 432 }
411 433
412 434 /*
413 435 * Retrieve list of user holds on the specified snapshot.
414 436 *
415 437 * On success, *holdsp will be set to a nvlist which the caller must free.
416 438 * The keys are the names of the holds, and the value is the creation time
417 439 * of the hold (uint64) in seconds since the epoch.
418 440 */
419 441 int
420 442 lzc_get_holds(const char *snapname, nvlist_t **holdsp)
421 443 {
422 444 int error;
423 445 nvlist_t *innvl = fnvlist_alloc();
424 446 error = lzc_ioctl(ZFS_IOC_GET_HOLDS, snapname, innvl, holdsp);
425 447 fnvlist_free(innvl);
426 448 return (error);
427 449 }
428 450
429 451 /*
430 452 * If fromsnap is NULL, a full (non-incremental) stream will be sent.
431 453 */
432 454 int
433 455 lzc_send(const char *snapname, const char *fromsnap, int fd)
434 456 {
435 457 nvlist_t *args;
436 458 int err;
437 459
438 460 args = fnvlist_alloc();
439 461 fnvlist_add_int32(args, "fd", fd);
440 462 if (fromsnap != NULL)
441 463 fnvlist_add_string(args, "fromsnap", fromsnap);
442 464 err = lzc_ioctl(ZFS_IOC_SEND_NEW, snapname, args, NULL);
443 465 nvlist_free(args);
444 466 return (err);
445 467 }
446 468
447 469 /*
448 470 * If fromsnap is NULL, a full (non-incremental) stream will be estimated.
449 471 */
450 472 int
451 473 lzc_send_space(const char *snapname, const char *fromsnap, uint64_t *spacep)
452 474 {
453 475 nvlist_t *args;
454 476 nvlist_t *result;
455 477 int err;
456 478
457 479 args = fnvlist_alloc();
458 480 if (fromsnap != NULL)
459 481 fnvlist_add_string(args, "fromsnap", fromsnap);
460 482 err = lzc_ioctl(ZFS_IOC_SEND_SPACE, snapname, args, &result);
461 483 nvlist_free(args);
462 484 if (err == 0)
463 485 *spacep = fnvlist_lookup_uint64(result, "space");
464 486 nvlist_free(result);
465 487 return (err);
466 488 }
467 489
468 490 static int
469 491 recv_read(int fd, void *buf, int ilen)
470 492 {
471 493 char *cp = buf;
472 494 int rv;
473 495 int len = ilen;
474 496
475 497 do {
476 498 rv = read(fd, cp, len);
477 499 cp += rv;
478 500 len -= rv;
479 501 } while (rv > 0);
480 502
481 503 if (rv < 0 || len != 0)
482 504 return (EIO);
483 505
484 506 return (0);
485 507 }
486 508
487 509 /*
488 510 * The simplest receive case: receive from the specified fd, creating the
489 511 * specified snapshot. Apply the specified properties a "received" properties
490 512 * (which can be overridden by locally-set properties). If the stream is a
491 513 * clone, its origin snapshot must be specified by 'origin'. The 'force'
492 514 * flag will cause the target filesystem to be rolled back or destroyed if
493 515 * necessary to receive.
494 516 *
495 517 * Return 0 on success or an errno on failure.
496 518 *
497 519 * Note: this interface does not work on dedup'd streams
498 520 * (those with DMU_BACKUP_FEATURE_DEDUP).
499 521 */
500 522 int
501 523 lzc_receive(const char *snapname, nvlist_t *props, const char *origin,
502 524 boolean_t force, int fd)
503 525 {
504 526 /*
505 527 * The receive ioctl is still legacy, so we need to construct our own
506 528 * zfs_cmd_t rather than using zfsc_ioctl().
507 529 */
508 530 zfs_cmd_t zc = { 0 };
509 531 char *atp;
510 532 char *packed = NULL;
511 533 size_t size;
512 534 dmu_replay_record_t drr;
513 535 int error;
514 536
515 537 ASSERT3S(g_refcount, >, 0);
516 538
517 539 /* zc_name is name of containing filesystem */
518 540 (void) strlcpy(zc.zc_name, snapname, sizeof (zc.zc_name));
519 541 atp = strchr(zc.zc_name, '@');
520 542 if (atp == NULL)
521 543 return (EINVAL);
522 544 *atp = '\0';
523 545
524 546 /* if the fs does not exist, try its parent. */
525 547 if (!lzc_exists(zc.zc_name)) {
526 548 char *slashp = strrchr(zc.zc_name, '/');
527 549 if (slashp == NULL)
528 550 return (ENOENT);
529 551 *slashp = '\0';
530 552
531 553 }
532 554
533 555 /* zc_value is full name of the snapshot to create */
534 556 (void) strlcpy(zc.zc_value, snapname, sizeof (zc.zc_value));
535 557
536 558 if (props != NULL) {
537 559 /* zc_nvlist_src is props to set */
538 560 packed = fnvlist_pack(props, &size);
539 561 zc.zc_nvlist_src = (uint64_t)(uintptr_t)packed;
540 562 zc.zc_nvlist_src_size = size;
541 563 }
542 564
543 565 /* zc_string is name of clone origin (if DRR_FLAG_CLONE) */
544 566 if (origin != NULL)
545 567 (void) strlcpy(zc.zc_string, origin, sizeof (zc.zc_string));
546 568
547 569 /* zc_begin_record is non-byteswapped BEGIN record */
548 570 error = recv_read(fd, &drr, sizeof (drr));
549 571 if (error != 0)
550 572 goto out;
551 573 zc.zc_begin_record = drr.drr_u.drr_begin;
552 574
553 575 /* zc_cookie is fd to read from */
554 576 zc.zc_cookie = fd;
555 577
556 578 /* zc guid is force flag */
557 579 zc.zc_guid = force;
558 580
559 581 /* zc_cleanup_fd is unused */
560 582 zc.zc_cleanup_fd = -1;
561 583
562 584 error = ioctl(g_fd, ZFS_IOC_RECV, &zc);
563 585 if (error != 0)
564 586 error = errno;
565 587
566 588 out:
567 589 if (packed != NULL)
568 590 fnvlist_pack_free(packed, size);
569 591 free((void*)(uintptr_t)zc.zc_nvlist_dst);
570 592 return (error);
571 593 }
↓ open down ↓ |
167 lines elided |
↑ open up ↑ |
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX