Print this page
3740 Poor ZFS send / receive performance due to snapshot hold / release processing
Submitted by: Steven Hartland <steven.hartland@multiplay.co.uk>
Split |
Close |
Expand all |
Collapse all |
--- old/usr/src/lib/libzfs_core/common/libzfs_core.c
+++ new/usr/src/lib/libzfs_core/common/libzfs_core.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21
22 22 /*
23 23 * Copyright (c) 2012 by Delphix. All rights reserved.
24 24 */
25 25
26 26 /*
27 27 * LibZFS_Core (lzc) is intended to replace most functionality in libzfs.
28 28 * It has the following characteristics:
29 29 *
30 30 * - Thread Safe. libzfs_core is accessible concurrently from multiple
31 31 * threads. This is accomplished primarily by avoiding global data
32 32 * (e.g. caching). Since it's thread-safe, there is no reason for a
33 33 * process to have multiple libzfs "instances". Therefore, we store
34 34 * our few pieces of data (e.g. the file descriptor) in global
35 35 * variables. The fd is reference-counted so that the libzfs_core
36 36 * library can be "initialized" multiple times (e.g. by different
37 37 * consumers within the same process).
38 38 *
39 39 * - Committed Interface. The libzfs_core interface will be committed,
40 40 * therefore consumers can compile against it and be confident that
41 41 * their code will continue to work on future releases of this code.
42 42 * Currently, the interface is Evolving (not Committed), but we intend
43 43 * to commit to it once it is more complete and we determine that it
44 44 * meets the needs of all consumers.
45 45 *
 46  46  * - Programmatic Error Handling. libzfs_core communicates errors with
47 47 * defined error numbers, and doesn't print anything to stdout/stderr.
48 48 *
49 49 * - Thin Layer. libzfs_core is a thin layer, marshaling arguments
50 50 * to/from the kernel ioctls. There is generally a 1:1 correspondence
51 51 * between libzfs_core functions and ioctls to /dev/zfs.
52 52 *
53 53 * - Clear Atomicity. Because libzfs_core functions are generally 1:1
 54  54  * with kernel ioctls, and kernel ioctls are generally atomic, each
55 55 * libzfs_core function is atomic. For example, creating multiple
56 56 * snapshots with a single call to lzc_snapshot() is atomic -- it
57 57 * can't fail with only some of the requested snapshots created, even
58 58 * in the event of power loss or system crash.
59 59 *
60 60 * - Continued libzfs Support. Some higher-level operations (e.g.
61 61 * support for "zfs send -R") are too complicated to fit the scope of
62 62 * libzfs_core. This functionality will continue to live in libzfs.
63 63 * Where appropriate, libzfs will use the underlying atomic operations
64 64 * of libzfs_core. For example, libzfs may implement "zfs send -R |
65 65 * zfs receive" by using individual "send one snapshot", rename,
66 66 * destroy, and "receive one snapshot" operations in libzfs_core.
 67  67  * /sbin/zfs and /sbin/zpool will link with both libzfs and
68 68 * libzfs_core. Other consumers should aim to use only libzfs_core,
69 69 * since that will be the supported, stable interface going forwards.
70 70 */
71 71
72 72 #include <libzfs_core.h>
73 73 #include <ctype.h>
74 74 #include <unistd.h>
75 75 #include <stdlib.h>
76 76 #include <string.h>
77 77 #include <errno.h>
78 78 #include <fcntl.h>
79 79 #include <pthread.h>
80 80 #include <sys/nvpair.h>
81 81 #include <sys/param.h>
82 82 #include <sys/types.h>
83 83 #include <sys/stat.h>
84 84 #include <sys/zfs_ioctl.h>
85 85
86 86 static int g_fd;
87 87 static pthread_mutex_t g_lock = PTHREAD_MUTEX_INITIALIZER;
88 88 static int g_refcount;
89 89
90 90 int
91 91 libzfs_core_init(void)
92 92 {
93 93 (void) pthread_mutex_lock(&g_lock);
94 94 if (g_refcount == 0) {
95 95 g_fd = open("/dev/zfs", O_RDWR);
96 96 if (g_fd < 0) {
97 97 (void) pthread_mutex_unlock(&g_lock);
98 98 return (errno);
99 99 }
100 100 }
101 101 g_refcount++;
102 102 (void) pthread_mutex_unlock(&g_lock);
103 103 return (0);
104 104 }
105 105
106 106 void
107 107 libzfs_core_fini(void)
108 108 {
109 109 (void) pthread_mutex_lock(&g_lock);
110 110 ASSERT3S(g_refcount, >, 0);
111 111 g_refcount--;
112 112 if (g_refcount == 0)
113 113 (void) close(g_fd);
114 114 (void) pthread_mutex_unlock(&g_lock);
115 115 }
116 116
/*
 * Pack 'source', issue the given ioctl against /dev/zfs, and (when resultp
 * is non-NULL) unpack the kernel's reply nvlist into *resultp.
 *
 * ioc:     the ZFS_IOC_* command to issue.
 * name:    dataset/pool name copied into zc_name.
 * source:  input nvlist, packed into zc_nvlist_src.
 * resultp: if non-NULL, receives the unpacked output nvlist; set to NULL
 *          on entry.  Caller frees the result (presumably via nvlist_free()
 *          / fnvlist_free() — callers in this file use both).
 *
 * Returns 0 on success or an errno value on failure.
 */
static int
lzc_ioctl(zfs_ioc_t ioc, const char *name,
    nvlist_t *source, nvlist_t **resultp)
{
	zfs_cmd_t zc = { 0 };
	int error = 0;
	char *packed;
	size_t size;

	/* The library must have been initialized so g_fd is valid. */
	ASSERT3S(g_refcount, >, 0);

	(void) strlcpy(zc.zc_name, name, sizeof (zc.zc_name));

	packed = fnvlist_pack(source, &size);
	zc.zc_nvlist_src = (uint64_t)(uintptr_t)packed;
	zc.zc_nvlist_src_size = size;

	if (resultp != NULL) {
		*resultp = NULL;
		/* Initial guess at the reply buffer size; grown on ENOMEM. */
		zc.zc_nvlist_dst_size = MAX(size * 2, 128 * 1024);
		zc.zc_nvlist_dst = (uint64_t)(uintptr_t)
		    malloc(zc.zc_nvlist_dst_size);
		if (zc.zc_nvlist_dst == NULL) {
			error = ENOMEM;
			goto out;
		}
	}

	/*
	 * Retry with a doubled output buffer whenever the kernel reports
	 * that the reply did not fit (ENOMEM); any other failure is final.
	 */
	while (ioctl(g_fd, ioc, &zc) != 0) {
		if (errno == ENOMEM && resultp != NULL) {
			free((void *)(uintptr_t)zc.zc_nvlist_dst);
			zc.zc_nvlist_dst_size *= 2;
			zc.zc_nvlist_dst = (uint64_t)(uintptr_t)
			    malloc(zc.zc_nvlist_dst_size);
			if (zc.zc_nvlist_dst == NULL) {
				error = ENOMEM;
				goto out;
			}
		} else {
			error = errno;
			break;
		}
	}
	/*
	 * NOTE(review): zc_nvlist_dst_filled is presumably only set by the
	 * kernel when a destination buffer was supplied (i.e. resultp !=
	 * NULL); *resultp is dereferenced here without a NULL check on that
	 * assumption — confirm against the ioctl handler.
	 */
	if (zc.zc_nvlist_dst_filled) {
		*resultp = fnvlist_unpack((void *)(uintptr_t)zc.zc_nvlist_dst,
		    zc.zc_nvlist_dst_size);
	}

out:
	fnvlist_pack_free(packed, size);
	/* zc_nvlist_dst is 0 when resultp == NULL, so this is free(NULL). */
	free((void *)(uintptr_t)zc.zc_nvlist_dst);
	return (error);
}
170 170
171 171 int
172 172 lzc_create(const char *fsname, dmu_objset_type_t type, nvlist_t *props)
173 173 {
174 174 int error;
175 175 nvlist_t *args = fnvlist_alloc();
176 176 fnvlist_add_int32(args, "type", type);
177 177 if (props != NULL)
178 178 fnvlist_add_nvlist(args, "props", props);
179 179 error = lzc_ioctl(ZFS_IOC_CREATE, fsname, args, NULL);
180 180 nvlist_free(args);
181 181 return (error);
182 182 }
183 183
184 184 int
185 185 lzc_clone(const char *fsname, const char *origin,
186 186 nvlist_t *props)
187 187 {
188 188 int error;
189 189 nvlist_t *args = fnvlist_alloc();
190 190 fnvlist_add_string(args, "origin", origin);
191 191 if (props != NULL)
192 192 fnvlist_add_nvlist(args, "props", props);
193 193 error = lzc_ioctl(ZFS_IOC_CLONE, fsname, args, NULL);
194 194 nvlist_free(args);
195 195 return (error);
196 196 }
197 197
198 198 /*
199 199 * Creates snapshots.
200 200 *
201 201 * The keys in the snaps nvlist are the snapshots to be created.
202 202 * They must all be in the same pool.
203 203 *
204 204 * The props nvlist is properties to set. Currently only user properties
205 205 * are supported. { user:prop_name -> string value }
206 206 *
207 207 * The returned results nvlist will have an entry for each snapshot that failed.
208 208 * The value will be the (int32) error code.
209 209 *
210 210 * The return value will be 0 if all snapshots were created, otherwise it will
211 211 * be the errno of a (unspecified) snapshot that failed.
212 212 */
213 213 int
214 214 lzc_snapshot(nvlist_t *snaps, nvlist_t *props, nvlist_t **errlist)
215 215 {
216 216 nvpair_t *elem;
217 217 nvlist_t *args;
218 218 int error;
219 219 char pool[MAXNAMELEN];
220 220
221 221 *errlist = NULL;
222 222
223 223 /* determine the pool name */
224 224 elem = nvlist_next_nvpair(snaps, NULL);
225 225 if (elem == NULL)
226 226 return (0);
227 227 (void) strlcpy(pool, nvpair_name(elem), sizeof (pool));
228 228 pool[strcspn(pool, "/@")] = '\0';
229 229
230 230 args = fnvlist_alloc();
231 231 fnvlist_add_nvlist(args, "snaps", snaps);
232 232 if (props != NULL)
233 233 fnvlist_add_nvlist(args, "props", props);
234 234
235 235 error = lzc_ioctl(ZFS_IOC_SNAPSHOT, pool, args, errlist);
236 236 nvlist_free(args);
237 237
238 238 return (error);
239 239 }
240 240
241 241 /*
242 242 * Destroys snapshots.
243 243 *
244 244 * The keys in the snaps nvlist are the snapshots to be destroyed.
245 245 * They must all be in the same pool.
246 246 *
247 247 * Snapshots that do not exist will be silently ignored.
248 248 *
249 249 * If 'defer' is not set, and a snapshot has user holds or clones, the
250 250 * destroy operation will fail and none of the snapshots will be
251 251 * destroyed.
252 252 *
253 253 * If 'defer' is set, and a snapshot has user holds or clones, it will be
254 254 * marked for deferred destruction, and will be destroyed when the last hold
255 255 * or clone is removed/destroyed.
256 256 *
257 257 * The return value will be 0 if all snapshots were destroyed (or marked for
258 258 * later destruction if 'defer' is set) or didn't exist to begin with.
259 259 *
260 260 * Otherwise the return value will be the errno of a (unspecified) snapshot
261 261 * that failed, no snapshots will be destroyed, and the errlist will have an
262 262 * entry for each snapshot that failed. The value in the errlist will be
263 263 * the (int32) error code.
264 264 */
265 265 int
266 266 lzc_destroy_snaps(nvlist_t *snaps, boolean_t defer, nvlist_t **errlist)
267 267 {
268 268 nvpair_t *elem;
269 269 nvlist_t *args;
270 270 int error;
271 271 char pool[MAXNAMELEN];
272 272
273 273 /* determine the pool name */
274 274 elem = nvlist_next_nvpair(snaps, NULL);
275 275 if (elem == NULL)
276 276 return (0);
277 277 (void) strlcpy(pool, nvpair_name(elem), sizeof (pool));
278 278 pool[strcspn(pool, "/@")] = '\0';
279 279
280 280 args = fnvlist_alloc();
281 281 fnvlist_add_nvlist(args, "snaps", snaps);
282 282 if (defer)
283 283 fnvlist_add_boolean(args, "defer");
284 284
285 285 error = lzc_ioctl(ZFS_IOC_DESTROY_SNAPS, pool, args, errlist);
286 286 nvlist_free(args);
287 287
288 288 return (error);
289 289
290 290 }
291 291
292 292 int
293 293 lzc_snaprange_space(const char *firstsnap, const char *lastsnap,
294 294 uint64_t *usedp)
295 295 {
296 296 nvlist_t *args;
297 297 nvlist_t *result;
298 298 int err;
299 299 char fs[MAXNAMELEN];
300 300 char *atp;
301 301
302 302 /* determine the fs name */
303 303 (void) strlcpy(fs, firstsnap, sizeof (fs));
304 304 atp = strchr(fs, '@');
305 305 if (atp == NULL)
306 306 return (EINVAL);
307 307 *atp = '\0';
308 308
309 309 args = fnvlist_alloc();
310 310 fnvlist_add_string(args, "firstsnap", firstsnap);
311 311
312 312 err = lzc_ioctl(ZFS_IOC_SPACE_SNAPS, lastsnap, args, &result);
313 313 nvlist_free(args);
314 314 if (err == 0)
315 315 *usedp = fnvlist_lookup_uint64(result, "used");
316 316 fnvlist_free(result);
317 317
318 318 return (err);
319 319 }
320 320
321 321 boolean_t
322 322 lzc_exists(const char *dataset)
323 323 {
324 324 /*
325 325 * The objset_stats ioctl is still legacy, so we need to construct our
326 326 * own zfs_cmd_t rather than using zfsc_ioctl().
327 327 */
328 328 zfs_cmd_t zc = { 0 };
329 329
330 330 (void) strlcpy(zc.zc_name, dataset, sizeof (zc.zc_name));
331 331 return (ioctl(g_fd, ZFS_IOC_OBJSET_STATS, &zc) == 0);
332 332 }
333 333
334 334 /*
335 335 * Create "user holds" on snapshots. If there is a hold on a snapshot,
336 336 * the snapshot can not be destroyed. (However, it can be marked for deletion
337 337 * by lzc_destroy_snaps(defer=B_TRUE).)
338 338 *
↓ open down ↓ |
338 lines elided |
↑ open up ↑ |
339 339 * The keys in the nvlist are snapshot names.
340 340 * The snapshots must all be in the same pool.
341 341 * The value is the name of the hold (string type).
342 342 *
343 343 * If cleanup_fd is not -1, it must be the result of open("/dev/zfs", O_EXCL).
344 344 * In this case, when the cleanup_fd is closed (including on process
345 345 * termination), the holds will be released. If the system is shut down
346 346 * uncleanly, the holds will be released when the pool is next opened
347 347 * or imported.
348 348 *
349 - * The return value will be 0 if all holds were created. Otherwise the return
350 - * value will be the errno of a (unspecified) hold that failed, no holds will
351 - * be created, and the errlist will have an entry for each hold that
352 - * failed (name = snapshot). The value in the errlist will be the error
353 - * code (int32).
349 + * Holds for snapshots which don't exist will be skipped and have an entry
350 + * added to errlist, but will not cause an overall failure, except in the
 351 + * case that all holds were skipped.
352 + *
353 + * The return value will be 0 if the nvl holds was empty or all holds, for
 354 + * snapshots that existed, were successfully created and at least one hold
355 + * was created.
356 + *
357 + * If none of the snapshots for the requested holds existed ENOENT will be
358 + * returned.
359 + *
360 + * Otherwise the return value will be the errno of a (unspecified) hold that
361 + * failed, no holds will be created.
362 + *
363 + * In all cases the errlist will have an entry for each hold that failed
364 + * (name = snapshot), with its value being the error code (int32).
354 365 */
355 366 int
356 367 lzc_hold(nvlist_t *holds, int cleanup_fd, nvlist_t **errlist)
357 368 {
358 369 char pool[MAXNAMELEN];
359 370 nvlist_t *args;
360 371 nvpair_t *elem;
361 372 int error;
362 373
363 374 /* determine the pool name */
364 375 elem = nvlist_next_nvpair(holds, NULL);
365 376 if (elem == NULL)
366 377 return (0);
367 378 (void) strlcpy(pool, nvpair_name(elem), sizeof (pool));
368 379 pool[strcspn(pool, "/@")] = '\0';
369 380
370 381 args = fnvlist_alloc();
371 382 fnvlist_add_nvlist(args, "holds", holds);
372 383 if (cleanup_fd != -1)
373 384 fnvlist_add_int32(args, "cleanup_fd", cleanup_fd);
374 385
375 386 error = lzc_ioctl(ZFS_IOC_HOLD, pool, args, errlist);
376 387 nvlist_free(args);
377 388 return (error);
378 389 }
379 390
↓ open down ↓ |
16 lines elided |
↑ open up ↑ |
380 391 /*
381 392 * Release "user holds" on snapshots. If the snapshot has been marked for
382 393 * deferred destroy (by lzc_destroy_snaps(defer=B_TRUE)), it does not have
383 394 * any clones, and all the user holds are removed, then the snapshot will be
384 395 * destroyed.
385 396 *
386 397 * The keys in the nvlist are snapshot names.
387 398 * The snapshots must all be in the same pool.
388 399 * The value is a nvlist whose keys are the holds to remove.
389 400 *
390 - * The return value will be 0 if all holds were removed.
391 - * Otherwise the return value will be the errno of a (unspecified) release
392 - * that failed, no holds will be released, and the errlist will have an
393 - * entry for each snapshot that has failed releases (name = snapshot).
394 - * The value in the errlist will be the error code (int32) of a failed release.
401 + * Holds which failed to release because they didn't exist will have an entry
402 + * added to errlist, but will not cause an overall failure.
403 + *
404 + * The return value will be 0 if the nvl holds was empty or all holds, that
 405 + * existed, were successfully removed and at least one hold was removed.
406 + *
407 + * If none of the holds specified existed ENOENT will be returned.
408 + *
409 + * Otherwise the return value will be the errno of a (unspecified) hold that
410 + * failed to release and no holds will be released.
411 + *
 412 + * In all cases the errlist will have an entry for each hold that failed
 413 + * to release.
395 414 */
396 415 int
397 416 lzc_release(nvlist_t *holds, nvlist_t **errlist)
398 417 {
399 418 char pool[MAXNAMELEN];
400 419 nvpair_t *elem;
401 420
402 421 /* determine the pool name */
403 422 elem = nvlist_next_nvpair(holds, NULL);
404 423 if (elem == NULL)
405 424 return (0);
406 425 (void) strlcpy(pool, nvpair_name(elem), sizeof (pool));
407 426 pool[strcspn(pool, "/@")] = '\0';
408 427
409 428 return (lzc_ioctl(ZFS_IOC_RELEASE, pool, holds, errlist));
410 429 }
411 430
412 431 /*
413 432 * Retrieve list of user holds on the specified snapshot.
414 433 *
415 434 * On success, *holdsp will be set to a nvlist which the caller must free.
416 435 * The keys are the names of the holds, and the value is the creation time
417 436 * of the hold (uint64) in seconds since the epoch.
418 437 */
419 438 int
420 439 lzc_get_holds(const char *snapname, nvlist_t **holdsp)
421 440 {
422 441 int error;
423 442 nvlist_t *innvl = fnvlist_alloc();
424 443 error = lzc_ioctl(ZFS_IOC_GET_HOLDS, snapname, innvl, holdsp);
425 444 fnvlist_free(innvl);
426 445 return (error);
427 446 }
428 447
429 448 /*
430 449 * If fromsnap is NULL, a full (non-incremental) stream will be sent.
431 450 */
432 451 int
433 452 lzc_send(const char *snapname, const char *fromsnap, int fd)
434 453 {
435 454 nvlist_t *args;
436 455 int err;
437 456
438 457 args = fnvlist_alloc();
439 458 fnvlist_add_int32(args, "fd", fd);
440 459 if (fromsnap != NULL)
441 460 fnvlist_add_string(args, "fromsnap", fromsnap);
442 461 err = lzc_ioctl(ZFS_IOC_SEND_NEW, snapname, args, NULL);
443 462 nvlist_free(args);
444 463 return (err);
445 464 }
446 465
447 466 /*
448 467 * If fromsnap is NULL, a full (non-incremental) stream will be estimated.
449 468 */
450 469 int
451 470 lzc_send_space(const char *snapname, const char *fromsnap, uint64_t *spacep)
452 471 {
453 472 nvlist_t *args;
454 473 nvlist_t *result;
455 474 int err;
456 475
457 476 args = fnvlist_alloc();
458 477 if (fromsnap != NULL)
459 478 fnvlist_add_string(args, "fromsnap", fromsnap);
460 479 err = lzc_ioctl(ZFS_IOC_SEND_SPACE, snapname, args, &result);
461 480 nvlist_free(args);
462 481 if (err == 0)
463 482 *spacep = fnvlist_lookup_uint64(result, "space");
464 483 nvlist_free(result);
465 484 return (err);
466 485 }
467 486
/*
 * Read exactly 'ilen' bytes from 'fd' into 'buf', looping over short
 * reads (e.g. from a pipe or socket).
 *
 * Returns 0 if all bytes were read, or EIO on a read error or premature
 * end-of-stream.
 *
 * Fixes the original's bookkeeping: read() returns ssize_t, and on error
 * (rv == -1) the old code still advanced cp and len by rv, corrupting the
 * residual pointer/count before the error check.
 */
static int
recv_read(int fd, void *buf, int ilen)
{
	char *cp = buf;
	int len = ilen;
	ssize_t rv;

	while (len > 0) {
		rv = read(fd, cp, len);
		if (rv <= 0)
			break;	/* -1 = error, 0 = EOF */
		/* Only advance by bytes actually read. */
		cp += rv;
		len -= rv;
	}

	/* Leftover bytes mean the stream errored or was truncated. */
	if (len != 0)
		return (EIO);

	return (0);
}
486 505
487 506 /*
488 507 * The simplest receive case: receive from the specified fd, creating the
 489 508  * specified snapshot. Apply the specified properties as "received" properties
490 509 * (which can be overridden by locally-set properties). If the stream is a
491 510 * clone, its origin snapshot must be specified by 'origin'. The 'force'
492 511 * flag will cause the target filesystem to be rolled back or destroyed if
493 512 * necessary to receive.
494 513 *
495 514 * Return 0 on success or an errno on failure.
496 515 *
497 516 * Note: this interface does not work on dedup'd streams
498 517 * (those with DMU_BACKUP_FEATURE_DEDUP).
499 518 */
/*
 * Receive a send stream from 'fd', creating 'snapname'.  See the contract
 * in the comment preceding this function.  Returns 0 or an errno value.
 */
int
lzc_receive(const char *snapname, nvlist_t *props, const char *origin,
    boolean_t force, int fd)
{
	/*
	 * The receive ioctl is still legacy, so we need to construct our own
	 * zfs_cmd_t rather than using lzc_ioctl().
	 */
	zfs_cmd_t zc = { 0 };
	char *atp;
	char *packed = NULL;
	size_t size;
	dmu_replay_record_t drr;
	int error;

	/* The library must have been initialized so g_fd is valid. */
	ASSERT3S(g_refcount, >, 0);

	/* zc_name is name of containing filesystem */
	(void) strlcpy(zc.zc_name, snapname, sizeof (zc.zc_name));
	atp = strchr(zc.zc_name, '@');
	if (atp == NULL)
		return (EINVAL);
	*atp = '\0';

	/* if the fs does not exist, try its parent. */
	if (!lzc_exists(zc.zc_name)) {
		char *slashp = strrchr(zc.zc_name, '/');
		if (slashp == NULL)
			return (ENOENT);
		*slashp = '\0';

	}

	/* zc_value is full name of the snapshot to create */
	(void) strlcpy(zc.zc_value, snapname, sizeof (zc.zc_value));

	if (props != NULL) {
		/* zc_nvlist_src is props to set */
		packed = fnvlist_pack(props, &size);
		zc.zc_nvlist_src = (uint64_t)(uintptr_t)packed;
		zc.zc_nvlist_src_size = size;
	}

	/* zc_string is name of clone origin (if DRR_FLAG_CLONE) */
	if (origin != NULL)
		(void) strlcpy(zc.zc_string, origin, sizeof (zc.zc_string));

	/* zc_begin_record is non-byteswapped BEGIN record */
	error = recv_read(fd, &drr, sizeof (drr));
	if (error != 0)
		goto out;
	zc.zc_begin_record = drr.drr_u.drr_begin;

	/* zc_cookie is fd to read from */
	zc.zc_cookie = fd;

	/* zc guid is force flag */
	zc.zc_guid = force;

	/* zc_cleanup_fd is unused */
	zc.zc_cleanup_fd = -1;

	error = ioctl(g_fd, ZFS_IOC_RECV, &zc);
	if (error != 0)
		error = errno;

out:
	if (packed != NULL)
		fnvlist_pack_free(packed, size);
	/*
	 * zc_nvlist_dst is never set in this function, so this is free(NULL),
	 * a no-op — presumably kept for symmetry with lzc_ioctl().
	 */
	free((void*)(uintptr_t)zc.zc_nvlist_dst);
	return (error);
}
↓ open down ↓ |
167 lines elided |
↑ open up ↑ |
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX