Optimize creation and removal of temporary "user holds" placed on
snapshots by a zfs send, by ensuring all the required holds and
releases are done in a single dsl_sync_task.
Creation now collates the required holds during a dry run and
then uses a single lzc_hold call via zfs_hold_apply instead of
processing each snapshot in turn.
Deferred (on exit) cleanup by the kernel is also now done in a single
dsl_sync_task by reusing dsl_dataset_user_release.
On a test with 11 volumes in a tree, each with 8 snapshots, on a
single HDD zpool, this reduces the time required to perform a full
send from 20 seconds to under 0.8 seconds.
For reference, eliminating the hold entirely reduces this to 0.15
seconds.
While I'm here:-
* Remove some unused structures
* Fix nvlist_t leak in zfs_release_one
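
For context, the sketch below illustrates the collate-then-apply pattern
outside the patch: every snapshot/tag pair is gathered into one nvlist and
submitted through a single lzc_hold() call, which the kernel handles in one
dsl_sync_task. It assumes libzfs_core_init() has already been called; the
pool, snapshot and tag names are made up.

#include <libnvpair.h>
#include <libzfs_core.h>

/*
 * Minimal illustrative sketch, not part of the patch.  Collate all the
 * holds into one nvlist, then issue them with a single lzc_hold() call
 * rather than one ioctl per snapshot.
 */
static int
hold_all_at_once(int cleanup_fd)
{
        nvlist_t *holds = fnvlist_alloc();
        nvlist_t *errors = NULL;
        int ret;

        /* Collate the required holds (the patch does this during the dry run). */
        fnvlist_add_string(holds, "tank/vol1@snap1", ".send-hold-1");
        fnvlist_add_string(holds, "tank/vol2@snap1", ".send-hold-1");

        /* One call covering every snapshot collected above. */
        ret = lzc_hold(holds, cleanup_fd, &errors);

        fnvlist_free(holds);
        if (errors != NULL)
                fnvlist_free(errors);
        return (ret);
}

Passing a cleanup_fd that refers to an open /dev/zfs descriptor (as zfs send
does) makes these holds temporary: the kernel releases them when the
descriptor is closed, which is the deferred on-exit cleanup path the patch
now routes through dsl_dataset_user_release.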

@@ -4099,28 +4099,69 @@
         zfs_close(zhp);
         return (rv);
 }
 
 int
+zfs_hold_add(zfs_handle_t *zhp, const char *snapname, const char *tag,
+    boolean_t enoent_ok, nvlist_t *holds)
+{
+        zfs_handle_t *szhp;
+        char name[ZFS_MAXNAMELEN];
+        char errbuf[1024];
+        int ret;
+
+        (void) snprintf(name, sizeof (name),
+            "%s@%s", zhp->zfs_name, snapname);
+
+        szhp = make_dataset_handle(zhp->zfs_hdl, name);
+        if (szhp) {
+                fnvlist_add_string(holds, name, tag);
+                zfs_close(szhp);
+                return (0);
+        }
+
+        ret = ENOENT;
+        if (enoent_ok)
+                return (ret);
+
+        (void) snprintf(errbuf, sizeof (errbuf),
+            dgettext(TEXT_DOMAIN, "cannot hold snapshot '%s@%s'"),
+            zhp->zfs_name, snapname);
+        (void) zfs_standard_error(zhp->zfs_hdl, ret, errbuf);
+
+        return (ret);
+}
+
+int
 zfs_hold(zfs_handle_t *zhp, const char *snapname, const char *tag,
     boolean_t recursive, boolean_t enoent_ok, int cleanup_fd)
 {
         int ret;
         struct holdarg ha;
-        nvlist_t *errors;
-        libzfs_handle_t *hdl = zhp->zfs_hdl;
-        char errbuf[1024];
-        nvpair_t *elem;
 
         ha.nvl = fnvlist_alloc();
         ha.snapname = snapname;
         ha.tag = tag;
         ha.recursive = recursive;
         (void) zfs_hold_one(zfs_handle_dup(zhp), &ha);
-        ret = lzc_hold(ha.nvl, cleanup_fd, &errors);
+        ret = zfs_hold_apply(zhp, enoent_ok, cleanup_fd, ha.nvl);
         fnvlist_free(ha.nvl);
 
+        return (ret);
+}
+
+int
+zfs_hold_apply(zfs_handle_t *zhp, boolean_t enoent_ok, int cleanup_fd, nvlist_t *holds)
+{
+        int ret;
+        nvlist_t *errors;
+        libzfs_handle_t *hdl = zhp->zfs_hdl;
+        char errbuf[1024];
+        nvpair_t *elem;
+
+        ret = lzc_hold(holds, cleanup_fd, &errors);
+
         if (ret == 0)
                 return (0);
 
         if (nvlist_next_nvpair(errors, NULL) == NULL) {
                 /* no hold-specific errors */

@@ -4174,17 +4215,10 @@
 
         fnvlist_free(errors);
         return (ret);
 }
 
-struct releasearg {
-        nvlist_t *nvl;
-        const char *snapname;
-        const char *tag;
-        boolean_t recursive;
-};
-
 static int
 zfs_release_one(zfs_handle_t *zhp, void *arg)
 {
         struct holdarg *ha = arg;
         zfs_handle_t *szhp;

@@ -4197,10 +4231,11 @@
         szhp = make_dataset_handle(zhp->zfs_hdl, name);
         if (szhp) {
                 nvlist_t *holds = fnvlist_alloc();
                 fnvlist_add_boolean(holds, ha->tag);
                 fnvlist_add_nvlist(ha->nvl, name, holds);
+                fnvlist_free(holds);
                 zfs_close(szhp);
         }
 
         if (ha->recursive)
                 rv = zfs_iter_filesystems(zhp, zfs_release_one, ha);
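
As an aside on the nvlist_t leak fix above: fnvlist_add_nvlist() stores a
copy of the child nvlist in the parent rather than taking ownership, so the
caller still owns the original and must free it.  A small illustrative
sketch (hypothetical helper, not from the patch):

#include <libnvpair.h>

static void
add_release_entry(nvlist_t *releases, const char *snapname, const char *tag)
{
        nvlist_t *holds = fnvlist_alloc();

        /* Boolean entry: the tag name itself is the key. */
        fnvlist_add_boolean(holds, tag);

        /* The parent gets its own copy of 'holds'... */
        fnvlist_add_nvlist(releases, snapname, holds);

        /* ...so freeing the local allocation here avoids the leak. */
        fnvlist_free(holds);
}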