4045 zfs write throttle & i/o scheduler performance work
Reviewed by: George Wilson <george.wilson@delphix.com>
Reviewed by: Adam Leventhal <ahl@delphix.com>
Reviewed by: Christopher Siden <christopher.siden@delphix.com>

@@ -44,23 +44,95 @@
 #include <sys/bptree.h>
 #include <sys/zfeature.h>
 #include <sys/zil_impl.h>
 #include <sys/dsl_userhold.h>
 
-int zfs_no_write_throttle = 0;
-int zfs_write_limit_shift = 3;                  /* 1/8th of physical memory */
-int zfs_txg_synctime_ms = 1000;         /* target millisecs to sync a txg */
+/*
+ * ZFS Write Throttle
+ * ------------------
+ *
+ * ZFS must limit the rate of incoming writes to the rate at which it is able
+ * to sync data modifications to the backend storage. Throttling by too much
+ * creates an artificial limit; throttling by too little can only be sustained
+ * for short periods and would lead to highly lumpy performance. On a per-pool
+ * basis, ZFS tracks the amount of modified (dirty) data. As operations change
+ * data, the amount of dirty data increases; as ZFS syncs out data, the amount
+ * of dirty data decreases. When the amount of dirty data exceeds a
+ * predetermined threshold further modifications are blocked until the amount
+ * of dirty data decreases (as data is synced out).
+ *
+ * The limit on dirty data is tunable, and should be adjusted according to
+ * both the IO capacity and available memory of the system. The larger the
+ * window, the more ZFS is able to aggregate and amortize metadata (and data)
+ * changes. However, memory is a limited resource, and allowing for more dirty
+ * data comes at the cost of keeping other useful data in memory (for example
+ * ZFS data cached by the ARC).
+ *
+ * Implementation
+ *
+ * As buffers are modified, dsl_pool_dirty_space() increments both the per-
+ * txg (dp_dirty_pertxg[]) and poolwide (dp_dirty_total) accounting of
+ * dirty space used; dsl_pool_undirty_space() decrements those values as
+ * data is synced out from dsl_pool_sync(). While only the poolwide value is
+ * relevant, the per-txg value is useful for debugging. The tunable
+ * zfs_dirty_data_max determines the dirty space limit. Once that value is
+ * exceeded, new writes are halted until space frees up.
+ *
+ * The zfs_dirty_data_sync tunable dictates the threshold at which we
+ * ensure that there is a txg syncing (see the comment in txg.c for a full
+ * description of transaction group stages).
+ *
+ * The IO scheduler uses both the dirty space limit and current amount of
+ * dirty data as inputs. Those values affect the number of concurrent IOs ZFS
+ * issues. See the comment in vdev_queue.c for details of the IO scheduler.
+ *
+ * The delay is also calculated based on the amount of dirty data.  See the
+ * comment above dmu_tx_delay() for details.
+ */
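
The comment above describes three thresholds on the poolwide dirty-data count. As a condensed illustration only (the real checks are split across dsl_pool_need_dirty_delay(), dmu_tx_delay() and the dmu_tx assign/wait path; the helper name below is hypothetical and not part of this change):

static void
dirty_data_thresholds_sketch(dsl_pool_t *dp)
{
        /* The caller would hold dp_lock, as dsl_pool_need_dirty_delay() does. */
        uint64_t dirty = dp->dp_dirty_total;
        uint64_t delay_min_bytes =
            zfs_dirty_data_max * zfs_delay_min_dirty_percent / 100;

        if (dirty > zfs_dirty_data_sync) {
                /* Enough dirty data has accumulated: ensure a txg is syncing. */
                txg_kick(dp);
        }
        if (dirty > delay_min_bytes) {
                /* dmu_tx_delay() begins delaying each new transaction. */
        }
        if (dirty >= zfs_dirty_data_max) {
                /* New writers block until dsl_pool_undirty_space() frees space. */
        }
}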
 
-uint64_t zfs_write_limit_min = 32 << 20;        /* min write limit is 32MB */
-uint64_t zfs_write_limit_max = 0;               /* max data payload per txg */
-uint64_t zfs_write_limit_inflated = 0;
-uint64_t zfs_write_limit_override = 0;
+/*
+ * zfs_dirty_data_max will be set to zfs_dirty_data_max_percent% of all memory,
+ * capped at zfs_dirty_data_max_max.  It can also be overridden in /etc/system.
+ */
+uint64_t zfs_dirty_data_max;
+uint64_t zfs_dirty_data_max_max = 4ULL * 1024 * 1024 * 1024;
+int zfs_dirty_data_max_percent = 10;
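
The initialization itself is not shown in this hunk. A minimal sketch of how the default could be derived, assuming the usual illumos physmem/ptob() interfaces (the function name is hypothetical; an /etc/system override would leave zfs_dirty_data_max nonzero and skip the computation):

static void
dirty_data_max_init_sketch(void)
{
        if (zfs_dirty_data_max == 0) {
                /* Default to zfs_dirty_data_max_percent% of physical memory. */
                zfs_dirty_data_max = ptob(physmem) *
                    zfs_dirty_data_max_percent / 100;
                /* Never allow more than zfs_dirty_data_max_max. */
                zfs_dirty_data_max = MIN(zfs_dirty_data_max,
                    zfs_dirty_data_max_max);
        }
}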
 
-kmutex_t zfs_write_limit_lock;
+/*
+ * If there is at least this much dirty data, push out a txg.
+ */
+uint64_t zfs_dirty_data_sync = 64 * 1024 * 1024;
 
-static pgcnt_t old_physmem = 0;
+/*
+ * Once dirty data reaches this percentage of zfs_dirty_data_max,
+ * dmu_tx_delay() will kick in and delay each transaction.
+ * This value should be >= zfs_vdev_async_write_active_max_dirty_percent.
+ */
+int zfs_delay_min_dirty_percent = 60;
 
+/*
+ * This controls how quickly the delay approaches infinity.
+ * Larger values cause longer delays for a given amount of dirty data.
+ * Therefore, for a given throughput, larger values cause the pool to settle
+ * at a smaller amount of dirty data.
+ *
+ * For the smoothest delay, this value should be about 1 billion divided
+ * by the maximum number of operations per second.  This will smoothly
+ * handle between 10x and 1/10th this number.
+ *
+ * Note: zfs_delay_scale * zfs_dirty_data_max must be < 2^64, due to the
+ * multiply in dmu_tx_delay().
+ */
+uint64_t zfs_delay_scale = 1000 * 1000 * 1000 / 2000;
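
The delay computation itself is in dmu_tx_delay() and is outside this file; the sketch below (hypothetical helper name) only restates the curve the comment describes: no delay below the zfs_delay_min_dirty_percent threshold, then a delay that rises toward infinity as dirty data approaches zfs_dirty_data_max, scaled by zfs_delay_scale.

static uint64_t
tx_delay_ns_sketch(uint64_t dirty)
{
        uint64_t delay_min_bytes =
            zfs_dirty_data_max * zfs_delay_min_dirty_percent / 100;

        if (dirty <= delay_min_bytes)
                return (0);

        /* Writers block at the limit, so dirty stays below the maximum. */
        ASSERT3U(dirty, <, zfs_dirty_data_max);

        /* This is the multiply that the 2^64 note above refers to. */
        return (zfs_delay_scale * (dirty - delay_min_bytes) /
            (zfs_dirty_data_max - dirty));
}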
+
+/*
+ * XXX someday maybe turn these into #defines, and you have to tune it on a
+ * per-pool basis using zfs.conf.
+ */
+
 hrtime_t zfs_throttle_delay = MSEC2NSEC(10);
 hrtime_t zfs_throttle_resolution = MSEC2NSEC(10);
 
 int
 dsl_pool_open_special_dir(dsl_pool_t *dp, const char *name, dsl_dir_t **ddp)

@@ -85,11 +157,10 @@
 
         dp = kmem_zalloc(sizeof (dsl_pool_t), KM_SLEEP);
         dp->dp_spa = spa;
         dp->dp_meta_rootbp = *bp;
         rrw_init(&dp->dp_config_rwlock, B_TRUE);
-        dp->dp_write_limit = zfs_write_limit_min;
         txg_init(dp, txg);
 
         txg_list_create(&dp->dp_dirty_datasets,
             offsetof(dsl_dataset_t, ds_dirty_link));
         txg_list_create(&dp->dp_dirty_zilogs,

@@ -98,10 +169,11 @@
             offsetof(dsl_dir_t, dd_dirty_link));
         txg_list_create(&dp->dp_sync_tasks,
             offsetof(dsl_sync_task_t, dst_node));
 
         mutex_init(&dp->dp_lock, NULL, MUTEX_DEFAULT, NULL);
+        cv_init(&dp->dp_spaceavail_cv, NULL, CV_DEFAULT, NULL);
 
         dp->dp_vnrele_taskq = taskq_create("zfs_vn_rele_taskq", 1, minclsyspri,
             1, 4, 0);
 
         return (dp);

@@ -212,13 +284,13 @@
 }
 
 void
 dsl_pool_close(dsl_pool_t *dp)
 {
-        /* drop our references from dsl_pool_open() */
-
         /*
+         * Drop our references from dsl_pool_open().
+         *
          * Since we held the origin_snap from "syncing" context (which
          * includes pool-opening context), it actually only got a "ref"
          * and not a hold, so just drop that here.
          */
         if (dp->dp_origin_snap)

@@ -344,99 +416,119 @@
         dsl_deadlist_t *dl = arg;
         dsl_deadlist_insert(dl, bp, tx);
         return (0);
 }
 
+static void
+dsl_pool_sync_mos(dsl_pool_t *dp, dmu_tx_t *tx)
+{
+        zio_t *zio = zio_root(dp->dp_spa, NULL, NULL, ZIO_FLAG_MUSTSUCCEED);
+        dmu_objset_sync(dp->dp_meta_objset, zio, tx);
+        VERIFY0(zio_wait(zio));
+        dprintf_bp(&dp->dp_meta_rootbp, "meta objset rootbp is %s", "");
+        spa_set_rootblkptr(dp->dp_spa, &dp->dp_meta_rootbp);
+}
+
+static void
+dsl_pool_dirty_delta(dsl_pool_t *dp, int64_t delta)
+{
+        ASSERT(MUTEX_HELD(&dp->dp_lock));
+
+        if (delta < 0)
+                ASSERT3U(-delta, <=, dp->dp_dirty_total);
+
+        dp->dp_dirty_total += delta;
+
+        /*
+         * Note: we signal even when increasing dp_dirty_total.
+         * This ensures forward progress -- each thread wakes the next waiter.
+         */
+        if (dp->dp_dirty_total <= zfs_dirty_data_max)
+                cv_signal(&dp->dp_spaceavail_cv);
+}
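
dsl_pool_dirty_delta() is the signalling side of dp_spaceavail_cv. The blocking side lives in the dmu_tx wait path rather than in this file; a hedged sketch of that loop, for context (helper name hypothetical):

static void
dirty_space_wait_sketch(dsl_pool_t *dp)
{
        mutex_enter(&dp->dp_lock);
        while (dp->dp_dirty_total >= zfs_dirty_data_max)
                cv_wait(&dp->dp_spaceavail_cv, &dp->dp_lock);
        mutex_exit(&dp->dp_lock);
}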
+
 void
 dsl_pool_sync(dsl_pool_t *dp, uint64_t txg)
 {
         zio_t *zio;
         dmu_tx_t *tx;
         dsl_dir_t *dd;
         dsl_dataset_t *ds;
         objset_t *mos = dp->dp_meta_objset;
-        hrtime_t start, write_time;
-        uint64_t data_written;
-        int err;
         list_t synced_datasets;
 
         list_create(&synced_datasets, sizeof (dsl_dataset_t),
             offsetof(dsl_dataset_t, ds_synced_link));
 
-        /*
-         * We need to copy dp_space_towrite() before doing
-         * dsl_sync_task_sync(), because
-         * dsl_dataset_snapshot_reserve_space() will increase
-         * dp_space_towrite but not actually write anything.
-         */
-        data_written = dp->dp_space_towrite[txg & TXG_MASK];
-
         tx = dmu_tx_create_assigned(dp, txg);
 
-        dp->dp_read_overhead = 0;
-        start = gethrtime();
-
+        /*
+         * Write out all dirty blocks of dirty datasets.
+         */
         zio = zio_root(dp->dp_spa, NULL, NULL, ZIO_FLAG_MUSTSUCCEED);
-        while (ds = txg_list_remove(&dp->dp_dirty_datasets, txg)) {
+        while ((ds = txg_list_remove(&dp->dp_dirty_datasets, txg)) != NULL) {
                 /*
                  * We must not sync any non-MOS datasets twice, because
                  * we may have taken a snapshot of them.  However, we
                  * may sync newly-created datasets on pass 2.
                  */
                 ASSERT(!list_link_active(&ds->ds_synced_link));
                 list_insert_tail(&synced_datasets, ds);
                 dsl_dataset_sync(ds, zio, tx);
         }
-        DTRACE_PROBE(pool_sync__1setup);
-        err = zio_wait(zio);
+        VERIFY0(zio_wait(zio));
 
-        write_time = gethrtime() - start;
-        ASSERT(err == 0);
-        DTRACE_PROBE(pool_sync__2rootzio);
+        /*
+         * We have written all of the accounted dirty data, so our
+         * dp_dirty_pertxg[] for this txg should now be zero.  However, some
+         * seldom-used code paths do not adhere to this (e.g. dbuf_undirty();
+         * also rounding error in dbuf_write_physdone()).
+         * Shore up the accounting of any dirtied space now.
+         */
+        dsl_pool_undirty_space(dp, dp->dp_dirty_pertxg[txg & TXG_MASK], txg);
 
         /*
          * After the data blocks have been written (ensured by the zio_wait()
          * above), update the user/group space accounting.
          */
-        for (ds = list_head(&synced_datasets); ds;
-            ds = list_next(&synced_datasets, ds))
+        for (ds = list_head(&synced_datasets); ds != NULL;
+            ds = list_next(&synced_datasets, ds)) {
                 dmu_objset_do_userquota_updates(ds->ds_objset, tx);
+        }
 
         /*
          * Sync the datasets again to push out the changes due to
          * userspace updates.  This must be done before we process the
          * sync tasks, so that any snapshots will have the correct
          * user accounting information (and we won't get confused
          * about which blocks are part of the snapshot).
          */
         zio = zio_root(dp->dp_spa, NULL, NULL, ZIO_FLAG_MUSTSUCCEED);
-        while (ds = txg_list_remove(&dp->dp_dirty_datasets, txg)) {
+        while ((ds = txg_list_remove(&dp->dp_dirty_datasets, txg)) != NULL) {
                 ASSERT(list_link_active(&ds->ds_synced_link));
                 dmu_buf_rele(ds->ds_dbuf, ds);
                 dsl_dataset_sync(ds, zio, tx);
         }
-        err = zio_wait(zio);
+        VERIFY0(zio_wait(zio));
 
         /*
          * Now that the datasets have been completely synced, we can
          * clean up our in-memory structures accumulated while syncing:
          *
          *  - move dead blocks from the pending deadlist to the on-disk deadlist
          *  - release hold from dsl_dataset_dirty()
          */
-        while (ds = list_remove_head(&synced_datasets)) {
+        while ((ds = list_remove_head(&synced_datasets)) != NULL) {
                 objset_t *os = ds->ds_objset;
                 bplist_iterate(&ds->ds_pending_deadlist,
                     deadlist_enqueue_cb, &ds->ds_deadlist, tx);
                 ASSERT(!dmu_objset_is_dirty(os, txg));
                 dmu_buf_rele(ds->ds_dbuf, ds);
         }
-
-        start = gethrtime();
-        while (dd = txg_list_remove(&dp->dp_dirty_dirs, txg))
+        while ((dd = txg_list_remove(&dp->dp_dirty_dirs, txg)) != NULL) {
                 dsl_dir_sync(dd, tx);
-        write_time += gethrtime() - start;
+        }
 
         /*
          * The MOS's space is accounted for in the pool/$MOS
          * (dp_mos_dir).  We can't modify the mos while we're syncing
          * it, so we remember the deltas and apply them here.

@@ -450,24 +542,14 @@
                 dp->dp_mos_used_delta = 0;
                 dp->dp_mos_compressed_delta = 0;
                 dp->dp_mos_uncompressed_delta = 0;
         }
 
-        start = gethrtime();
         if (list_head(&mos->os_dirty_dnodes[txg & TXG_MASK]) != NULL ||
             list_head(&mos->os_free_dnodes[txg & TXG_MASK]) != NULL) {
-                zio = zio_root(dp->dp_spa, NULL, NULL, ZIO_FLAG_MUSTSUCCEED);
-                dmu_objset_sync(mos, zio, tx);
-                err = zio_wait(zio);
-                ASSERT(err == 0);
-                dprintf_bp(&dp->dp_meta_rootbp, "meta objset rootbp is %s", "");
-                spa_set_rootblkptr(dp->dp_spa, &dp->dp_meta_rootbp);
+                dsl_pool_sync_mos(dp, tx);
         }
-        write_time += gethrtime() - start;
-        DTRACE_PROBE2(pool_sync__4io, hrtime_t, write_time,
-            hrtime_t, dp->dp_read_overhead);
-        write_time -= dp->dp_read_overhead;
 
         /*
          * If we modify a dataset in the same txg that we want to destroy it,
          * its dsl_dir's dd_dbuf will be dirty, and thus have a hold on it.
          * dsl_dir_destroy_check() will fail if there are unexpected holds.

@@ -474,76 +556,33 @@
          * Therefore, we want to sync the MOS (thus syncing the dd_dbuf
          * and clearing the hold on it) before we process the sync_tasks.
          * The MOS data dirtied by the sync_tasks will be synced on the next
          * pass.
          */
-        DTRACE_PROBE(pool_sync__3task);
         if (!txg_list_empty(&dp->dp_sync_tasks, txg)) {
                 dsl_sync_task_t *dst;
                 /*
                  * No more sync tasks should have been added while we
                  * were syncing.
                  */
-                ASSERT(spa_sync_pass(dp->dp_spa) == 1);
-                while (dst = txg_list_remove(&dp->dp_sync_tasks, txg))
+                ASSERT3U(spa_sync_pass(dp->dp_spa), ==, 1);
+                while ((dst = txg_list_remove(&dp->dp_sync_tasks, txg)) != NULL)
                         dsl_sync_task_sync(dst, tx);
         }
 
         dmu_tx_commit(tx);
 
-        dp->dp_space_towrite[txg & TXG_MASK] = 0;
-        ASSERT(dp->dp_tempreserved[txg & TXG_MASK] == 0);
-
-        /*
-         * If the write limit max has not been explicitly set, set it
-         * to a fraction of available physical memory (default 1/8th).
-         * Note that we must inflate the limit because the spa
-         * inflates write sizes to account for data replication.
-         * Check this each sync phase to catch changing memory size.
-         */
-        if (physmem != old_physmem && zfs_write_limit_shift) {
-                mutex_enter(&zfs_write_limit_lock);
-                old_physmem = physmem;
-                zfs_write_limit_max = ptob(physmem) >> zfs_write_limit_shift;
-                zfs_write_limit_inflated = MAX(zfs_write_limit_min,
-                    spa_get_asize(dp->dp_spa, zfs_write_limit_max));
-                mutex_exit(&zfs_write_limit_lock);
-        }
-
-        /*
-         * Attempt to keep the sync time consistent by adjusting the
-         * amount of write traffic allowed into each transaction group.
-         * Weight the throughput calculation towards the current value:
-         *      thru = 3/4 old_thru + 1/4 new_thru
-         *
-         * Note: write_time is in nanosecs while dp_throughput is expressed in
-         * bytes per millisecond.
-         */
-        ASSERT(zfs_write_limit_min > 0);
-        if (data_written > zfs_write_limit_min / 8 &&
-            write_time > MSEC2NSEC(1)) {
-                uint64_t throughput = data_written / NSEC2MSEC(write_time);
-
-                if (dp->dp_throughput)
-                        dp->dp_throughput = throughput / 4 +
-                            3 * dp->dp_throughput / 4;
-                else
-                        dp->dp_throughput = throughput;
-                dp->dp_write_limit = MIN(zfs_write_limit_inflated,
-                    MAX(zfs_write_limit_min,
-                    dp->dp_throughput * zfs_txg_synctime_ms));
-        }
+        DTRACE_PROBE2(dsl_pool_sync__done, dsl_pool_t *dp, dp, uint64_t, txg);
 }
 
 void
 dsl_pool_sync_done(dsl_pool_t *dp, uint64_t txg)
 {
         zilog_t *zilog;
-        dsl_dataset_t *ds;
 
         while (zilog = txg_list_remove(&dp->dp_dirty_zilogs, txg)) {
-                ds = dmu_objset_ds(zilog->zl_os);
+                dsl_dataset_t *ds = dmu_objset_ds(zilog->zl_os);
                 zil_clean(zilog, txg);
                 ASSERT(!dmu_objset_is_dirty(zilog->zl_os, txg));
                 dmu_buf_rele(ds->ds_dbuf, zilog);
         }
         ASSERT(!dmu_objset_is_dirty(dp->dp_meta_objset, txg));

@@ -581,86 +620,51 @@
                 resv >>= 1;
 
         return (space - resv);
 }
 
-int
-dsl_pool_tempreserve_space(dsl_pool_t *dp, uint64_t space, dmu_tx_t *tx)
+boolean_t
+dsl_pool_need_dirty_delay(dsl_pool_t *dp)
 {
-        uint64_t reserved = 0;
-        uint64_t write_limit = (zfs_write_limit_override ?
-            zfs_write_limit_override : dp->dp_write_limit);
+        uint64_t delay_min_bytes =
+            zfs_dirty_data_max * zfs_delay_min_dirty_percent / 100;
+        boolean_t rv;
 
-        if (zfs_no_write_throttle) {
-                atomic_add_64(&dp->dp_tempreserved[tx->tx_txg & TXG_MASK],
-                    space);
-                return (0);
-        }
-
-        /*
-         * Check to see if we have exceeded the maximum allowed IO for
-         * this transaction group.  We can do this without locks since
-         * a little slop here is ok.  Note that we do the reserved check
-         * with only half the requested reserve: this is because the
-         * reserve requests are worst-case, and we really don't want to
-         * throttle based off of worst-case estimates.
-         */
-        if (write_limit > 0) {
-                reserved = dp->dp_space_towrite[tx->tx_txg & TXG_MASK]
-                    + dp->dp_tempreserved[tx->tx_txg & TXG_MASK] / 2;
-
-                if (reserved && reserved > write_limit)
-                        return (SET_ERROR(ERESTART));
-        }
-
-        atomic_add_64(&dp->dp_tempreserved[tx->tx_txg & TXG_MASK], space);
-
-        /*
-         * If this transaction group is over 7/8ths capacity, delay
-         * the caller 1 clock tick.  This will slow down the "fill"
-         * rate until the sync process can catch up with us.
-         */
-        if (reserved && reserved > (write_limit - (write_limit >> 3))) {
-                txg_delay(dp, tx->tx_txg, zfs_throttle_delay,
-                    zfs_throttle_resolution);
-        }
-
-        return (0);
+        mutex_enter(&dp->dp_lock);
+        if (dp->dp_dirty_total > zfs_dirty_data_sync)
+                txg_kick(dp);
+        rv = (dp->dp_dirty_total > delay_min_bytes);
+        mutex_exit(&dp->dp_lock);
+        return (rv);
 }
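
For context, a sketch of how the transaction-assign path (outside this diff) might consult this predicate; when it returns B_TRUE the assignment is typically backed off with ERESTART so the caller can wait and be delayed. The helper name is hypothetical:

static int
tx_assign_dirty_check_sketch(dsl_pool_t *dp)
{
        if (dsl_pool_need_dirty_delay(dp))
                return (SET_ERROR(ERESTART));
        return (0);
}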
 
 void
-dsl_pool_tempreserve_clear(dsl_pool_t *dp, int64_t space, dmu_tx_t *tx)
+dsl_pool_dirty_space(dsl_pool_t *dp, int64_t space, dmu_tx_t *tx)
 {
-        ASSERT(dp->dp_tempreserved[tx->tx_txg & TXG_MASK] >= space);
-        atomic_add_64(&dp->dp_tempreserved[tx->tx_txg & TXG_MASK], -space);
-}
-
-void
-dsl_pool_memory_pressure(dsl_pool_t *dp)
-{
-        uint64_t space_inuse = 0;
-        int i;
-
-        if (dp->dp_write_limit == zfs_write_limit_min)
-                return;
-
-        for (i = 0; i < TXG_SIZE; i++) {
-                space_inuse += dp->dp_space_towrite[i];
-                space_inuse += dp->dp_tempreserved[i];
+        if (space > 0) {
+                mutex_enter(&dp->dp_lock);
+                dp->dp_dirty_pertxg[tx->tx_txg & TXG_MASK] += space;
+                dsl_pool_dirty_delta(dp, space);
+                mutex_exit(&dp->dp_lock);
         }
-        dp->dp_write_limit = MAX(zfs_write_limit_min,
-            MIN(dp->dp_write_limit, space_inuse / 4));
 }
 
 void
-dsl_pool_willuse_space(dsl_pool_t *dp, int64_t space, dmu_tx_t *tx)
+dsl_pool_undirty_space(dsl_pool_t *dp, int64_t space, uint64_t txg)
 {
-        if (space > 0) {
-                mutex_enter(&dp->dp_lock);
-                dp->dp_space_towrite[tx->tx_txg & TXG_MASK] += space;
-                mutex_exit(&dp->dp_lock);
-        }
+        ASSERT3S(space, >=, 0);
+        if (space == 0)
+                return;
+        mutex_enter(&dp->dp_lock);
+        if (dp->dp_dirty_pertxg[txg & TXG_MASK] < space) {
+                /* XXX writing something we didn't dirty? */
+                space = dp->dp_dirty_pertxg[txg & TXG_MASK];
+        }
+        ASSERT3U(dp->dp_dirty_pertxg[txg & TXG_MASK], >=, space);
+        dp->dp_dirty_pertxg[txg & TXG_MASK] -= space;
+        ASSERT3U(dp->dp_dirty_total, >=, space);
+        dsl_pool_dirty_delta(dp, -space);
+        mutex_exit(&dp->dp_lock);
 }
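
The two accounting entry points are intended to be used in pairs by callers outside this file (buffers charge space when dirtied and credit it back once written); a hedged sketch of the pairing, with a hypothetical wrapper name:

static void
dirty_accounting_pairing_sketch(dsl_pool_t *dp, int64_t size, dmu_tx_t *tx)
{
        /* Charge the space to this txg when a buffer is dirtied... */
        dsl_pool_dirty_space(dp, size, tx);

        /* ...and credit it back once that data has been written out. */
        dsl_pool_undirty_space(dp, size, tx->tx_txg);
}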
 
 /* ARGSUSED */
 static int
 upgrade_clones_cb(dsl_pool_t *dp, dsl_dataset_t *hds, void *arg)