/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Portions Copyright 2011 Martin Matuska
 * Copyright (c) 2013 by Delphix. All rights reserved.
 */

#include <sys/zfs_context.h>
#include <sys/txg_impl.h>
#include <sys/dmu_impl.h>
#include <sys/dmu_tx.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_scan.h>
#include <sys/callb.h>

/*
 * ZFS Transaction Groups
 * ----------------------
 *
 * ZFS transaction groups are, as the name implies, groups of transactions
 * that act on persistent state. ZFS asserts consistency at the granularity of
 * these transaction groups. Each successive transaction group (txg) is
 * assigned a consecutive 64-bit identifier. There are three active
 * transaction group states: open, quiescing, or syncing. At any given time,
 * there may be an active txg associated with each state; each active txg may
 * either be processing or blocked waiting to enter the next state. There may
 * be up to three active txgs, and there is always a txg in the open state
 * (though it may be blocked waiting to enter the quiescing state). In broad
 * strokes, transactions -- operations that change in-memory structures -- are
 * accepted into the txg in the open state, and are completed while the txg is
 * in the open or quiescing states. The accumulated changes are written to
 * disk in the syncing state.
 *
 * Open
 *
 * When a new txg becomes active, it first enters the open state. New
 * transactions -- updates to in-memory structures -- are assigned to the
 * currently open txg. There is always a txg in the open state so that ZFS can
 * accept new changes (though the txg may refuse new changes if it has hit
 * some limit). ZFS advances the open txg to the next state for a variety of
 * reasons, such as the txg hitting a time or size threshold, or the execution
 * of an administrative action that must be completed in the syncing state.
 *
 * Quiescing
 *
 * After a txg exits the open state, it enters the quiescing state. The
 * quiescing state is intended to provide a buffer between accepting new
 * transactions in the open state and writing them out to stable storage in
 * the syncing state. While quiescing, transactions can continue their
 * operation without delaying either of the other states. Typically, a txg is
 * in the quiescing state very briefly since the operations are bounded by
 * software latencies rather than, say, slower I/O latencies. After all
 * transactions complete, the txg is ready to enter the next state.
 *
 * Syncing
 *
 * In the syncing state, the in-memory state built up during the open and (to
 * a lesser degree) the quiescing states is written to stable storage. The
 * process of writing out modified data can, in turn, modify more data. For
 * example, when we write new blocks, we need to allocate space for them;
 * those allocations modify metadata (space maps)... which themselves must be
 * written to stable storage. During the sync state, ZFS iterates, writing out
 * data until it converges and all in-memory changes have been written out.
 * The first such pass is the largest as it encompasses all the modified user
 * data (as opposed to filesystem metadata). Subsequent passes typically have
 * far less data to write as they consist exclusively of filesystem metadata.
 *
 * To ensure convergence, after a certain number of passes ZFS begins
 * overwriting locations on stable storage that had been allocated earlier in
 * the syncing state (and subsequently freed). ZFS usually allocates new
 * blocks to optimize for large, continuous writes. For the syncing state to
 * converge, however, it must complete a pass where no new blocks are
 * allocated, since each allocation requires a modification of persistent
 * metadata. Further, to hasten convergence, after a prescribed number of
 * passes, ZFS also defers frees and stops compressing.
 *
 * In addition to writing out user data, we must also execute synctasks during
 * the syncing context. A synctask is the mechanism by which some
 * administrative activities, such as creating and destroying snapshots or
 * datasets, are performed. Note that when a synctask is initiated it enters
 * the open txg, and ZFS then pushes that txg as quickly as possible to
 * completion of the syncing state in order to reduce the latency of the
 * administrative activity. To complete the syncing state, ZFS writes out a
 * new uberblock, the root of the tree of blocks that comprise all state
 * stored on the ZFS pool. Finally, if there is a quiesced txg waiting, we
 * signal that it can now transition to the syncing state.
 */
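
/*
 * For orientation, a minimal sketch (hypothetical caller, not part of this
 * file) of how a DMU consumer's change enters the currently open txg; error
 * handling and the modification itself are elided:
 *
 *	dmu_tx_t *tx = dmu_tx_create(os);
 *	dmu_tx_hold_write(tx, object, offset, size);
 *	VERIFY0(dmu_tx_assign(tx, TXG_WAIT));	<- joins the open txg
 *	... modify in-memory state under tx ...
 *	dmu_tx_commit(tx);		<- completes in open/quiescing
 *
 * The txg that the transaction joined is subsequently quiesced and synced
 * by the threads implemented below.
 */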

static void txg_sync_thread(dsl_pool_t *dp);
static void txg_quiesce_thread(dsl_pool_t *dp);

int zfs_txg_timeout = 5;	/* max seconds worth of delta per txg */

/*
 * Prepare the txg subsystem.
 */
void
txg_init(dsl_pool_t *dp, uint64_t txg)
{
	tx_state_t *tx = &dp->dp_tx;
	int c;
	bzero(tx, sizeof (tx_state_t));

	tx->tx_cpu = kmem_zalloc(max_ncpus * sizeof (tx_cpu_t), KM_SLEEP);

	for (c = 0; c < max_ncpus; c++) {
		int i;

		mutex_init(&tx->tx_cpu[c].tc_lock, NULL, MUTEX_DEFAULT, NULL);
		for (i = 0; i < TXG_SIZE; i++) {
			cv_init(&tx->tx_cpu[c].tc_cv[i], NULL, CV_DEFAULT,
			    NULL);
			list_create(&tx->tx_cpu[c].tc_callbacks[i],
			    sizeof (dmu_tx_callback_t),
			    offsetof(dmu_tx_callback_t, dcb_node));
		}
	}

	mutex_init(&tx->tx_sync_lock, NULL, MUTEX_DEFAULT, NULL);

	cv_init(&tx->tx_sync_more_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&tx->tx_sync_done_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&tx->tx_quiesce_more_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&tx->tx_quiesce_done_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&tx->tx_exit_cv, NULL, CV_DEFAULT, NULL);

	tx->tx_open_txg = txg;
}

/*
 * Close down the txg subsystem.
 */
void
txg_fini(dsl_pool_t *dp)
{
	tx_state_t *tx = &dp->dp_tx;
	int c;

	ASSERT(tx->tx_threads == 0);

	mutex_destroy(&tx->tx_sync_lock);

	cv_destroy(&tx->tx_sync_more_cv);
	cv_destroy(&tx->tx_sync_done_cv);
	cv_destroy(&tx->tx_quiesce_more_cv);
	cv_destroy(&tx->tx_quiesce_done_cv);
	cv_destroy(&tx->tx_exit_cv);

	for (c = 0; c < max_ncpus; c++) {
		int i;

		mutex_destroy(&tx->tx_cpu[c].tc_lock);
		for (i = 0; i < TXG_SIZE; i++) {
			cv_destroy(&tx->tx_cpu[c].tc_cv[i]);
			list_destroy(&tx->tx_cpu[c].tc_callbacks[i]);
		}
	}

	if (tx->tx_commit_cb_taskq != NULL)
		taskq_destroy(tx->tx_commit_cb_taskq);

	kmem_free(tx->tx_cpu, max_ncpus * sizeof (tx_cpu_t));

	bzero(tx, sizeof (tx_state_t));
}

/*
 * Start syncing transaction groups.
 */
void
txg_sync_start(dsl_pool_t *dp)
{
	tx_state_t *tx = &dp->dp_tx;

	mutex_enter(&tx->tx_sync_lock);

	dprintf("pool %p\n", dp);

	ASSERT(tx->tx_threads == 0);

	tx->tx_threads = 2;

	tx->tx_quiesce_thread = thread_create(NULL, 0, txg_quiesce_thread,
	    dp, 0, &p0, TS_RUN, minclsyspri);

	/*
	 * The sync thread can need a larger-than-default stack size on
	 * 32-bit x86.  This is due in part to nested pools and
	 * scrub_visitbp() recursion.
	 */
	tx->tx_sync_thread = thread_create(NULL, 32<<10, txg_sync_thread,
	    dp, 0, &p0, TS_RUN, minclsyspri);

	mutex_exit(&tx->tx_sync_lock);
}

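/*
 * CPR (checkpoint/resume) bookkeeping for the two worker threads below:
 * txg_thread_enter() registers with the CPR framework and takes
 * tx_sync_lock; txg_thread_wait() sleeps on a condition variable, marked
 * CPR-safe, with an optional timeout in ticks; txg_thread_exit() clears
 * the thread pointer, wakes txg_sync_stop(), and terminates the thread.
 */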
static void
txg_thread_enter(tx_state_t *tx, callb_cpr_t *cpr)
{
	CALLB_CPR_INIT(cpr, &tx->tx_sync_lock, callb_generic_cpr, FTAG);
	mutex_enter(&tx->tx_sync_lock);
}

static void
txg_thread_exit(tx_state_t *tx, callb_cpr_t *cpr, kthread_t **tpp)
{
	ASSERT(*tpp != NULL);
	*tpp = NULL;
	tx->tx_threads--;
	cv_broadcast(&tx->tx_exit_cv);
	CALLB_CPR_EXIT(cpr);		/* drops &tx->tx_sync_lock */
	thread_exit();
}

static void
txg_thread_wait(tx_state_t *tx, callb_cpr_t *cpr, kcondvar_t *cv, clock_t time)
{
	CALLB_CPR_SAFE_BEGIN(cpr);

	if (time)
		(void) cv_timedwait(cv, &tx->tx_sync_lock,
		    ddi_get_lbolt() + time);
	else
		cv_wait(cv, &tx->tx_sync_lock);

	CALLB_CPR_SAFE_END(cpr, &tx->tx_sync_lock);
}

/*
 * Stop syncing transaction groups.
 */
void
txg_sync_stop(dsl_pool_t *dp)
{
	tx_state_t *tx = &dp->dp_tx;

	dprintf("pool %p\n", dp);
	/*
	 * Finish off any work in progress.
	 */
	ASSERT(tx->tx_threads == 2);

	/*
	 * We need to ensure that we've vacated the deferred space_maps.
	 */
	txg_wait_synced(dp, tx->tx_open_txg + TXG_DEFER_SIZE);

	/*
	 * Wake all sync threads and wait for them to die.
	 */
	mutex_enter(&tx->tx_sync_lock);

	ASSERT(tx->tx_threads == 2);

	tx->tx_exiting = 1;

	cv_broadcast(&tx->tx_quiesce_more_cv);
	cv_broadcast(&tx->tx_quiesce_done_cv);
	cv_broadcast(&tx->tx_sync_more_cv);

	while (tx->tx_threads != 0)
		cv_wait(&tx->tx_exit_cv, &tx->tx_sync_lock);

	tx->tx_exiting = 0;

	mutex_exit(&tx->tx_sync_lock);
}

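/*
 * Per-CPU reference counting on the open txg.  txg_hold_open() returns with
 * this CPU's tc_lock held, which prevents txg_quiesce() from swinging
 * tx_open_txg forward; txg_rele_to_quiesce() drops that lock once the
 * caller has recorded its hold, and txg_rele_to_sync() drops the hold count
 * itself, which is what txg_quiesce() waits on.  The dmu_tx code is the
 * real consumer; the protocol, roughly (sketch only):
 *
 *	txg_handle_t th;
 *	uint64_t txg = txg_hold_open(dp, &th);
 *	txg_rele_to_quiesce(&th);	(tc_lock dropped; the txg may begin
 *					quiescing but cannot finish)
 *	... record the changes for txg ...
 *	txg_rele_to_sync(&th);		(hold dropped; quiescing may finish)
 */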
uint64_t
txg_hold_open(dsl_pool_t *dp, txg_handle_t *th)
{
	tx_state_t *tx = &dp->dp_tx;
	tx_cpu_t *tc = &tx->tx_cpu[CPU_SEQID];
	uint64_t txg;

	mutex_enter(&tc->tc_lock);

	txg = tx->tx_open_txg;
	tc->tc_count[txg & TXG_MASK]++;

	th->th_cpu = tc;
	th->th_txg = txg;

	return (txg);
}

void
txg_rele_to_quiesce(txg_handle_t *th)
{
	tx_cpu_t *tc = th->th_cpu;

	mutex_exit(&tc->tc_lock);
}

void
txg_register_callbacks(txg_handle_t *th, list_t *tx_callbacks)
{
	tx_cpu_t *tc = th->th_cpu;
	int g = th->th_txg & TXG_MASK;

	mutex_enter(&tc->tc_lock);
	list_move_tail(&tc->tc_callbacks[g], tx_callbacks);
	mutex_exit(&tc->tc_lock);
}

void
txg_rele_to_sync(txg_handle_t *th)
{
	tx_cpu_t *tc = th->th_cpu;
	int g = th->th_txg & TXG_MASK;

	mutex_enter(&tc->tc_lock);
	ASSERT(tc->tc_count[g] != 0);
	if (--tc->tc_count[g] == 0)
		cv_broadcast(&tc->tc_cv[g]);
	mutex_exit(&tc->tc_lock);

	th->th_cpu = NULL;	/* defensive */
}

/*
 * Blocks until all transactions in the group are committed.
 *
 * On return, the transaction group has reached a stable state in which it can
 * then be passed off to the syncing context.
 */
static void
txg_quiesce(dsl_pool_t *dp, uint64_t txg)
{
	tx_state_t *tx = &dp->dp_tx;
	int g = txg & TXG_MASK;
	int c;

	/*
	 * Grab all tx_cpu locks so nobody else can get into this txg.
	 */
	for (c = 0; c < max_ncpus; c++)
		mutex_enter(&tx->tx_cpu[c].tc_lock);

	ASSERT(txg == tx->tx_open_txg);
	tx->tx_open_txg++;

	DTRACE_PROBE2(txg__quiescing, dsl_pool_t *, dp, uint64_t, txg);
	DTRACE_PROBE2(txg__opened, dsl_pool_t *, dp, uint64_t, tx->tx_open_txg);

	/*
	 * Now that we've incremented tx_open_txg, we can let threads
	 * enter the next transaction group.
	 */
	for (c = 0; c < max_ncpus; c++)
		mutex_exit(&tx->tx_cpu[c].tc_lock);

	/*
	 * Quiesce the transaction group by waiting for everyone to
	 * call txg_rele_to_sync().
	 */
	for (c = 0; c < max_ncpus; c++) {
		tx_cpu_t *tc = &tx->tx_cpu[c];
		mutex_enter(&tc->tc_lock);
		while (tc->tc_count[g] != 0)
			cv_wait(&tc->tc_cv[g], &tc->tc_lock);
		mutex_exit(&tc->tc_lock);
	}
}

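/*
 * Taskq worker: run one batch of commit callbacks that
 * txg_dispatch_callbacks() moved off a tx_cpu_t list, then free the list.
 * The 0 passed to dmu_tx_do_callbacks() reports that the txg committed
 * without error.
 */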
static void
txg_do_callbacks(list_t *cb_list)
{
	dmu_tx_do_callbacks(cb_list, 0);

	list_destroy(cb_list);

	kmem_free(cb_list, sizeof (list_t));
}

/*
 * Dispatch the commit callbacks registered on this txg to worker threads.
 *
 * If no callbacks are registered for a given TXG, nothing happens.
 * This function creates a taskq for the associated pool, if needed.
 */
static void
txg_dispatch_callbacks(dsl_pool_t *dp, uint64_t txg)
{
	int c;
	tx_state_t *tx = &dp->dp_tx;
	list_t *cb_list;

	for (c = 0; c < max_ncpus; c++) {
		tx_cpu_t *tc = &tx->tx_cpu[c];
		/*
		 * No need to lock tx_cpu_t at this point, since this can
		 * only be called once a txg has been synced.
		 */

		int g = txg & TXG_MASK;

		if (list_is_empty(&tc->tc_callbacks[g]))
			continue;

		if (tx->tx_commit_cb_taskq == NULL) {
			/*
			 * Commit callback taskq hasn't been created yet.
			 */
			tx->tx_commit_cb_taskq = taskq_create("tx_commit_cb",
			    max_ncpus, minclsyspri, max_ncpus, max_ncpus * 2,
			    TASKQ_PREPOPULATE);
		}

		cb_list = kmem_alloc(sizeof (list_t), KM_SLEEP);
		list_create(cb_list, sizeof (dmu_tx_callback_t),
		    offsetof(dmu_tx_callback_t, dcb_node));

		list_move_tail(&tc->tc_callbacks[g], cb_list);

		(void) taskq_dispatch(tx->tx_commit_cb_taskq, (task_func_t *)
		    txg_do_callbacks, cb_list, TQ_SLEEP);
	}
}

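/*
 * Body of the sync thread: wait for the quiesce thread to hand off a txg
 * (or for a reason to push one along), carry that txg through the syncing
 * state via spa_sync(), and then dispatch its commit callbacks.
 */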
static void
txg_sync_thread(dsl_pool_t *dp)
{
	spa_t *spa = dp->dp_spa;
	tx_state_t *tx = &dp->dp_tx;
	callb_cpr_t cpr;
	uint64_t start, delta;

	txg_thread_enter(tx, &cpr);

	start = delta = 0;
	for (;;) {
		uint64_t timer, timeout = zfs_txg_timeout * hz;
		uint64_t txg;

		/*
		 * We sync when we're scanning, when someone is waiting
		 * on us, when the quiesce thread has handed off a txg
		 * to us, or when we have reached our timeout.
		 */
		timer = (delta >= timeout ? 0 : timeout - delta);
		while (!dsl_scan_active(dp->dp_scan) &&
		    !tx->tx_exiting && timer > 0 &&
		    tx->tx_synced_txg >= tx->tx_sync_txg_waiting &&
		    tx->tx_quiesced_txg == 0) {
			dprintf("waiting; tx_synced=%llu waiting=%llu dp=%p\n",
			    tx->tx_synced_txg, tx->tx_sync_txg_waiting, dp);
			txg_thread_wait(tx, &cpr, &tx->tx_sync_more_cv, timer);
			delta = ddi_get_lbolt() - start;
			timer = (delta > timeout ? 0 : timeout - delta);
		}

		/*
		 * Wait until the quiesce thread hands off a txg to us,
		 * prompting it to do so if necessary.
		 */
		while (!tx->tx_exiting && tx->tx_quiesced_txg == 0) {
			if (tx->tx_quiesce_txg_waiting < tx->tx_open_txg+1)
				tx->tx_quiesce_txg_waiting = tx->tx_open_txg+1;
			cv_broadcast(&tx->tx_quiesce_more_cv);
			txg_thread_wait(tx, &cpr, &tx->tx_quiesce_done_cv, 0);
		}

		if (tx->tx_exiting)
			txg_thread_exit(tx, &cpr, &tx->tx_sync_thread);

		/*
		 * Consume the quiesced txg which has been handed off to
		 * us.  This may cause the quiescing thread to now be
		 * able to quiesce another txg, so we must signal it.
		 */
		txg = tx->tx_quiesced_txg;
		tx->tx_quiesced_txg = 0;
		tx->tx_syncing_txg = txg;
		DTRACE_PROBE2(txg__syncing, dsl_pool_t *, dp, uint64_t, txg);
		cv_broadcast(&tx->tx_quiesce_more_cv);

		dprintf("txg=%llu quiesce_txg=%llu sync_txg=%llu\n",
		    txg, tx->tx_quiesce_txg_waiting, tx->tx_sync_txg_waiting);
		mutex_exit(&tx->tx_sync_lock);

		start = ddi_get_lbolt();
		spa_sync(spa, txg);
		delta = ddi_get_lbolt() - start;

		mutex_enter(&tx->tx_sync_lock);
		tx->tx_synced_txg = txg;
		tx->tx_syncing_txg = 0;
		DTRACE_PROBE2(txg__synced, dsl_pool_t *, dp, uint64_t, txg);
		cv_broadcast(&tx->tx_sync_done_cv);

		/*
		 * Dispatch commit callbacks to worker threads.
		 */
		txg_dispatch_callbacks(dp, txg);
	}
}

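/*
 * Body of the quiesce thread: advance tx_open_txg (via txg_quiesce()),
 * wait for every hold on the prior txg to be released, and hand the
 * quiesced txg off to the sync thread.
 */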
static void
txg_quiesce_thread(dsl_pool_t *dp)
{
	tx_state_t *tx = &dp->dp_tx;
	callb_cpr_t cpr;

	txg_thread_enter(tx, &cpr);

	for (;;) {
		uint64_t txg;

		/*
		 * We quiesce when there's someone waiting on us.
		 * However, we can only have one txg in "quiescing" or
		 * "quiesced, waiting to sync" state.  So we wait until
		 * the "quiesced, waiting to sync" txg has been consumed
		 * by the sync thread.
		 */
		while (!tx->tx_exiting &&
		    (tx->tx_open_txg >= tx->tx_quiesce_txg_waiting ||
		    tx->tx_quiesced_txg != 0))
			txg_thread_wait(tx, &cpr, &tx->tx_quiesce_more_cv, 0);

		if (tx->tx_exiting)
			txg_thread_exit(tx, &cpr, &tx->tx_quiesce_thread);

		txg = tx->tx_open_txg;
		dprintf("txg=%llu quiesce_txg=%llu sync_txg=%llu\n",
		    txg, tx->tx_quiesce_txg_waiting,
		    tx->tx_sync_txg_waiting);
		mutex_exit(&tx->tx_sync_lock);
		txg_quiesce(dp, txg);
		mutex_enter(&tx->tx_sync_lock);

		/*
		 * Hand this txg off to the sync thread.
		 */
		dprintf("quiesce done, handing off txg %llu\n", txg);
		tx->tx_quiesced_txg = txg;
		DTRACE_PROBE2(txg__quiesced, dsl_pool_t *, dp, uint64_t, txg);
		cv_broadcast(&tx->tx_sync_more_cv);
		cv_broadcast(&tx->tx_quiesce_done_cv);
	}
}

/*
 * Delay this thread by delay nanoseconds if we are still in the open
 * transaction group and there is already a waiting txg quiescing or
 * quiesced.  Abort the delay if this txg stalls or enters the quiescing
 * state.
 */
void
txg_delay(dsl_pool_t *dp, uint64_t txg, hrtime_t delay, hrtime_t resolution)
{
	tx_state_t *tx = &dp->dp_tx;
	hrtime_t start = gethrtime();

	/* don't delay if this txg could transition to quiescing immediately */
	if (tx->tx_open_txg > txg ||
	    tx->tx_syncing_txg == txg-1 || tx->tx_synced_txg == txg-1)
		return;

	mutex_enter(&tx->tx_sync_lock);
	if (tx->tx_open_txg > txg || tx->tx_synced_txg == txg-1) {
		mutex_exit(&tx->tx_sync_lock);
		return;
	}

	while (gethrtime() - start < delay &&
	    tx->tx_syncing_txg < txg-1 && !txg_stalled(dp)) {
		(void) cv_timedwait_hires(&tx->tx_quiesce_more_cv,
		    &tx->tx_sync_lock, delay, resolution, 0);
	}

	mutex_exit(&tx->tx_sync_lock);
}

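/*
 * Block until the given txg has been synced to stable storage, prodding the
 * sync thread along as needed.  A txg of 0 means "everything outstanding":
 * we wait for tx_open_txg + TXG_DEFER_SIZE so that deferred frees are
 * written out as well.
 */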
void
txg_wait_synced(dsl_pool_t *dp, uint64_t txg)
{
	tx_state_t *tx = &dp->dp_tx;

	ASSERT(!dsl_pool_config_held(dp));

	mutex_enter(&tx->tx_sync_lock);
	ASSERT(tx->tx_threads == 2);
	if (txg == 0)
		txg = tx->tx_open_txg + TXG_DEFER_SIZE;
	if (tx->tx_sync_txg_waiting < txg)
		tx->tx_sync_txg_waiting = txg;
	dprintf("txg=%llu quiesce_txg=%llu sync_txg=%llu\n",
	    txg, tx->tx_quiesce_txg_waiting, tx->tx_sync_txg_waiting);
	while (tx->tx_synced_txg < txg) {
		dprintf("broadcasting sync more "
		    "tx_synced=%llu waiting=%llu dp=%p\n",
		    tx->tx_synced_txg, tx->tx_sync_txg_waiting, dp);
		cv_broadcast(&tx->tx_sync_more_cv);
		cv_wait(&tx->tx_sync_done_cv, &tx->tx_sync_lock);
	}
	mutex_exit(&tx->tx_sync_lock);
}

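/*
 * Block until the given txg is open (that is, until all earlier txgs have
 * been quiesced), prodding the quiesce thread along as needed.  A txg of 0
 * means the txg after the one currently open.
 */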
void
txg_wait_open(dsl_pool_t *dp, uint64_t txg)
{
	tx_state_t *tx = &dp->dp_tx;

	ASSERT(!dsl_pool_config_held(dp));

	mutex_enter(&tx->tx_sync_lock);
	ASSERT(tx->tx_threads == 2);
	if (txg == 0)
		txg = tx->tx_open_txg + 1;
	if (tx->tx_quiesce_txg_waiting < txg)
		tx->tx_quiesce_txg_waiting = txg;
	dprintf("txg=%llu quiesce_txg=%llu sync_txg=%llu\n",
	    txg, tx->tx_quiesce_txg_waiting, tx->tx_sync_txg_waiting);
	while (tx->tx_open_txg < txg) {
		cv_broadcast(&tx->tx_quiesce_more_cv);
		cv_wait(&tx->tx_quiesce_done_cv, &tx->tx_sync_lock);
	}
	mutex_exit(&tx->tx_sync_lock);
}

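/*
 * Report whether a waiter needs a txg beyond the one currently open, i.e.
 * the open txg is being held up on its way into the quiescing state.
 */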
boolean_t
txg_stalled(dsl_pool_t *dp)
{
	tx_state_t *tx = &dp->dp_tx;
	return (tx->tx_quiesce_txg_waiting > tx->tx_open_txg);
}

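/*
 * Report whether there is sync work outstanding: either a waiter needs a
 * txg at or beyond the one now syncing, or a quiesced txg is waiting to be
 * picked up by the sync thread.
 */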
boolean_t
txg_sync_waiting(dsl_pool_t *dp)
{
	tx_state_t *tx = &dp->dp_tx;

	return (tx->tx_syncing_txg <= tx->tx_sync_txg_waiting ||
	    tx->tx_quiesced_txg != 0);
}

/*
 * Per-txg object lists.
 */
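
/*
 * A txg_list_t tracks, for each of the TXG_SIZE in-flight txgs, the objects
 * (dirty datasets, dirty dbufs, etc.) with work pending in that txg.  The
 * link nodes live inside the tracked objects at tl_offset, so membership
 * requires no allocation.  A hypothetical user (sketch only; foo_t and its
 * fields are illustrative):
 *
 *	typedef struct foo {
 *		...
 *		txg_node_t foo_txg_node;
 *	} foo_t;
 *
 *	txg_list_create(&foo_list, offsetof(foo_t, foo_txg_node));
 *	(void) txg_list_add(&foo_list, foo, txg);
 *	...
 *	while ((foo = txg_list_remove(&foo_list, txg)) != NULL)
 *		(process foo for this txg)
 */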
void
txg_list_create(txg_list_t *tl, size_t offset)
{
	int t;

	mutex_init(&tl->tl_lock, NULL, MUTEX_DEFAULT, NULL);

	tl->tl_offset = offset;

	for (t = 0; t < TXG_SIZE; t++)
		tl->tl_head[t] = NULL;
}

void
txg_list_destroy(txg_list_t *tl)
{
	int t;

	for (t = 0; t < TXG_SIZE; t++)
		ASSERT(txg_list_empty(tl, t));

	mutex_destroy(&tl->tl_lock);
}

boolean_t
txg_list_empty(txg_list_t *tl, uint64_t txg)
{
	return (tl->tl_head[txg & TXG_MASK] == NULL);
}

/*
 * Add an entry to the list (unless it's already on the list).
 * Returns B_TRUE if it was actually added.
 */
boolean_t
txg_list_add(txg_list_t *tl, void *p, uint64_t txg)
{
	int t = txg & TXG_MASK;
	txg_node_t *tn = (txg_node_t *)((char *)p + tl->tl_offset);
	boolean_t add;

	mutex_enter(&tl->tl_lock);
	add = (tn->tn_member[t] == 0);
	if (add) {
		tn->tn_member[t] = 1;
		tn->tn_next[t] = tl->tl_head[t];
		tl->tl_head[t] = tn;
	}
	mutex_exit(&tl->tl_lock);

	return (add);
}

/*
 * Add an entry to the end of the list, unless it's already on the list.
 * (walks list to find end)
 * Returns B_TRUE if it was actually added.
 */
boolean_t
txg_list_add_tail(txg_list_t *tl, void *p, uint64_t txg)
{
	int t = txg & TXG_MASK;
	txg_node_t *tn = (txg_node_t *)((char *)p + tl->tl_offset);
	boolean_t add;

	mutex_enter(&tl->tl_lock);
	add = (tn->tn_member[t] == 0);
	if (add) {
		txg_node_t **tp;

		for (tp = &tl->tl_head[t]; *tp != NULL; tp = &(*tp)->tn_next[t])
			continue;

		tn->tn_member[t] = 1;
		tn->tn_next[t] = NULL;
		*tp = tn;
	}
	mutex_exit(&tl->tl_lock);

	return (add);
}

/*
 * Remove the head of the list and return it.
 */
void *
txg_list_remove(txg_list_t *tl, uint64_t txg)
{
	int t = txg & TXG_MASK;
	txg_node_t *tn;
	void *p = NULL;

	mutex_enter(&tl->tl_lock);
	if ((tn = tl->tl_head[t]) != NULL) {
		p = (char *)tn - tl->tl_offset;
		tl->tl_head[t] = tn->tn_next[t];
		tn->tn_next[t] = NULL;
		tn->tn_member[t] = 0;
	}
	mutex_exit(&tl->tl_lock);

	return (p);
}

/*
 * Remove a specific item from the list and return it.
 */
void *
txg_list_remove_this(txg_list_t *tl, void *p, uint64_t txg)
{
	int t = txg & TXG_MASK;
	txg_node_t *tn, **tp;

	mutex_enter(&tl->tl_lock);

	for (tp = &tl->tl_head[t]; (tn = *tp) != NULL; tp = &tn->tn_next[t]) {
		if ((char *)tn - tl->tl_offset == p) {
			*tp = tn->tn_next[t];
			tn->tn_next[t] = NULL;
			tn->tn_member[t] = 0;
			mutex_exit(&tl->tl_lock);
			return (p);
		}
	}

	mutex_exit(&tl->tl_lock);

	return (NULL);
}

boolean_t
txg_list_member(txg_list_t *tl, void *p, uint64_t txg)
{
	int t = txg & TXG_MASK;
	txg_node_t *tn = (txg_node_t *)((char *)p + tl->tl_offset);

	return (tn->tn_member[t] != 0);
}

/*
 * Walk a txg list -- only safe if you know it's not changing.
 */
void *
txg_list_head(txg_list_t *tl, uint64_t txg)
{
	int t = txg & TXG_MASK;
	txg_node_t *tn = tl->tl_head[t];

	return (tn == NULL ? NULL : (char *)tn - tl->tl_offset);
}

void *
txg_list_next(txg_list_t *tl, void *p, uint64_t txg)
{
	int t = txg & TXG_MASK;
	txg_node_t *tn = (txg_node_t *)((char *)p + tl->tl_offset);

	tn = tn->tn_next[t];

	return (tn == NULL ? NULL : (char *)tn - tl->tl_offset);
}