Print this page
4045 zfs write throttle & i/o scheduler performance work
Reviewed by: George Wilson <george.wilson@delphix.com>
Reviewed by: Adam Leventhal <ahl@delphix.com>
Reviewed by: Christopher Siden <christopher.siden@delphix.com>
Split |
Close |
Expand all |
Collapse all |
--- old/usr/src/uts/common/fs/zfs/sys/txg_impl.h
+++ new/usr/src/uts/common/fs/zfs/sys/txg_impl.h
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
↓ open down ↓ |
10 lines elided |
↑ open up ↑ |
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 +
21 22 /*
22 23 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
23 24 * Use is subject to license terms.
24 25 */
25 26
26 27 /*
27 28 * Copyright (c) 2013 by Delphix. All rights reserved.
28 29 */
29 30
30 31 #ifndef _SYS_TXG_IMPL_H
31 32 #define _SYS_TXG_IMPL_H
32 33
33 34 #include <sys/spa.h>
34 35 #include <sys/txg.h>
35 36
36 37 #ifdef __cplusplus
37 38 extern "C" {
38 39 #endif
39 40
40 41 /*
41 42 * The tx_cpu structure is a per-cpu structure that is used to track
42 43 * the number of active transaction holds (tc_count). As transactions
43 44 * are assigned into a transaction group the appropriate tc_count is
44 45 * incremented to indicate that there are pending changes that have yet
45 46 * to quiesce. Consumers eventually call txg_rele_to_sync() to decrement
46 47 * the tc_count. A transaction group is not considered quiesced until all
47 48 * tx_cpu structures have reached a tc_count of zero.
48 49 *
49 50 * This structure is a per-cpu structure by design. Updates to this structure
50 51 * are frequent and concurrent. Having a single structure would result in
51 52 * heavy lock contention so a per-cpu design was implemented. With the fanned
52 53 * out mutex design, consumers only need to lock the mutex associated with
53 54 * the thread's cpu.
54 55 *
55 56 * The tx_cpu contains two locks, the tc_lock and tc_open_lock.
56 57 * The tc_lock is used to protect all members of the tx_cpu structure with
57 58 * the exception of the tc_open_lock. This lock should only be held for a
58 59 * short period of time, typically when updating the value of tc_count.
59 60 *
60 61 * The tc_open_lock protects the tx_open_txg member of the tx_state structure.
61 62 * This lock is used to ensure that transactions are only assigned into
62 63 * the current open transaction group. In order to move the current open
63 64 * transaction group to the quiesce phase, the txg_quiesce thread must
64 65 * grab all tc_open_locks, increment the tx_open_txg, and drop the locks.
65 66 * The tc_open_lock is held until the transaction is assigned into the
66 67 * transaction group. Typically, this is a short operation but if throttling
67 68 * is occurring it may be held for longer periods of time.
68 69 */
69 70 struct tx_cpu {
70 71 kmutex_t tc_open_lock; /* protects tx_open_txg */
71 72 kmutex_t tc_lock; /* protects the rest of this struct */
72 73 kcondvar_t tc_cv[TXG_SIZE];
73 74 uint64_t tc_count[TXG_SIZE]; /* tx hold count on each txg */
74 75 list_t tc_callbacks[TXG_SIZE]; /* commit cb list */
75 76 char tc_pad[8]; /* pad to fill 3 cache lines */
76 77 };
77 78
78 79 /*
79 80 * The tx_state structure maintains the state information about the different
80 81 * stages of the pool's transaction groups. A per pool tx_state structure
81 82 * is used to track this information. The tx_state structure also points to
↓ open down ↓ |
51 lines elided |
↑ open up ↑ |
82 83 * an array of tx_cpu structures (described above). Although the tx_sync_lock
83 84 * is used to protect the members of this structure, it is not used to
84 85 * protect the tx_open_txg. Instead a special lock in the tx_cpu structure
85 86 * is used. Readers of tx_open_txg must grab the per-cpu tc_open_lock.
86 87 * Any thread wishing to update tx_open_txg must grab the tc_open_lock on
87 88 * every cpu (see txg_quiesce()).
88 89 */
89 90 typedef struct tx_state {
90 91 tx_cpu_t *tx_cpu; /* protects access to tx_open_txg */
91 92 kmutex_t tx_sync_lock; /* protects the rest of this struct */
93 +
92 94 uint64_t tx_open_txg; /* currently open txg id */
93 95 uint64_t tx_quiesced_txg; /* quiesced txg waiting for sync */
94 96 uint64_t tx_syncing_txg; /* currently syncing txg id */
95 97 uint64_t tx_synced_txg; /* last synced txg id */
96 98
99 + hrtime_t tx_open_time; /* start time of tx_open_txg */
100 +
97 101 uint64_t tx_sync_txg_waiting; /* txg we're waiting to sync */
98 102 uint64_t tx_quiesce_txg_waiting; /* txg we're waiting to open */
99 103
100 104 kcondvar_t tx_sync_more_cv;
101 105 kcondvar_t tx_sync_done_cv;
102 106 kcondvar_t tx_quiesce_more_cv;
103 107 kcondvar_t tx_quiesce_done_cv;
104 108 kcondvar_t tx_timeout_cv;
105 109 kcondvar_t tx_exit_cv; /* wait for all threads to exit */
106 110
107 111 uint8_t tx_threads; /* number of threads */
108 112 uint8_t tx_exiting; /* set when we're exiting */
109 113
110 114 kthread_t *tx_sync_thread;
111 115 kthread_t *tx_quiesce_thread;
112 116
113 117 taskq_t *tx_commit_cb_taskq; /* commit callback taskq */
114 118 } tx_state_t;
115 119
116 120 #ifdef __cplusplus
117 121 }
118 122 #endif
119 123
120 124 #endif /* _SYS_TXG_IMPL_H */
↓ open down ↓ |
14 lines elided |
↑ open up ↑ |
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX