Print this page
2882 implement libzfs_core
2883 changing "canmount" property to "on" should not always remount dataset
2900 "zfs snapshot" should be able to create multiple, arbitrary snapshots at once
Reviewed by: George Wilson <george.wilson@delphix.com>
Reviewed by: Chris Siden <christopher.siden@delphix.com>
Reviewed by: Garrett D'Amore <garrett@damore.org>
Reviewed by: Bill Pijewski <wdp@joyent.com>
Reviewed by: Dan Kruchinin <dan.kruchinin@gmail.com>
Split |
Close |
Expand all |
Collapse all |
--- old/usr/src/uts/common/fs/zfs/dsl_synctask.c
+++ new/usr/src/uts/common/fs/zfs/dsl_synctask.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
↓ open down ↓ |
12 lines elided |
↑ open up ↑ |
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21 /*
22 22 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
23 + * Copyright (c) 2012 by Delphix. All rights reserved.
23 24 */
24 25
25 26 #include <sys/dmu.h>
26 27 #include <sys/dmu_tx.h>
27 28 #include <sys/dsl_pool.h>
28 29 #include <sys/dsl_dir.h>
29 30 #include <sys/dsl_synctask.h>
30 31 #include <sys/metaslab.h>
31 32
32 33 #define DST_AVG_BLKSHIFT 14
33 34
/*
 * Default check function used when a sync task is created with a NULL
 * checkfunc: unconditionally reports success so the task's syncfunc
 * always runs.  All three arguments are intentionally unused.
 */
/* ARGSUSED */
static int
dsl_null_checkfunc(void *arg1, void *arg2, dmu_tx_t *tx)
{
	return (0);
}
40 41
41 42 dsl_sync_task_group_t *
42 43 dsl_sync_task_group_create(dsl_pool_t *dp)
43 44 {
44 45 dsl_sync_task_group_t *dstg;
45 46
46 47 dstg = kmem_zalloc(sizeof (dsl_sync_task_group_t), KM_SLEEP);
47 48 list_create(&dstg->dstg_tasks, sizeof (dsl_sync_task_t),
48 49 offsetof(dsl_sync_task_t, dst_node));
49 50 dstg->dstg_pool = dp;
50 51
51 52 return (dstg);
52 53 }
53 54
54 55 void
55 56 dsl_sync_task_create(dsl_sync_task_group_t *dstg,
56 57 dsl_checkfunc_t *checkfunc, dsl_syncfunc_t *syncfunc,
57 58 void *arg1, void *arg2, int blocks_modified)
58 59 {
59 60 dsl_sync_task_t *dst;
60 61
61 62 if (checkfunc == NULL)
62 63 checkfunc = dsl_null_checkfunc;
63 64 dst = kmem_zalloc(sizeof (dsl_sync_task_t), KM_SLEEP);
64 65 dst->dst_checkfunc = checkfunc;
65 66 dst->dst_syncfunc = syncfunc;
66 67 dst->dst_arg1 = arg1;
67 68 dst->dst_arg2 = arg2;
68 69 list_insert_tail(&dstg->dstg_tasks, dst);
69 70
70 71 dstg->dstg_space += blocks_modified << DST_AVG_BLKSHIFT;
71 72 }
72 73
/*
 * Submit the task group for execution in syncing context and wait for
 * it to complete.  Returns 0 on success or the first task error.
 *
 * Flow: open a tx against the MOS to pin a txg, run each task's
 * checkfunc as a preliminary (open-context) error check, then queue the
 * group on dp_sync_tasks for that txg and wait for the txg to sync.
 * The checkfuncs run again, authoritatively, in syncing context
 * (dsl_sync_task_group_sync()); the open-context pass here just fails
 * fast.  If syncing context reports EAGAIN, wait out the deferred-frees
 * window and retry the whole sequence.
 */
int
dsl_sync_task_group_wait(dsl_sync_task_group_t *dstg)
{
	dmu_tx_t *tx;
	uint64_t txg;
	dsl_sync_task_t *dst;

top:
	/* A tx against the MOS dir; TXG_WAIT means assign cannot fail. */
	tx = dmu_tx_create_dd(dstg->dstg_pool->dp_mos_dir);
	VERIFY(0 == dmu_tx_assign(tx, TXG_WAIT));

	txg = dmu_tx_get_txg(tx);

	/* Do a preliminary error check. */
	dstg->dstg_err = 0;
#ifdef ZFS_DEBUG
	/*
	 * Only check half the time, otherwise, the sync-context
	 * check will almost never fail.
	 */
	if (spa_get_random(2) == 0)
		goto skip;
#endif
	rw_enter(&dstg->dstg_pool->dp_config_rwlock, RW_READER);
	for (dst = list_head(&dstg->dstg_tasks); dst;
	    dst = list_next(&dstg->dstg_tasks, dst)) {
		dst->dst_err =
		    dst->dst_checkfunc(dst->dst_arg1, dst->dst_arg2, tx);
		/* Remember the last failure; any error aborts the group. */
		if (dst->dst_err)
			dstg->dstg_err = dst->dst_err;
	}
	rw_exit(&dstg->dstg_pool->dp_config_rwlock);

	if (dstg->dstg_err) {
		dmu_tx_commit(tx);
		return (dstg->dstg_err);
	}
	/*
	 * NOTE(review): skip is only referenced under ZFS_DEBUG; some
	 * compilers may warn about an unused label in non-DEBUG builds —
	 * confirm against the build's warning flags.
	 */
skip:

	/*
	 * We don't generally have many sync tasks, so pay the price of
	 * add_tail to get the tasks executed in the right order.
	 */
	VERIFY(0 == txg_list_add_tail(&dstg->dstg_pool->dp_sync_tasks,
	    dstg, txg));

	dmu_tx_commit(tx);

	/* Block until the pinned txg (and thus the group) has synced. */
	txg_wait_synced(dstg->dstg_pool, txg);

	if (dstg->dstg_err == EAGAIN) {
		/* Let deferred frees settle before retrying from scratch. */
		txg_wait_synced(dstg->dstg_pool, txg + TXG_DEFER_SIZE);
		goto top;
	}

	return (dstg->dstg_err);
}
129 131
130 132 void
131 133 dsl_sync_task_group_nowait(dsl_sync_task_group_t *dstg, dmu_tx_t *tx)
132 134 {
133 135 uint64_t txg;
134 136
135 137 dstg->dstg_nowaiter = B_TRUE;
136 138 txg = dmu_tx_get_txg(tx);
137 139 /*
138 140 * We don't generally have many sync tasks, so pay the price of
139 141 * add_tail to get the tasks executed in the right order.
140 142 */
141 143 VERIFY(0 == txg_list_add_tail(&dstg->dstg_pool->dp_sync_tasks,
142 144 dstg, txg));
143 145 }
144 146
145 147 void
146 148 dsl_sync_task_group_destroy(dsl_sync_task_group_t *dstg)
147 149 {
148 150 dsl_sync_task_t *dst;
149 151
150 152 while (dst = list_head(&dstg->dstg_tasks)) {
151 153 list_remove(&dstg->dstg_tasks, dst);
152 154 kmem_free(dst, sizeof (dsl_sync_task_t));
153 155 }
154 156 kmem_free(dstg, sizeof (dsl_sync_task_group_t));
155 157 }
156 158
157 159 void
158 160 dsl_sync_task_group_sync(dsl_sync_task_group_t *dstg, dmu_tx_t *tx)
159 161 {
160 162 dsl_sync_task_t *dst;
161 163 dsl_pool_t *dp = dstg->dstg_pool;
162 164 uint64_t quota, used;
163 165
164 166 ASSERT3U(dstg->dstg_err, ==, 0);
165 167
166 168 /*
167 169 * Check for sufficient space. We just check against what's
168 170 * on-disk; we don't want any in-flight accounting to get in our
169 171 * way, because open context may have already used up various
170 172 * in-core limits (arc_tempreserve, dsl_pool_tempreserve).
171 173 */
172 174 quota = dsl_pool_adjustedsize(dp, B_FALSE) -
173 175 metaslab_class_get_deferred(spa_normal_class(dp->dp_spa));
174 176 used = dp->dp_root_dir->dd_phys->dd_used_bytes;
175 177 /* MOS space is triple-dittoed, so we multiply by 3. */
176 178 if (dstg->dstg_space > 0 && used + dstg->dstg_space * 3 > quota) {
177 179 dstg->dstg_err = ENOSPC;
178 180 return;
179 181 }
180 182
181 183 /*
182 184 * Check for errors by calling checkfuncs.
183 185 */
184 186 rw_enter(&dp->dp_config_rwlock, RW_WRITER);
185 187 for (dst = list_head(&dstg->dstg_tasks); dst;
186 188 dst = list_next(&dstg->dstg_tasks, dst)) {
187 189 dst->dst_err =
188 190 dst->dst_checkfunc(dst->dst_arg1, dst->dst_arg2, tx);
189 191 if (dst->dst_err)
190 192 dstg->dstg_err = dst->dst_err;
191 193 }
192 194
193 195 if (dstg->dstg_err == 0) {
194 196 /*
195 197 * Execute sync tasks.
196 198 */
197 199 for (dst = list_head(&dstg->dstg_tasks); dst;
198 200 dst = list_next(&dstg->dstg_tasks, dst)) {
199 201 dst->dst_syncfunc(dst->dst_arg1, dst->dst_arg2, tx);
200 202 }
201 203 }
202 204 rw_exit(&dp->dp_config_rwlock);
203 205
204 206 if (dstg->dstg_nowaiter)
205 207 dsl_sync_task_group_destroy(dstg);
206 208 }
207 209
208 210 int
209 211 dsl_sync_task_do(dsl_pool_t *dp,
210 212 dsl_checkfunc_t *checkfunc, dsl_syncfunc_t *syncfunc,
211 213 void *arg1, void *arg2, int blocks_modified)
212 214 {
213 215 dsl_sync_task_group_t *dstg;
214 216 int err;
215 217
216 218 ASSERT(spa_writeable(dp->dp_spa));
217 219
218 220 dstg = dsl_sync_task_group_create(dp);
219 221 dsl_sync_task_create(dstg, checkfunc, syncfunc,
220 222 arg1, arg2, blocks_modified);
221 223 err = dsl_sync_task_group_wait(dstg);
222 224 dsl_sync_task_group_destroy(dstg);
223 225 return (err);
224 226 }
225 227
226 228 void
227 229 dsl_sync_task_do_nowait(dsl_pool_t *dp,
228 230 dsl_checkfunc_t *checkfunc, dsl_syncfunc_t *syncfunc,
229 231 void *arg1, void *arg2, int blocks_modified, dmu_tx_t *tx)
230 232 {
231 233 dsl_sync_task_group_t *dstg;
232 234
233 235 if (!spa_writeable(dp->dp_spa))
234 236 return;
235 237
236 238 dstg = dsl_sync_task_group_create(dp);
237 239 dsl_sync_task_create(dstg, checkfunc, syncfunc,
238 240 arg1, arg2, blocks_modified);
239 241 dsl_sync_task_group_nowait(dstg, tx);
240 242 }
↓ open down ↓ |
121 lines elided |
↑ open up ↑ |
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX