Print this page
2882 implement libzfs_core
2883 changing "canmount" property to "on" should not always remount dataset
2900 "zfs snapshot" should be able to create multiple, arbitrary snapshots at once
Reviewed by: George Wilson <george.wilson@delphix.com>
Reviewed by: Chris Siden <christopher.siden@delphix.com>
Reviewed by: Garrett D'Amore <garrett@damore.org>
Reviewed by: Bill Pijewski <wdp@joyent.com>
Reviewed by: Dan Kruchinin <dan.kruchinin@gmail.com>
Split |
Close |
Expand all |
Collapse all |
--- old/usr/src/uts/common/fs/zfs/spa_history.c
+++ new/usr/src/uts/common/fs/zfs/spa_history.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
↓ open down ↓ |
13 lines elided |
↑ open up ↑ |
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21
22 22 /*
23 23 * Copyright (c) 2006, 2010, Oracle and/or its affiliates. All rights reserved.
24 - * Copyright (c) 2011 by Delphix. All rights reserved.
24 + * Copyright (c) 2012 by Delphix. All rights reserved.
25 25 */
26 26
27 27 #include <sys/spa.h>
28 28 #include <sys/spa_impl.h>
29 29 #include <sys/zap.h>
30 30 #include <sys/dsl_synctask.h>
31 31 #include <sys/dmu_tx.h>
32 32 #include <sys/dmu_objset.h>
33 +#include <sys/dsl_dataset.h>
34 +#include <sys/dsl_dir.h>
33 35 #include <sys/utsname.h>
34 36 #include <sys/cmn_err.h>
35 37 #include <sys/sunddi.h>
38 +#include <sys/cred.h>
36 39 #include "zfs_comutil.h"
37 40 #ifdef _KERNEL
38 41 #include <sys/zone.h>
39 42 #endif
40 43
41 44 /*
42 45 * Routines to manage the on-disk history log.
43 46 *
44 47 * The history log is stored as a dmu object containing
45 48 * <packed record length, record nvlist> tuples.
46 49 *
47 50 * Where "record nvlist" is a nvlist containing uint64_ts and strings, and
48 51 * "packed record length" is the packed length of the "record nvlist" stored
49 52 * as a little endian uint64_t.
50 53 *
51 54 * The log is implemented as a ring buffer, though the original creation
52 55 * of the pool ('zpool create') is never overwritten.
53 56 *
54 57 * The history log is tracked as object 'spa_t::spa_history'. The bonus buffer
55 58 * of 'spa_history' stores the offsets for logging/retrieving history as
56 59 * 'spa_history_phys_t'. 'sh_pool_create_len' is the ending offset in bytes of
57 60 * where the 'zpool create' record is stored. This allows us to never
58 61 * overwrite the original creation of the pool. 'sh_phys_max_off' is the
59 62 * physical ending offset in bytes of the log. This tells you the length of
60 63 * the buffer. 'sh_eof' is the logical EOF (in bytes). Whenever a record
61 64 * is added, 'sh_eof' is incremented by the size of the record.
62 65 * 'sh_eof' is never decremented. 'sh_bof' is the logical BOF (in bytes).
63 66 * This is where the consumer should start reading from after reading in
64 67 * the 'zpool create' portion of the log.
65 68 *
66 69 * 'sh_records_lost' keeps track of how many records have been overwritten
67 70 * and permanently lost.
68 71 */
69 72
70 73 /* convert a logical offset to physical */
71 74 static uint64_t
72 75 spa_history_log_to_phys(uint64_t log_off, spa_history_phys_t *shpp)
73 76 {
74 77 uint64_t phys_len;
75 78
76 79 phys_len = shpp->sh_phys_max_off - shpp->sh_pool_create_len;
77 80 return ((log_off - shpp->sh_pool_create_len) % phys_len
78 81 + shpp->sh_pool_create_len);
79 82 }
80 83
81 84 void
82 85 spa_history_create_obj(spa_t *spa, dmu_tx_t *tx)
83 86 {
84 87 dmu_buf_t *dbp;
85 88 spa_history_phys_t *shpp;
86 89 objset_t *mos = spa->spa_meta_objset;
87 90
88 91 ASSERT(spa->spa_history == 0);
89 92 spa->spa_history = dmu_object_alloc(mos, DMU_OT_SPA_HISTORY,
90 93 SPA_MAXBLOCKSIZE, DMU_OT_SPA_HISTORY_OFFSETS,
91 94 sizeof (spa_history_phys_t), tx);
92 95
93 96 VERIFY(zap_add(mos, DMU_POOL_DIRECTORY_OBJECT,
94 97 DMU_POOL_HISTORY, sizeof (uint64_t), 1,
95 98 &spa->spa_history, tx) == 0);
96 99
97 100 VERIFY(0 == dmu_bonus_hold(mos, spa->spa_history, FTAG, &dbp));
98 101 ASSERT(dbp->db_size >= sizeof (spa_history_phys_t));
99 102
100 103 shpp = dbp->db_data;
101 104 dmu_buf_will_dirty(dbp, tx);
102 105
103 106 /*
104 107 * Figure out maximum size of history log. We set it at
105 108 * 0.1% of pool size, with a max of 1G and min of 128KB.
106 109 */
107 110 shpp->sh_phys_max_off =
108 111 metaslab_class_get_dspace(spa_normal_class(spa)) / 1000;
109 112 shpp->sh_phys_max_off = MIN(shpp->sh_phys_max_off, 1<<30);
110 113 shpp->sh_phys_max_off = MAX(shpp->sh_phys_max_off, 128<<10);
111 114
112 115 dmu_buf_rele(dbp, FTAG);
113 116 }
114 117
115 118 /*
116 119 * Change 'sh_bof' to the beginning of the next record.
117 120 */
118 121 static int
119 122 spa_history_advance_bof(spa_t *spa, spa_history_phys_t *shpp)
120 123 {
121 124 objset_t *mos = spa->spa_meta_objset;
122 125 uint64_t firstread, reclen, phys_bof;
123 126 char buf[sizeof (reclen)];
124 127 int err;
125 128
126 129 phys_bof = spa_history_log_to_phys(shpp->sh_bof, shpp);
127 130 firstread = MIN(sizeof (reclen), shpp->sh_phys_max_off - phys_bof);
128 131
129 132 if ((err = dmu_read(mos, spa->spa_history, phys_bof, firstread,
130 133 buf, DMU_READ_PREFETCH)) != 0)
131 134 return (err);
132 135 if (firstread != sizeof (reclen)) {
133 136 if ((err = dmu_read(mos, spa->spa_history,
134 137 shpp->sh_pool_create_len, sizeof (reclen) - firstread,
135 138 buf + firstread, DMU_READ_PREFETCH)) != 0)
136 139 return (err);
137 140 }
138 141
139 142 reclen = LE_64(*((uint64_t *)buf));
140 143 shpp->sh_bof += reclen + sizeof (reclen);
141 144 shpp->sh_records_lost++;
142 145 return (0);
143 146 }
144 147
145 148 static int
146 149 spa_history_write(spa_t *spa, void *buf, uint64_t len, spa_history_phys_t *shpp,
147 150 dmu_tx_t *tx)
148 151 {
149 152 uint64_t firstwrite, phys_eof;
150 153 objset_t *mos = spa->spa_meta_objset;
151 154 int err;
152 155
153 156 ASSERT(MUTEX_HELD(&spa->spa_history_lock));
154 157
155 158 /* see if we need to reset logical BOF */
156 159 while (shpp->sh_phys_max_off - shpp->sh_pool_create_len -
157 160 (shpp->sh_eof - shpp->sh_bof) <= len) {
158 161 if ((err = spa_history_advance_bof(spa, shpp)) != 0) {
159 162 return (err);
160 163 }
161 164 }
162 165
163 166 phys_eof = spa_history_log_to_phys(shpp->sh_eof, shpp);
164 167 firstwrite = MIN(len, shpp->sh_phys_max_off - phys_eof);
165 168 shpp->sh_eof += len;
166 169 dmu_write(mos, spa->spa_history, phys_eof, firstwrite, buf, tx);
167 170
168 171 len -= firstwrite;
↓ open down ↓ |
123 lines elided |
↑ open up ↑ |
169 172 if (len > 0) {
170 173 /* write out the rest at the beginning of physical file */
171 174 dmu_write(mos, spa->spa_history, shpp->sh_pool_create_len,
172 175 len, (char *)buf + firstwrite, tx);
173 176 }
174 177
175 178 return (0);
176 179 }
177 180
178 181 static char *
179 -spa_history_zone()
182 +spa_history_zone(void)
180 183 {
181 184 #ifdef _KERNEL
185 + if (INGLOBALZONE(curproc))
186 + return (NULL);
182 187 return (curproc->p_zone->zone_name);
183 188 #else
184 - return ("global");
189 + return (NULL);
185 190 #endif
186 191 }
187 192
188 193 /*
189 194 * Write out a history event.
190 195 */
191 196 /*ARGSUSED*/
192 197 static void
193 198 spa_history_log_sync(void *arg1, void *arg2, dmu_tx_t *tx)
194 199 {
195 200 spa_t *spa = arg1;
196 - history_arg_t *hap = arg2;
197 - const char *history_str = hap->ha_history_str;
201 + nvlist_t *nvl = arg2;
198 202 objset_t *mos = spa->spa_meta_objset;
199 203 dmu_buf_t *dbp;
200 204 spa_history_phys_t *shpp;
201 205 size_t reclen;
202 206 uint64_t le_len;
203 - nvlist_t *nvrecord;
204 207 char *record_packed = NULL;
205 208 int ret;
206 209
207 210 /*
208 211 * If we have an older pool that doesn't have a command
209 212 * history object, create it now.
210 213 */
211 214 mutex_enter(&spa->spa_history_lock);
212 215 if (!spa->spa_history)
213 216 spa_history_create_obj(spa, tx);
214 217 mutex_exit(&spa->spa_history_lock);
215 218
216 219 /*
217 220 * Get the offset of where we need to write via the bonus buffer.
218 221 * Update the offset when the write completes.
219 222 */
220 223 VERIFY(0 == dmu_bonus_hold(mos, spa->spa_history, FTAG, &dbp));
221 224 shpp = dbp->db_data;
222 225
↓ open down ↓ |
9 lines elided |
↑ open up ↑ |
223 226 dmu_buf_will_dirty(dbp, tx);
224 227
225 228 #ifdef ZFS_DEBUG
226 229 {
227 230 dmu_object_info_t doi;
228 231 dmu_object_info_from_db(dbp, &doi);
229 232 ASSERT3U(doi.doi_bonus_type, ==, DMU_OT_SPA_HISTORY_OFFSETS);
230 233 }
231 234 #endif
232 235
233 - VERIFY(nvlist_alloc(&nvrecord, NV_UNIQUE_NAME, KM_SLEEP) == 0);
234 - VERIFY(nvlist_add_uint64(nvrecord, ZPOOL_HIST_TIME,
235 - gethrestime_sec()) == 0);
236 - VERIFY(nvlist_add_uint64(nvrecord, ZPOOL_HIST_WHO, hap->ha_uid) == 0);
237 - if (hap->ha_zone != NULL)
238 - VERIFY(nvlist_add_string(nvrecord, ZPOOL_HIST_ZONE,
239 - hap->ha_zone) == 0);
236 + fnvlist_add_uint64(nvl, ZPOOL_HIST_TIME, gethrestime_sec());
240 237 #ifdef _KERNEL
241 - VERIFY(nvlist_add_string(nvrecord, ZPOOL_HIST_HOST,
242 - utsname.nodename) == 0);
238 + fnvlist_add_string(nvl, ZPOOL_HIST_HOST, utsname.nodename);
243 239 #endif
244 - if (hap->ha_log_type == LOG_CMD_POOL_CREATE ||
245 - hap->ha_log_type == LOG_CMD_NORMAL) {
246 - VERIFY(nvlist_add_string(nvrecord, ZPOOL_HIST_CMD,
247 - history_str) == 0);
248 -
249 - zfs_dbgmsg("command: %s", history_str);
250 - } else {
251 - VERIFY(nvlist_add_uint64(nvrecord, ZPOOL_HIST_INT_EVENT,
252 - hap->ha_event) == 0);
253 - VERIFY(nvlist_add_uint64(nvrecord, ZPOOL_HIST_TXG,
254 - tx->tx_txg) == 0);
255 - VERIFY(nvlist_add_string(nvrecord, ZPOOL_HIST_INT_STR,
256 - history_str) == 0);
257 -
258 - zfs_dbgmsg("internal %s pool:%s txg:%llu %s",
259 - zfs_history_event_names[hap->ha_event], spa_name(spa),
260 - (longlong_t)tx->tx_txg, history_str);
261 -
240 + if (nvlist_exists(nvl, ZPOOL_HIST_CMD)) {
241 + zfs_dbgmsg("command: %s",
242 + fnvlist_lookup_string(nvl, ZPOOL_HIST_CMD));
243 + } else if (nvlist_exists(nvl, ZPOOL_HIST_INT_NAME)) {
244 + if (nvlist_exists(nvl, ZPOOL_HIST_DSNAME)) {
245 + zfs_dbgmsg("txg %lld %s %s (id %llu) %s",
246 + fnvlist_lookup_uint64(nvl, ZPOOL_HIST_TXG),
247 + fnvlist_lookup_string(nvl, ZPOOL_HIST_INT_NAME),
248 + fnvlist_lookup_string(nvl, ZPOOL_HIST_DSNAME),
249 + fnvlist_lookup_uint64(nvl, ZPOOL_HIST_DSID),
250 + fnvlist_lookup_string(nvl, ZPOOL_HIST_INT_STR));
251 + } else {
252 + zfs_dbgmsg("txg %lld %s %s",
253 + fnvlist_lookup_uint64(nvl, ZPOOL_HIST_TXG),
254 + fnvlist_lookup_string(nvl, ZPOOL_HIST_INT_NAME),
255 + fnvlist_lookup_string(nvl, ZPOOL_HIST_INT_STR));
256 + }
257 + } else if (nvlist_exists(nvl, ZPOOL_HIST_IOCTL)) {
258 + zfs_dbgmsg("ioctl %s",
259 + fnvlist_lookup_string(nvl, ZPOOL_HIST_IOCTL));
262 260 }
263 261
264 - VERIFY(nvlist_size(nvrecord, &reclen, NV_ENCODE_XDR) == 0);
265 - record_packed = kmem_alloc(reclen, KM_SLEEP);
266 -
267 - VERIFY(nvlist_pack(nvrecord, &record_packed, &reclen,
268 - NV_ENCODE_XDR, KM_SLEEP) == 0);
262 + record_packed = fnvlist_pack(nvl, &reclen);
269 263
270 264 mutex_enter(&spa->spa_history_lock);
271 - if (hap->ha_log_type == LOG_CMD_POOL_CREATE)
272 - VERIFY(shpp->sh_eof == shpp->sh_pool_create_len);
273 265
274 266 /* write out the packed length as little endian */
275 267 le_len = LE_64((uint64_t)reclen);
276 268 ret = spa_history_write(spa, &le_len, sizeof (le_len), shpp, tx);
277 269 if (!ret)
278 270 ret = spa_history_write(spa, record_packed, reclen, shpp, tx);
279 271
280 - if (!ret && hap->ha_log_type == LOG_CMD_POOL_CREATE) {
281 - shpp->sh_pool_create_len += sizeof (le_len) + reclen;
282 - shpp->sh_bof = shpp->sh_pool_create_len;
272 + /* The first command is the create, which we keep forever */
273 + if (ret == 0 && shpp->sh_pool_create_len == 0 &&
274 + nvlist_exists(nvl, ZPOOL_HIST_CMD)) {
275 + shpp->sh_pool_create_len = shpp->sh_bof = shpp->sh_eof;
283 276 }
284 277
285 278 mutex_exit(&spa->spa_history_lock);
286 - nvlist_free(nvrecord);
287 - kmem_free(record_packed, reclen);
279 + fnvlist_pack_free(record_packed, reclen);
288 280 dmu_buf_rele(dbp, FTAG);
289 -
290 - strfree(hap->ha_history_str);
291 - if (hap->ha_zone != NULL)
292 - strfree(hap->ha_zone);
293 - kmem_free(hap, sizeof (history_arg_t));
281 + fnvlist_free(nvl);
294 282 }
295 283
296 284 /*
297 285 * Write out a history event.
298 286 */
299 287 int
300 -spa_history_log(spa_t *spa, const char *history_str, history_log_type_t what)
288 +spa_history_log(spa_t *spa, const char *msg)
289 +{
290 + int err;
291 + nvlist_t *nvl = fnvlist_alloc();
292 +
293 + fnvlist_add_string(nvl, ZPOOL_HIST_CMD, msg);
294 + err = spa_history_log_nvl(spa, nvl);
295 + fnvlist_free(nvl);
296 + return (err);
297 +}
298 +
299 +int
300 +spa_history_log_nvl(spa_t *spa, nvlist_t *nvl)
301 301 {
302 - history_arg_t *ha;
303 302 int err = 0;
304 303 dmu_tx_t *tx;
304 + nvlist_t *nvarg;
305 305
306 - ASSERT(what != LOG_INTERNAL);
306 + if (spa_version(spa) < SPA_VERSION_ZPOOL_HISTORY)
307 + return (EINVAL);
307 308
308 309 tx = dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir);
309 310 err = dmu_tx_assign(tx, TXG_WAIT);
310 311 if (err) {
311 312 dmu_tx_abort(tx);
312 313 return (err);
313 314 }
314 315
315 - ha = kmem_alloc(sizeof (history_arg_t), KM_SLEEP);
316 - ha->ha_history_str = strdup(history_str);
317 - ha->ha_zone = strdup(spa_history_zone());
318 - ha->ha_log_type = what;
319 - ha->ha_uid = crgetuid(CRED());
316 + nvarg = fnvlist_dup(nvl);
317 + if (spa_history_zone() != NULL) {
318 + fnvlist_add_string(nvarg, ZPOOL_HIST_ZONE,
319 + spa_history_zone());
320 + }
321 + fnvlist_add_uint64(nvarg, ZPOOL_HIST_WHO, crgetruid(CRED()));
320 322
321 323 /* Kick this off asynchronously; errors are ignored. */
322 324 dsl_sync_task_do_nowait(spa_get_dsl(spa), NULL,
323 - spa_history_log_sync, spa, ha, 0, tx);
325 + spa_history_log_sync, spa, nvarg, 0, tx);
324 326 dmu_tx_commit(tx);
325 327
326 - /* spa_history_log_sync will free ha and strings */
328 + /* spa_history_log_sync will free nvl */
327 329 return (err);
330 +
328 331 }
329 332
330 333 /*
331 334 * Read out the command history.
332 335 */
333 336 int
334 337 spa_history_get(spa_t *spa, uint64_t *offp, uint64_t *len, char *buf)
335 338 {
336 339 objset_t *mos = spa->spa_meta_objset;
337 340 dmu_buf_t *dbp;
338 341 uint64_t read_len, phys_read_off, phys_eof;
339 342 uint64_t leftover = 0;
340 343 spa_history_phys_t *shpp;
341 344 int err;
342 345
343 346 /*
344 - * If the command history doesn't exist (older pool),
347 + * If the command history doesn't exist (older pool),
345 348 * that's ok, just return ENOENT.
346 349 */
347 350 if (!spa->spa_history)
348 351 return (ENOENT);
349 352
350 353 /*
351 354 * The history is logged asynchronously, so when they request
352 355 * the first chunk of history, make sure everything has been
353 356 * synced to disk so that we get it.
354 357 */
355 358 if (*offp == 0 && spa_writeable(spa))
356 359 txg_wait_synced(spa_get_dsl(spa), 0);
357 360
358 361 if ((err = dmu_bonus_hold(mos, spa->spa_history, FTAG, &dbp)) != 0)
359 362 return (err);
360 363 shpp = dbp->db_data;
361 364
362 365 #ifdef ZFS_DEBUG
363 366 {
364 367 dmu_object_info_t doi;
365 368 dmu_object_info_from_db(dbp, &doi);
366 369 ASSERT3U(doi.doi_bonus_type, ==, DMU_OT_SPA_HISTORY_OFFSETS);
367 370 }
368 371 #endif
369 372
370 373 mutex_enter(&spa->spa_history_lock);
371 374 phys_eof = spa_history_log_to_phys(shpp->sh_eof, shpp);
372 375
373 376 if (*offp < shpp->sh_pool_create_len) {
374 377 /* read in just the zpool create history */
375 378 phys_read_off = *offp;
376 379 read_len = MIN(*len, shpp->sh_pool_create_len -
377 380 phys_read_off);
378 381 } else {
379 382 /*
380 383 * Need to reset passed in offset to BOF if the passed in
381 384 * offset has since been overwritten.
382 385 */
383 386 *offp = MAX(*offp, shpp->sh_bof);
384 387 phys_read_off = spa_history_log_to_phys(*offp, shpp);
385 388
386 389 /*
387 390 * Read up to the minimum of what the user passed down or
388 391 * the EOF (physical or logical). If we hit physical EOF,
389 392 * use 'leftover' to read from the physical BOF.
390 393 */
391 394 if (phys_read_off <= phys_eof) {
392 395 read_len = MIN(*len, phys_eof - phys_read_off);
393 396 } else {
394 397 read_len = MIN(*len,
395 398 shpp->sh_phys_max_off - phys_read_off);
396 399 if (phys_read_off + *len > shpp->sh_phys_max_off) {
397 400 leftover = MIN(*len - read_len,
398 401 phys_eof - shpp->sh_pool_create_len);
399 402 }
400 403 }
401 404 }
402 405
403 406 /* offset for consumer to use next */
404 407 *offp += read_len + leftover;
405 408
406 409 /* tell the consumer how much you actually read */
407 410 *len = read_len + leftover;
408 411
409 412 if (read_len == 0) {
410 413 mutex_exit(&spa->spa_history_lock);
411 414 dmu_buf_rele(dbp, FTAG);
412 415 return (0);
413 416 }
414 417
415 418 err = dmu_read(mos, spa->spa_history, phys_read_off, read_len, buf,
416 419 DMU_READ_PREFETCH);
↓ open down ↓ |
62 lines elided |
↑ open up ↑ |
417 420 if (leftover && err == 0) {
418 421 err = dmu_read(mos, spa->spa_history, shpp->sh_pool_create_len,
419 422 leftover, buf + read_len, DMU_READ_PREFETCH);
420 423 }
421 424 mutex_exit(&spa->spa_history_lock);
422 425
423 426 dmu_buf_rele(dbp, FTAG);
424 427 return (err);
425 428 }
426 429
430 +/*
431 + * The nvlist will be consumed by this call.
432 + */
427 433 static void
428 -log_internal(history_internal_events_t event, spa_t *spa,
434 +log_internal(nvlist_t *nvl, const char *operation, spa_t *spa,
429 435 dmu_tx_t *tx, const char *fmt, va_list adx)
430 436 {
431 - history_arg_t *ha;
437 + char *msg;
432 438
433 439 /*
434 440 * If this is part of creating a pool, not everything is
435 441 * initialized yet, so don't bother logging the internal events.
436 442 */
437 443 if (tx->tx_txg == TXG_INITIAL)
438 444 return;
439 445
440 - ha = kmem_alloc(sizeof (history_arg_t), KM_SLEEP);
441 - ha->ha_history_str = kmem_alloc(vsnprintf(NULL, 0, fmt, adx) + 1,
442 - KM_SLEEP);
443 -
444 - (void) vsprintf(ha->ha_history_str, fmt, adx);
445 -
446 - ha->ha_log_type = LOG_INTERNAL;
447 - ha->ha_event = event;
448 - ha->ha_zone = NULL;
449 - ha->ha_uid = 0;
446 + msg = kmem_alloc(vsnprintf(NULL, 0, fmt, adx) + 1, KM_SLEEP);
447 + (void) vsprintf(msg, fmt, adx);
448 + fnvlist_add_string(nvl, ZPOOL_HIST_INT_STR, msg);
449 + strfree(msg);
450 +
451 + fnvlist_add_string(nvl, ZPOOL_HIST_INT_NAME, operation);
452 + fnvlist_add_uint64(nvl, ZPOOL_HIST_TXG, tx->tx_txg);
450 453
451 454 if (dmu_tx_is_syncing(tx)) {
452 - spa_history_log_sync(spa, ha, tx);
455 + spa_history_log_sync(spa, nvl, tx);
453 456 } else {
454 457 dsl_sync_task_do_nowait(spa_get_dsl(spa), NULL,
455 - spa_history_log_sync, spa, ha, 0, tx);
458 + spa_history_log_sync, spa, nvl, 0, tx);
456 459 }
457 - /* spa_history_log_sync() will free ha and strings */
460 + /* spa_history_log_sync() will free nvl */
458 461 }
459 462
460 463 void
461 -spa_history_log_internal(history_internal_events_t event, spa_t *spa,
464 +spa_history_log_internal(spa_t *spa, const char *operation,
462 465 dmu_tx_t *tx, const char *fmt, ...)
463 466 {
464 467 dmu_tx_t *htx = tx;
465 468 va_list adx;
466 469
467 470 /* create a tx if we didn't get one */
468 471 if (tx == NULL) {
469 472 htx = dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir);
470 473 if (dmu_tx_assign(htx, TXG_WAIT) != 0) {
471 474 dmu_tx_abort(htx);
472 475 return;
473 476 }
474 477 }
475 478
476 479 va_start(adx, fmt);
477 - log_internal(event, spa, htx, fmt, adx);
480 + log_internal(fnvlist_alloc(), operation, spa, htx, fmt, adx);
478 481 va_end(adx);
479 482
480 483 /* if we didn't get a tx from the caller, commit the one we made */
481 484 if (tx == NULL)
482 485 dmu_tx_commit(htx);
483 486 }
484 487
485 488 void
486 -spa_history_log_version(spa_t *spa, history_internal_events_t event)
489 +spa_history_log_internal_ds(dsl_dataset_t *ds, const char *operation,
490 + dmu_tx_t *tx, const char *fmt, ...)
491 +{
492 + va_list adx;
493 + char namebuf[MAXNAMELEN];
494 + nvlist_t *nvl = fnvlist_alloc();
495 +
496 + ASSERT(tx != NULL);
497 +
498 + dsl_dataset_name(ds, namebuf);
499 + fnvlist_add_string(nvl, ZPOOL_HIST_DSNAME, namebuf);
500 + fnvlist_add_uint64(nvl, ZPOOL_HIST_DSID, ds->ds_object);
501 +
502 + va_start(adx, fmt);
503 + log_internal(nvl, operation, dsl_dataset_get_spa(ds), tx, fmt, adx);
504 + va_end(adx);
505 +}
506 +
507 +void
508 +spa_history_log_internal_dd(dsl_dir_t *dd, const char *operation,
509 + dmu_tx_t *tx, const char *fmt, ...)
510 +{
511 + va_list adx;
512 + char namebuf[MAXNAMELEN];
513 + nvlist_t *nvl = fnvlist_alloc();
514 +
515 + ASSERT(tx != NULL);
516 +
517 + dsl_dir_name(dd, namebuf);
518 + fnvlist_add_string(nvl, ZPOOL_HIST_DSNAME, namebuf);
519 + fnvlist_add_uint64(nvl, ZPOOL_HIST_DSID,
520 + dd->dd_phys->dd_head_dataset_obj);
521 +
522 + va_start(adx, fmt);
523 + log_internal(nvl, operation, dd->dd_pool->dp_spa, tx, fmt, adx);
524 + va_end(adx);
525 +}
526 +
527 +void
528 +spa_history_log_version(spa_t *spa, const char *operation)
487 529 {
488 530 #ifdef _KERNEL
489 531 uint64_t current_vers = spa_version(spa);
490 532
491 - if (current_vers >= SPA_VERSION_ZPOOL_HISTORY) {
492 - spa_history_log_internal(event, spa, NULL,
493 - "pool spa %llu; zfs spa %llu; zpl %d; uts %s %s %s %s",
494 - (u_longlong_t)current_vers, SPA_VERSION, ZPL_VERSION,
495 - utsname.nodename, utsname.release, utsname.version,
496 - utsname.machine);
497 - }
498 - cmn_err(CE_CONT, "!%s version %llu pool %s using %llu",
499 - event == LOG_POOL_IMPORT ? "imported" :
500 - event == LOG_POOL_CREATE ? "created" : "accessed",
533 + spa_history_log_internal(spa, operation, NULL,
534 + "pool version %llu; software version %llu/%d; uts %s %s %s %s",
535 + (u_longlong_t)current_vers, SPA_VERSION, ZPL_VERSION,
536 + utsname.nodename, utsname.release, utsname.version,
537 + utsname.machine);
538 + cmn_err(CE_CONT, "!%s version %llu pool %s using %llu", operation,
501 539 (u_longlong_t)current_vers, spa_name(spa), SPA_VERSION);
502 540 #endif
503 541 }
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX