3956 ::vdev -r should work with pipelines
3957 ztest should update the cachefile before killing itself
3958 multiple scans can lead to partial resilvering
3959 ddt entries are not always resilvered
3960 dsl_scan can skip over dedup-ed blocks if physical birth != logical birth
3961 freed gang blocks are not resilvered and can cause pool to suspend
3962 ztest should print out zfs debug buffer before exiting
Reviewed by: Matthew Ahrens <mahrens@delphix.com>
Reviewed by: Adam Leventhal <ahl@delphix.com>
*** 18,28 ****
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
! * Copyright (c) 2012 by Delphix. All rights reserved.
* Copyright 2011 Nexenta Systems, Inc. All rights reserved.
* Copyright (c) 2013 Steven Hartland. All rights reserved.
*/
/*
--- 18,28 ----
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
! * Copyright (c) 2013 by Delphix. All rights reserved.
* Copyright 2011 Nexenta Systems, Inc. All rights reserved.
* Copyright (c) 2013 Steven Hartland. All rights reserved.
*/
/*
*** 765,774 ****
--- 765,784 ----
static void
ztest_kill(ztest_shared_t *zs)
{
zs->zs_alloc = metaslab_class_get_alloc(spa_normal_class(ztest_spa));
zs->zs_space = metaslab_class_get_space(spa_normal_class(ztest_spa));
+
+ /*
+ * Before we kill off ztest, make sure that the config is updated.
+ * See comment above spa_config_sync().
+ */
+ mutex_enter(&spa_namespace_lock);
+ spa_config_sync(ztest_spa, B_FALSE, B_FALSE);
+ mutex_exit(&spa_namespace_lock);
+
+ zfs_dbgmsg_print(FTAG);
(void) kill(getpid(), SIGKILL);
}
static uint64_t
ztest_random(uint64_t range)
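The hunk above covers issues 3957 and 3962: ztest simulates a crash by SIGKILLing itself, so anything the next incarnation of the process needs must already be on disk before the signal is sent (here the zpool cachefile, written by spa_config_sync() under spa_namespace_lock), and the ZFS debug buffer is printed first so the log is not lost. Below is a minimal standalone sketch of the same flush-then-SIGKILL pattern; save_counters(), simulate_crash(), and the file name are hypothetical and not part of ztest.

/*
 * Illustrative sketch only -- not part of the patch.  Persist whatever a
 * later run of the process will need (ztest syncs the zpool cachefile via
 * spa_config_sync()), then exit abruptly without running exit handlers,
 * as ztest_kill() does.
 */
#include <fcntl.h>
#include <signal.h>
#include <stdint.h>
#include <unistd.h>

static void
save_counters(uint64_t alloc, uint64_t space)
{
	int fd = open("/tmp/ztest_sketch_counters", O_WRONLY | O_CREAT |
	    O_TRUNC, 0644);

	if (fd == -1)
		return;
	(void) write(fd, &alloc, sizeof (alloc));
	(void) write(fd, &space, sizeof (space));
	(void) fsync(fd);		/* reach stable storage before dying */
	(void) close(fd);
}

static void
simulate_crash(uint64_t alloc, uint64_t space)
{
	save_counters(alloc, space);
	(void) kill(getpid(), SIGKILL);	/* abrupt exit, as in ztest_kill() */
}

int
main(void)
{
	simulate_crash(123, 456);
	return (0);			/* never reached */
}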
*** 2729,2739 ****
nvlist_t *root;
uint64_t leaves;
uint64_t leaf, top;
uint64_t ashift = ztest_get_ashift();
uint64_t oldguid, pguid;
! size_t oldsize, newsize;
char oldpath[MAXPATHLEN], newpath[MAXPATHLEN];
int replacing;
int oldvd_has_siblings = B_FALSE;
int newvd_is_spare = B_FALSE;
int oldvd_is_log;
--- 2739,2749 ----
nvlist_t *root;
uint64_t leaves;
uint64_t leaf, top;
uint64_t ashift = ztest_get_ashift();
uint64_t oldguid, pguid;
! uint64_t oldsize, newsize;
char oldpath[MAXPATHLEN], newpath[MAXPATHLEN];
int replacing;
int oldvd_has_siblings = B_FALSE;
int newvd_is_spare = B_FALSE;
int oldvd_is_log;
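A plausible motivation, not stated in the webrev itself, for switching oldsize and newsize from size_t to uint64_t in the hunk above: size_t is only 32 bits in an ILP32 compilation environment, so a device size of 4 GiB or more would be truncated, whereas the surrounding vdev code already works in uint64_t; the (longlong_t) casts in the fatal() call were dropped as part of the same change. A small standalone illustration of the truncation, hypothetical and independent of ztest:

/*
 * Hypothetical illustration, not part of the patch.  On an ILP32 build,
 * size_t is 32 bits, so the cast below silently drops the high bits
 * (5 GiB becomes 1 GiB); with uint64_t the value is preserved.
 */
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	uint64_t vdev_size = 5ULL << 30;	/* 5 GiB */
	size_t narrowed = (size_t)vdev_size;	/* lossy when size_t is 32-bit */

	(void) printf("uint64_t: %llu  size_t: %zu\n",
	    (unsigned long long)vdev_size, narrowed);
	return (0);
}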
*** 2888,2899 ****
/* XXX workaround 6690467 */
if (error != expected_error && expected_error != EBUSY) {
fatal(0, "attach (%s %llu, %s %llu, %d) "
"returned %d, expected %d",
! oldpath, (longlong_t)oldsize, newpath,
! (longlong_t)newsize, replacing, error, expected_error);
}
VERIFY(mutex_unlock(&ztest_vdev_lock) == 0);
}
--- 2898,2909 ----
/* XXX workaround 6690467 */
if (error != expected_error && expected_error != EBUSY) {
fatal(0, "attach (%s %llu, %s %llu, %d) "
"returned %d, expected %d",
! oldpath, oldsize, newpath,
! newsize, replacing, error, expected_error);
}
VERIFY(mutex_unlock(&ztest_vdev_lock) == 0);
}
*** 4799,4809 ****
* then we only allow faults on the device that is
* resilvering.
*/
if (vd0 != NULL && maxfaults != 1 &&
(!vdev_resilver_needed(vd0->vdev_top, NULL, NULL) ||
! vd0->vdev_resilvering)) {
/*
* Make vd0 explicitly claim to be unreadable,
* or unwriteable, or reach behind its back
* and close the underlying fd. We can do this if
* maxfaults == 0 because we'll fail and reexecute,
--- 4809,4819 ----
* then we only allow faults on the device that is
* resilvering.
*/
if (vd0 != NULL && maxfaults != 1 &&
(!vdev_resilver_needed(vd0->vdev_top, NULL, NULL) ||
! vd0->vdev_resilver_txg != 0)) {
/*
* Make vd0 explicitly claim to be unreadable,
* or unwriteable, or reach behind its back
* and close the underlying fd. We can do this if
* maxfaults == 0 because we'll fail and reexecute,
*** 5649,5658 ****
--- 5659,5669 ----
txg_wait_synced(spa_get_dsl(spa), 0);
zs->zs_alloc = metaslab_class_get_alloc(spa_normal_class(spa));
zs->zs_space = metaslab_class_get_space(spa_normal_class(spa));
+ zfs_dbgmsg_print(FTAG);
umem_free(tid, ztest_opts.zo_threads * sizeof (thread_t));
/* Kill the resume thread */
ztest_exiting = B_TRUE;