	ASSERT(!list_link_active(&vd->vdev_config_dirty_node));
	ASSERT(!list_link_active(&vd->vdev_state_dirty_node));

	/*
	 * Free all children.
	 */
	for (int c = 0; c < vd->vdev_children; c++)
		vdev_free(vd->vdev_child[c]);

	ASSERT(vd->vdev_child == NULL);
	ASSERT(vd->vdev_guid_sum == vd->vdev_guid);

	/*
	 * Discard allocation state.
	 */
	if (vd->vdev_mg != NULL) {
		vdev_metaslab_fini(vd);
		metaslab_group_destroy(vd->vdev_mg);
	}

	ASSERT0(vd->vdev_stat.vs_space);
	ASSERT0(vd->vdev_stat.vs_dspace);
	ASSERT0(vd->vdev_stat.vs_alloc);

	/*
	 * Remove this vdev from its parent's child list.
	 */
	vdev_remove_child(vd->vdev_parent, vd);

	ASSERT(vd->vdev_parent == NULL);

	/*
	 * Clean up vdev structure.
	 */
	vdev_queue_fini(vd);
	vdev_cache_fini(vd);

	if (vd->vdev_path)
		spa_strfree(vd->vdev_path);
	if (vd->vdev_devid)
		spa_strfree(vd->vdev_devid);
	if (vd->vdev_physpath)
		spa_strfree(vd->vdev_physpath);

/* ... */

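/*
 * Sync this vdev's DTL (dirty time log) to disk: the in-core map of
 * missing txgs is written out as a space map object in the MOS so that
 * a resilver can be resumed after an export or reboot.
 */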
void
vdev_dtl_sync(vdev_t *vd, uint64_t txg)
{
	spa_t *spa = vd->vdev_spa;
	space_map_obj_t *smo = &vd->vdev_dtl_smo;
	space_map_t *sm = &vd->vdev_dtl[DTL_MISSING];
	objset_t *mos = spa->spa_meta_objset;
	space_map_t smsync;
	kmutex_t smlock;
	dmu_buf_t *db;
	dmu_tx_t *tx;

	ASSERT(!vd->vdev_ishole);

	tx = dmu_tx_create_assigned(spa->spa_dsl_pool, txg);

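	/*
	 * A detached vdev no longer needs its DTL; free the space map
	 * object (if any) and commit.
	 */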
	if (vd->vdev_detached) {
		if (smo->smo_object != 0) {
			int err = dmu_object_free(mos, smo->smo_object, tx);
			ASSERT0(err);
			smo->smo_object = 0;
		}
		dmu_tx_commit(tx);
		return;
	}

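	/*
	 * First sync for this DTL: allocate its space map object and
	 * dirty the top-level vdev config so the new object number makes
	 * it into the on-disk vdev configuration.
	 */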
	if (smo->smo_object == 0) {
		ASSERT(smo->smo_objsize == 0);
		ASSERT(smo->smo_alloc == 0);
		smo->smo_object = dmu_object_alloc(mos,
		    DMU_OT_SPACE_MAP, 1 << SPACE_MAP_BLOCKSHIFT,
		    DMU_OT_SPACE_MAP_HEADER, sizeof (*smo), tx);
		ASSERT(smo->smo_object != 0);
		vdev_config_dirty(vd->vdev_top);
	}

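	/*
	 * Build a scratch space map, protected by its own lock, covering
	 * the same range as the in-core DTL; its contents will be written
	 * out to the space map object allocated above.
	 */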
	mutex_init(&smlock, NULL, MUTEX_DEFAULT, NULL);

	space_map_create(&smsync, sm->sm_start, sm->sm_size, sm->sm_shift,
	    &smlock);

/* ... */

	}

	/*
	 * We don't actually check the pool state here.  If it's in fact in
	 * use by another pool, we update this fact on the fly when requested.
	 */
	nvlist_free(label);
	return (0);
}
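/*
 * Free the on-disk state backing a vdev that is being removed: its DTL
 * space map object, each metaslab's space map object, and the metaslab
 * array itself.  All allocated space must already have been freed, as
 * the smo_alloc assertions below verify.
 */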
void
vdev_remove(vdev_t *vd, uint64_t txg)
{
	spa_t *spa = vd->vdev_spa;
	objset_t *mos = spa->spa_meta_objset;
	dmu_tx_t *tx;

	tx = dmu_tx_create_assigned(spa_get_dsl(spa), txg);

	if (vd->vdev_dtl_smo.smo_object) {
		ASSERT0(vd->vdev_dtl_smo.smo_alloc);
		(void) dmu_object_free(mos, vd->vdev_dtl_smo.smo_object, tx);
		vd->vdev_dtl_smo.smo_object = 0;
	}

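	/*
	 * Free every metaslab's space map object; metaslabs that don't
	 * exist or never allocated an object are skipped.
	 */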
	if (vd->vdev_ms != NULL) {
		for (int m = 0; m < vd->vdev_ms_count; m++) {
			metaslab_t *msp = vd->vdev_ms[m];

			if (msp == NULL || msp->ms_smo.smo_object == 0)
				continue;

			ASSERT0(msp->ms_smo.smo_alloc);
			(void) dmu_object_free(mos, msp->ms_smo.smo_object, tx);
			msp->ms_smo.smo_object = 0;
		}
	}

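	/*
	 * Finally, free the object array that recorded the metaslab
	 * space map object numbers.
	 */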
	if (vd->vdev_ms_array) {
		(void) dmu_object_free(mos, vd->vdev_ms_array, tx);
		vd->vdev_ms_array = 0;
		vd->vdev_ms_shift = 0;
	}
	dmu_tx_commit(tx);
}

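/*
 * Called from spa_sync() once the txg is on stable storage: finish the
 * sync of any metaslabs dirtied in this txg and, if any were dirty,
 * reassess the metaslab group.
 */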
void
vdev_sync_done(vdev_t *vd, uint64_t txg)
{
	metaslab_t *msp;
	boolean_t reassess = !txg_list_empty(&vd->vdev_ms_list, TXG_CLEAN(txg));

	ASSERT(!vd->vdev_ishole);

/* ... */

			/*
			 * Prevent any future allocations.
			 */
			metaslab_group_passivate(mg);
			(void) spa_vdev_state_exit(spa, vd, 0);

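			/*
			 * With allocations disabled and the state lock
			 * dropped, evacuate any intent log records still
			 * resident on this log device.
			 */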
			error = spa_offline_log(spa);

			spa_vdev_state_enter(spa, SCL_ALLOC);

			/*
			 * Check to see if the config has changed while the
			 * state lock was dropped.  On error, reactivate the
			 * metaslab group and fail; on a config change, undo
			 * the passivation and retry from the top.
			 */
			if (error || generation != spa->spa_config_generation) {
				metaslab_group_activate(mg);
				if (error)
					return (spa_vdev_state_exit(spa,
					    vd, error));
				(void) spa_vdev_state_exit(spa, vd, 0);
				goto top;
			}
			ASSERT0(tvd->vdev_stat.vs_alloc);
		}

		/*
		 * Offline this device and reopen its top-level vdev.
		 * If the top-level vdev is a log device then just offline
		 * it.  Otherwise, if this action results in the top-level
		 * vdev becoming unusable, undo it and fail the request.
		 */
		vd->vdev_offline = B_TRUE;
		vdev_reopen(tvd);

		if (!tvd->vdev_islog && vd->vdev_aux == NULL &&
		    vdev_is_dead(tvd)) {
			vd->vdev_offline = B_FALSE;
			vdev_reopen(tvd);
			return (spa_vdev_state_exit(spa, NULL, EBUSY));
		}

		/*
		 * Add the device back into the metaslab rotor so that