			VERIFY(spa_config_parse(spa, &vd, l2cache[i], NULL, 0,
			    VDEV_ALLOC_L2CACHE) == 0);
			ASSERT(vd != NULL);
			newvdevs[i] = vd;

			/*
			 * Commit this vdev as an l2cache device,
			 * even if it fails to open.
			 */
			spa_l2cache_add(vd);

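			/*
			 * Cache devices are leaf vdevs that serve as their
			 * own top-level vdev and hang off the aux list.
			 */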
			vd->vdev_top = vd;
			vd->vdev_aux = sav;

			spa_l2cache_activate(vd);

			if (vdev_open(vd) != 0)
				continue;

			(void) vdev_validate_aux(vd);

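			/*
			 * If the device's config carries the persistency
			 * flag, ask l2arc_add_vdev() to rebuild the device's
			 * L2ARC contents from its on-device log.
			 */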
			if (!vdev_is_dead(vd)) {
				boolean_t do_rebuild = B_FALSE;

				(void) nvlist_lookup_boolean_value(l2cache[i],
				    ZPOOL_CONFIG_L2CACHE_PERSISTENT,
				    &do_rebuild);
				l2arc_add_vdev(spa, vd, do_rebuild);
			}
		}
	}
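
	/*
	 * For illustration only: a hypothetical cache-device config entry
	 * that would request a rebuild above (a sketch, not code from this
	 * function; alloc flags assumed):
	 *
	 *	nvlist_t *nvl;
	 *	VERIFY(nvlist_alloc(&nvl, NV_UNIQUE_NAME, KM_SLEEP) == 0);
	 *	VERIFY(nvlist_add_boolean_value(nvl,
	 *	    ZPOOL_CONFIG_L2CACHE_PERSISTENT, B_TRUE) == 0);
	 */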

	/*
	 * Purge vdevs that were dropped
	 */
	for (i = 0; i < oldnvdevs; i++) {
		uint64_t pool;

		vd = oldvdevs[i];
		if (vd != NULL) {
			ASSERT(vd->vdev_isl2cache);

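			/*
			 * Remove it from the L2ARC only if it is still
			 * owned by a pool and actually present there.
			 */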
			if (spa_l2cache_exists(vd->vdev_guid, &pool) &&
			    pool != 0ULL && l2arc_vdev_present(vd))
				l2arc_remove_vdev(vd);
			vdev_clear_stats(vd);
			vdev_free(vd);
		}
	}

	if (oldvdevs)
		kmem_free(oldvdevs, oldnvdevs * sizeof (void *));

/* ... */

		spa_async_request(spa, SPA_ASYNC_RESILVER);

		/*
		 * Log the fact that we booted up (so that we can detect if
		 * we rebooted in the middle of an operation).
		 */
		spa_history_log_version(spa, "open");

		/*
		 * Delete any inconsistent datasets.
		 */
		(void) dmu_objset_find(spa_name(spa),
		    dsl_destroy_inconsistent, NULL, DS_FIND_CHILDREN);

		/*
		 * Clean up any stale temporary dataset userrefs.
		 */
		dsl_pool_clean_tmp_userrefs(spa->spa_dsl_pool);
	}

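	/*
	 * Hand the persistent L2ARC rebuild off to the async thread so
	 * that reading the on-device logs does not stall pool import.
	 */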
	spa_async_request(spa, SPA_ASYNC_L2CACHE_REBUILD);

	return (0);
}

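/*
 * Retry a failed load one txg further back: tear the pool down, lower
 * the rewind ceiling (spa_load_max_txg), and reload with async tasks
 * suspended.
 */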
static int
spa_load_retry(spa_t *spa, spa_load_state_t state, int mosconfig)
{
	int mode = spa->spa_mode;

	spa_unload(spa);
	spa_deactivate(spa);

	spa->spa_load_max_txg--;

	spa_activate(spa, mode);
	spa_async_suspend(spa);

	return (spa_load(spa, state, SPA_IMPORT_EXISTING, mosconfig));
}

/* ... */

	/*
	 * See if any devices need to be probed.
	 */
	if (tasks & SPA_ASYNC_PROBE) {
		spa_vdev_state_enter(spa, SCL_NONE);
		spa_async_probe(spa, spa->spa_root_vdev);
		(void) spa_vdev_state_exit(spa, NULL, 0);
	}

	/*
	 * If any devices are done replacing, detach them.
	 */
	if (tasks & SPA_ASYNC_RESILVER_DONE)
		spa_vdev_resilver_done(spa);

	/*
	 * Kick off a resilver.
	 */
	if (tasks & SPA_ASYNC_RESILVER)
		dsl_resilver_restart(spa->spa_dsl_pool, 0);

	/*
	 * Kick off L2 cache rebuilding.
	 */
	if (tasks & SPA_ASYNC_L2CACHE_REBUILD)
		l2arc_spa_rebuild_start(spa);

	/*
	 * Let the world know that we're done.
	 */
	mutex_enter(&spa->spa_async_lock);
	spa->spa_async_thread = NULL;
	cv_broadcast(&spa->spa_async_cv);
	mutex_exit(&spa->spa_async_lock);
	thread_exit();
}

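/*
 * Hold off async tasks: bump the nesting suspend count and wait for
 * any in-flight async thread to finish.
 */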
void
spa_async_suspend(spa_t *spa)
{
	mutex_enter(&spa->spa_async_lock);
	spa->spa_async_suspended++;
	while (spa->spa_async_thread != NULL)
		cv_wait(&spa->spa_async_cv, &spa->spa_async_lock);
	mutex_exit(&spa->spa_async_lock);
}
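
/*
 * Sketch of the expected pairing (spa_async_resume() is assumed here
 * to be the counterpart that decrements spa_async_suspended):
 *
 *	spa_async_suspend(spa);
 *	... reconfigure the pool ...
 *	spa_async_resume(spa);
 */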

void