440 */
441 cp = strpbrk(search.spa_name, "/@");
442 if (cp != NULL)
443 *cp = '\0';
444
445 spa = avl_find(&spa_namespace_avl, &search, &where);
446
447 return (spa);
448 }
449
450 /*
451 * Fires when spa_sync has not completed within zfs_deadman_synctime_ms.
452 * If the zfs_deadman_enabled flag is set then it inspects all vdev queues
453 * looking for potentially hung I/Os.
454 */
455 void
456 spa_deadman(void *arg)
457 {
458 spa_t *spa = arg;
459
460 zfs_dbgmsg("slow spa_sync: started %llu seconds ago, calls %llu",
461 (gethrtime() - spa->spa_sync_starttime) / NANOSEC,
462 ++spa->spa_deadman_calls);
463 if (zfs_deadman_enabled)
464 vdev_deadman(spa->spa_root_vdev);
465 }
466
467 /*
468 * Create an uninitialized spa_t with the given name. Requires
469 * spa_namespace_lock. The caller must ensure that the spa_t doesn't already
470 * exist by calling spa_lookup() first.
471 */
472 spa_t *
473 spa_add(const char *name, nvlist_t *config, const char *altroot)
474 {
475 spa_t *spa;
476 spa_config_dirent_t *dp;
477 cyc_handler_t hdlr;
478 cyc_time_t when;
479
1008
1009 spa_config_exit(spa, SCL_ALL, spa);
1010
1011 /*
1012 * Panic the system if the specified tag requires it. This
1013 * is useful for ensuring that configurations are updated
1014 * transactionally.
1015 */
1016 if (zio_injection_enabled)
1017 zio_handle_panic_injection(spa, tag, 0);
1018
1019 /*
1020 * Note: this txg_wait_synced() is important because it ensures
1021 * that there won't be more than one config change per txg.
1022 * This allows us to use the txg as the generation number.
1023 */
1024 if (error == 0)
1025 txg_wait_synced(spa->spa_dsl_pool, txg);
1026
1027 if (vd != NULL) {
1028 ASSERT(!vd->vdev_detached || vd->vdev_dtl_smo.smo_object == 0);
1029 spa_config_enter(spa, SCL_ALL, spa, RW_WRITER);
1030 vdev_free(vd);
1031 spa_config_exit(spa, SCL_ALL, spa);
1032 }
1033
1034 /*
1035 * If the config changed, update the config cache.
1036 */
1037 if (config_changed)
1038 spa_config_sync(spa, B_FALSE, B_TRUE);
1039 }
1040
1041 /*
1042 * Unlock the spa_t after adding or removing a vdev. Besides undoing the
 * locking of spa_vdev_enter(), we also want to make sure the transactions have
1044 * synced to disk, and then update the global configuration cache with the new
1045 * information.
1046 */
1047 int
1048 spa_vdev_exit(spa_t *spa, vdev_t *vd, uint64_t txg, int error)
1685 offsetof(spa_aux_t, aux_avl));
1686
1687 spa_mode_global = mode;
1688
1689 #ifdef _KERNEL
1690 spa_arch_init();
1691 #else
1692 if (spa_mode_global != FREAD && dprintf_find_string("watch")) {
1693 arc_procfd = open("/proc/self/ctl", O_WRONLY);
1694 if (arc_procfd == -1) {
1695 perror("could not enable watchpoints: "
1696 "opening /proc/self/ctl failed: ");
1697 } else {
1698 arc_watch = B_TRUE;
1699 }
1700 }
1701 #endif
1702
1703 refcount_init();
1704 unique_init();
1705 space_map_init();
1706 zio_init();
1707 dmu_init();
1708 zil_init();
1709 vdev_cache_stat_init();
1710 zfs_prop_init();
1711 zpool_prop_init();
1712 zpool_feature_init();
1713 spa_config_load();
1714 l2arc_start();
1715 }
1716
/*
 * Tear down the global SPA state.  Subsystems are shut down in the
 * exact reverse order of their initialization in spa_init(), then the
 * global namespace/spare/l2cache AVL trees and their synchronization
 * primitives are destroyed.  Statement order here is load-bearing.
 */
void
spa_fini(void)
{
	/* Stop the L2ARC feed thread before any pools are evicted. */
	l2arc_stop();

	/* Flush every remaining spa_t out of the namespace. */
	spa_evict_all();

	/* Subsystem teardown, mirroring spa_init() in reverse. */
	vdev_cache_stat_fini();
	zil_fini();
	dmu_fini();
	zio_fini();
	space_map_fini();
	unique_fini();
	refcount_fini();

	/* The AVL trees must be empty by now (spa_evict_all() above). */
	avl_destroy(&spa_namespace_avl);
	avl_destroy(&spa_spare_avl);
	avl_destroy(&spa_l2cache_avl);

	cv_destroy(&spa_namespace_cv);
	mutex_destroy(&spa_namespace_lock);
	mutex_destroy(&spa_spare_lock);
	mutex_destroy(&spa_l2cache_lock);
}
1741
1742 /*
1743 * Return whether this pool has slogs. No locking needed.
1744 * It's not a problem if the wrong answer is returned as it's only for
 * performance and not correctness.
1746 */
1747 boolean_t
1748 spa_has_slogs(spa_t *spa)
|
440 */
441 cp = strpbrk(search.spa_name, "/@");
442 if (cp != NULL)
443 *cp = '\0';
444
445 spa = avl_find(&spa_namespace_avl, &search, &where);
446
447 return (spa);
448 }
449
450 /*
451 * Fires when spa_sync has not completed within zfs_deadman_synctime_ms.
452 * If the zfs_deadman_enabled flag is set then it inspects all vdev queues
453 * looking for potentially hung I/Os.
454 */
455 void
456 spa_deadman(void *arg)
457 {
458 spa_t *spa = arg;
459
460 /*
461 * Disable the deadman timer if the pool is suspended.
462 */
463 if (spa_suspended(spa)) {
464 VERIFY(cyclic_reprogram(spa->spa_deadman_cycid, CY_INFINITY));
465 return;
466 }
467
468 zfs_dbgmsg("slow spa_sync: started %llu seconds ago, calls %llu",
469 (gethrtime() - spa->spa_sync_starttime) / NANOSEC,
470 ++spa->spa_deadman_calls);
471 if (zfs_deadman_enabled)
472 vdev_deadman(spa->spa_root_vdev);
473 }
474
475 /*
476 * Create an uninitialized spa_t with the given name. Requires
477 * spa_namespace_lock. The caller must ensure that the spa_t doesn't already
478 * exist by calling spa_lookup() first.
479 */
480 spa_t *
481 spa_add(const char *name, nvlist_t *config, const char *altroot)
482 {
483 spa_t *spa;
484 spa_config_dirent_t *dp;
485 cyc_handler_t hdlr;
486 cyc_time_t when;
487
1016
1017 spa_config_exit(spa, SCL_ALL, spa);
1018
1019 /*
1020 * Panic the system if the specified tag requires it. This
1021 * is useful for ensuring that configurations are updated
1022 * transactionally.
1023 */
1024 if (zio_injection_enabled)
1025 zio_handle_panic_injection(spa, tag, 0);
1026
1027 /*
1028 * Note: this txg_wait_synced() is important because it ensures
1029 * that there won't be more than one config change per txg.
1030 * This allows us to use the txg as the generation number.
1031 */
1032 if (error == 0)
1033 txg_wait_synced(spa->spa_dsl_pool, txg);
1034
1035 if (vd != NULL) {
1036 ASSERT(!vd->vdev_detached || vd->vdev_dtl_sm == NULL);
1037 spa_config_enter(spa, SCL_ALL, spa, RW_WRITER);
1038 vdev_free(vd);
1039 spa_config_exit(spa, SCL_ALL, spa);
1040 }
1041
1042 /*
1043 * If the config changed, update the config cache.
1044 */
1045 if (config_changed)
1046 spa_config_sync(spa, B_FALSE, B_TRUE);
1047 }
1048
1049 /*
1050 * Unlock the spa_t after adding or removing a vdev. Besides undoing the
 * locking of spa_vdev_enter(), we also want to make sure the transactions have
1052 * synced to disk, and then update the global configuration cache with the new
1053 * information.
1054 */
1055 int
1056 spa_vdev_exit(spa_t *spa, vdev_t *vd, uint64_t txg, int error)
1693 offsetof(spa_aux_t, aux_avl));
1694
1695 spa_mode_global = mode;
1696
1697 #ifdef _KERNEL
1698 spa_arch_init();
1699 #else
1700 if (spa_mode_global != FREAD && dprintf_find_string("watch")) {
1701 arc_procfd = open("/proc/self/ctl", O_WRONLY);
1702 if (arc_procfd == -1) {
1703 perror("could not enable watchpoints: "
1704 "opening /proc/self/ctl failed: ");
1705 } else {
1706 arc_watch = B_TRUE;
1707 }
1708 }
1709 #endif
1710
1711 refcount_init();
1712 unique_init();
1713 range_tree_init();
1714 zio_init();
1715 dmu_init();
1716 zil_init();
1717 vdev_cache_stat_init();
1718 zfs_prop_init();
1719 zpool_prop_init();
1720 zpool_feature_init();
1721 spa_config_load();
1722 l2arc_start();
1723 }
1724
/*
 * Tear down the global SPA state.  Subsystems are shut down in the
 * exact reverse order of their initialization in spa_init(), then the
 * global namespace/spare/l2cache AVL trees and their synchronization
 * primitives are destroyed.  Statement order here is load-bearing.
 */
void
spa_fini(void)
{
	/* Stop the L2ARC feed thread before any pools are evicted. */
	l2arc_stop();

	/* Flush every remaining spa_t out of the namespace. */
	spa_evict_all();

	/* Subsystem teardown, mirroring spa_init() in reverse. */
	vdev_cache_stat_fini();
	zil_fini();
	dmu_fini();
	zio_fini();
	range_tree_fini();
	unique_fini();
	refcount_fini();

	/* The AVL trees must be empty by now (spa_evict_all() above). */
	avl_destroy(&spa_namespace_avl);
	avl_destroy(&spa_spare_avl);
	avl_destroy(&spa_l2cache_avl);

	cv_destroy(&spa_namespace_cv);
	mutex_destroy(&spa_namespace_lock);
	mutex_destroy(&spa_spare_lock);
	mutex_destroy(&spa_l2cache_lock);
}
1749
1750 /*
1751 * Return whether this pool has slogs. No locking needed.
1752 * It's not a problem if the wrong answer is returned as it's only for
 * performance and not correctness.
1754 */
1755 boolean_t
1756 spa_has_slogs(spa_t *spa)
|