986 *
987 * Note that when the long hold is released, the dataset is still held but
988 * the pool is not held. The dataset may change arbitrarily during this time
989 * (e.g. it could be destroyed). Therefore you shouldn't do anything to the
990 * dataset except release it.
991 *
992 * User-initiated operations (e.g. ioctls, zfs_ioc_*()) are either read-only
993 * or modifying operations.
994 *
995 * Modifying operations should generally use dsl_sync_task(). The synctask
996 * infrastructure enforces proper locking strategy with respect to the
997 * dp_config_rwlock. See the comment above dsl_sync_task() for details.
998 *
999 * Read-only operations will manually hold the pool, then the dataset, obtain
1000 * information from the dataset, then release the pool and dataset.
1001 * dmu_objset_{hold,rele}() are convenience routines that also do the pool
1002 * hold/rele.
1003 */
1004
1005 int
1006 dsl_pool_hold(const char *name, void *tag, dsl_pool_t **dp)
1007 {
1008 spa_t *spa;
1009 int error;
1010
1011 error = spa_open(name, &spa, tag);
1012 if (error == 0) {
1013 *dp = spa_get_dsl(spa);
1014 dsl_pool_config_enter(*dp, tag);
1015 }
1016 return (error);
1017 }
1018
/*
 * Release a hold taken with dsl_pool_hold(): drop the pool's config
 * lock, then close the underlying spa.  "tag" must match the tag used
 * for the hold.
 */
void
dsl_pool_rele(dsl_pool_t *dp, void *tag)
{
	/*
	 * NOTE(review): the config lock is exited before spa_close() —
	 * presumably dp must not be touched once the spa hold is dropped;
	 * keep this ordering.
	 */
	dsl_pool_config_exit(dp, tag);
	spa_close(dp->dp_spa, tag);
}
1025
1026 void
1027 dsl_pool_config_enter(dsl_pool_t *dp, void *tag)
1028 {
1029 /*
1030 * We use a "reentrant" reader-writer lock, but not reentrantly.
1031 *
1032 * The rrwlock can (with the track_all flag) track all reading threads,
1033 * which is very useful for debugging which code path failed to release
1034 * the lock, and for verifying that the *current* thread does hold
1035 * the lock.
1036 *
1037 * (Unlike a rwlock, which knows that N threads hold it for
1038 * read, but not *which* threads, so rw_held(RW_READER) returns TRUE
|
986 *
987 * Note that when the long hold is released, the dataset is still held but
988 * the pool is not held. The dataset may change arbitrarily during this time
989 * (e.g. it could be destroyed). Therefore you shouldn't do anything to the
990 * dataset except release it.
991 *
992 * User-initiated operations (e.g. ioctls, zfs_ioc_*()) are either read-only
993 * or modifying operations.
994 *
995 * Modifying operations should generally use dsl_sync_task(). The synctask
996 * infrastructure enforces proper locking strategy with respect to the
997 * dp_config_rwlock. See the comment above dsl_sync_task() for details.
998 *
999 * Read-only operations will manually hold the pool, then the dataset, obtain
1000 * information from the dataset, then release the pool and dataset.
1001 * dmu_objset_{hold,rele}() are convenience routines that also do the pool
1002 * hold/rele.
1003 */
1004
1005 int
1006 dsl_pool_hold_lock(const char *name, void *tag, dsl_pool_t **dp, int lock)
1007 {
1008 spa_t *spa;
1009 int error;
1010
1011 error = spa_open_lock(name, &spa, tag, lock);
1012 if (error == 0) {
1013 *dp = spa_get_dsl(spa);
1014 dsl_pool_config_enter(*dp, tag);
1015 }
1016 return (error);
1017 }
1018
/*
 * Hold the pool containing "name" and take its config lock.
 * Convenience wrapper around dsl_pool_hold_lock() with the default
 * lock argument (1).
 */
int
dsl_pool_hold(const char *name, void *tag, dsl_pool_t **dp)
{
	return (dsl_pool_hold_lock(name, tag, dp, 1));
}
1024
/*
 * Release a hold taken with dsl_pool_hold() or dsl_pool_hold_lock():
 * drop the pool's config lock, then close the underlying spa.  "tag"
 * must match the tag used for the hold.
 */
void
dsl_pool_rele(dsl_pool_t *dp, void *tag)
{
	/*
	 * NOTE(review): the config lock is exited before spa_close() —
	 * presumably dp must not be touched once the spa hold is dropped;
	 * keep this ordering.
	 */
	dsl_pool_config_exit(dp, tag);
	spa_close(dp->dp_spa, tag);
}
1031
1032 void
1033 dsl_pool_config_enter(dsl_pool_t *dp, void *tag)
1034 {
1035 /*
1036 * We use a "reentrant" reader-writer lock, but not reentrantly.
1037 *
1038 * The rrwlock can (with the track_all flag) track all reading threads,
1039 * which is very useful for debugging which code path failed to release
1040 * the lock, and for verifying that the *current* thread does hold
1041 * the lock.
1042 *
1043 * (Unlike a rwlock, which knows that N threads hold it for
1044 * read, but not *which* threads, so rw_held(RW_READER) returns TRUE
|