/*
 * Tail of dsl_dataset_evict() — apparently the dbuf user-eviction
 * callback (it is registered via dmu_buf_set_user_ie() elsewhere in
 * this file). Tears down everything hanging off the dsl_dataset_t and
 * frees it. NOTE(review): the function header is outside this chunk.
 */
272 dmu_objset_evict(ds->ds_objset);
273
/* Drop the cached reference on the previous snapshot, if any. */
274 if (ds->ds_prev) {
275 dsl_dataset_drop_ref(ds->ds_prev, ds);
276 ds->ds_prev = NULL;
277 }
278
279 bplist_destroy(&ds->ds_pending_deadlist);
/*
 * Only close the deadlist when the dataset's dbuf was set up (db !=
 * NULL); otherwise assert the deadlist was never opened.
 */
280 if (db != NULL) {
281 dsl_deadlist_close(&ds->ds_deadlist);
282 } else {
283 ASSERT(ds->ds_deadlist.dl_dbuf == NULL);
284 ASSERT(!ds->ds_deadlist.dl_oldfmt);
285 }
286 if (ds->ds_dir)
287 dsl_dir_close(ds->ds_dir, ds);
288
/* Must no longer be linked on any txg sync list. */
289 ASSERT(!list_link_active(&ds->ds_synced_link));
290
/*
 * NOTE(review): ds_sendstream_lock is mutex_init()ed in the open path
 * (and ds_sendstreams list_create()d) but neither is destroyed here —
 * looks like a teardown omission; confirm against upstream.
 */
291 mutex_destroy(&ds->ds_lock);
292 mutex_destroy(&ds->ds_recvlock);
293 mutex_destroy(&ds->ds_opening_lock);
294 rw_destroy(&ds->ds_rwlock);
295 cv_destroy(&ds->ds_exclusive_cv);
296
297 kmem_free(ds, sizeof (dsl_dataset_t));
298 }
299
/*
 * Resolve and cache this snapshot's name in ds->ds_snapname.
 * Returns 0 immediately when the name is already cached or when there
 * is no next-snapshot object to resolve against.
 * NOTE(review): the remainder of the body (the actual MOS lookup using
 * headphys/headdbuf) falls outside the visible chunk.
 */
300 static int
301 dsl_dataset_get_snapname(dsl_dataset_t *ds)
302 {
303 dsl_dataset_phys_t *headphys;
304 int err;
305 dmu_buf_t *headdbuf;
306 dsl_pool_t *dp = ds->ds_dir->dd_pool;
307 objset_t *mos = dp->dp_meta_objset;
308
/* Name already resolved on a previous call — nothing to do. */
309 if (ds->ds_snapname[0])
310 return (0);
/* No next-snapshot object: nothing to resolve. */
311 if (ds->ds_phys->ds_next_snap_obj == 0)
312 return (0);
378
/*
 * Fragment of the dataset open path (presumably dsl_dataset_get_ref()
 * — the function header is outside this chunk). Holds the dataset's
 * bonus buffer, then constructs and registers a dsl_dataset_t as the
 * dbuf's user if one is not already attached.
 */
379 err = dmu_bonus_hold(mos, dsobj, tag, &dbuf);
380 if (err)
381 return (err);
382
383 /* Make sure dsobj has the correct object type. */
384 dmu_object_info_from_db(dbuf, &doi);
/*
 * NOTE(review): this returns without dmu_buf_rele(dbuf, tag), leaking
 * the bonus-buffer hold taken just above — confirm against the
 * upstream fix for this path.
 */
385 if (doi.doi_type != DMU_OT_DSL_DATASET)
386 return (EINVAL);
387
388 ds = dmu_buf_get_user(dbuf);
389 if (ds == NULL) {
390 dsl_dataset_t *winner;
391
/* First opener: build the in-core dataset from the phys data. */
392 ds = kmem_zalloc(sizeof (dsl_dataset_t), KM_SLEEP);
393 ds->ds_dbuf = dbuf;
394 ds->ds_object = dsobj;
395 ds->ds_phys = dbuf->db_data;
396
397 mutex_init(&ds->ds_lock, NULL, MUTEX_DEFAULT, NULL);
398 mutex_init(&ds->ds_recvlock, NULL, MUTEX_DEFAULT, NULL);
399 mutex_init(&ds->ds_opening_lock, NULL, MUTEX_DEFAULT, NULL);
400 mutex_init(&ds->ds_sendstream_lock, NULL, MUTEX_DEFAULT, NULL);
401
402 rw_init(&ds->ds_rwlock, 0, 0, 0);
403 cv_init(&ds->ds_exclusive_cv, NULL, CV_DEFAULT, NULL);
404
405 bplist_create(&ds->ds_pending_deadlist);
406 dsl_deadlist_open(&ds->ds_deadlist,
407 mos, ds->ds_phys->ds_deadlist_obj);
408
409 list_create(&ds->ds_sendstreams, sizeof (dmu_sendarg_t),
410 offsetof(dmu_sendarg_t, dsa_link));
411
412 if (err == 0) {
413 err = dsl_dir_open_obj(dp,
414 ds->ds_phys->ds_dir_obj, NULL, ds, &ds->ds_dir);
415 }
/*
 * Undo construction on failure.
 * NOTE(review): ds_sendstream_lock is not mutex_destroy()ed and
 * ds_sendstreams is not list_destroy()ed on this path — confirm.
 */
416 if (err) {
417 mutex_destroy(&ds->ds_lock);
418 mutex_destroy(&ds->ds_recvlock);
419 mutex_destroy(&ds->ds_opening_lock);
420 rw_destroy(&ds->ds_rwlock);
421 cv_destroy(&ds->ds_exclusive_cv);
422 bplist_destroy(&ds->ds_pending_deadlist);
423 dsl_deadlist_close(&ds->ds_deadlist);
424 kmem_free(ds, sizeof (dsl_dataset_t));
425 dmu_buf_rele(dbuf, tag);
426 return (err);
427 }
428
/*
 * Heads cache a ref on their previous snapshot; snapshots may eagerly
 * resolve their name when snapname debugging is enabled.
 */
429 if (!dsl_dataset_is_snapshot(ds)) {
430 ds->ds_snapname[0] = '\0';
431 if (ds->ds_phys->ds_prev_snap_obj) {
432 err = dsl_dataset_get_ref(dp,
433 ds->ds_phys->ds_prev_snap_obj,
434 ds, &ds->ds_prev);
435 }
436 } else {
437 if (zfs_flags & ZFS_DEBUG_SNAPNAMES)
438 err = dsl_dataset_get_snapname(ds);
/*
 * NOTE(review): original lines 439-465 are elided from this chunk
 * (presumably the refreservation/refquota property lookup that fills
 * ds_reserved/ds_quota — confirm against the full file).
 */
466 &ds->ds_quota, NULL);
467 }
468
469 if (need_lock)
470 rw_exit(&dp->dp_config_rwlock);
471 } else {
472 ds->ds_reserved = ds->ds_quota = 0;
473 }
474
/*
 * Atomically attach ds as the dbuf's user; a non-NULL winner means
 * another thread raced us and attached its own dsl_dataset_t first.
 */
475 if (err == 0) {
476 winner = dmu_buf_set_user_ie(dbuf, ds, &ds->ds_phys,
477 dsl_dataset_evict);
478 }
/* Lost the race (or failed): dismantle our copy and use the winner's. */
479 if (err || winner) {
480 bplist_destroy(&ds->ds_pending_deadlist);
481 dsl_deadlist_close(&ds->ds_deadlist);
482 if (ds->ds_prev)
483 dsl_dataset_drop_ref(ds->ds_prev, ds);
484 dsl_dir_close(ds->ds_dir, ds);
485 mutex_destroy(&ds->ds_lock);
486 mutex_destroy(&ds->ds_recvlock);
487 mutex_destroy(&ds->ds_opening_lock);
488 rw_destroy(&ds->ds_rwlock);
489 cv_destroy(&ds->ds_exclusive_cv);
490 kmem_free(ds, sizeof (dsl_dataset_t));
491 if (err) {
492 dmu_buf_rele(dbuf, tag);
493 return (err);
494 }
495 ds = winner;
496 } else {
497 ds->ds_fsid_guid =
498 unique_insert(ds->ds_phys->ds_fsid_guid);
499 }
500 }
501 ASSERT3P(ds->ds_dbuf, ==, dbuf);
502 ASSERT3P(ds->ds_phys, ==, dbuf->db_data);
/* Only the origin snapshot may lack a previous snapshot post-ORIGIN. */
503 ASSERT(ds->ds_phys->ds_prev_snap_obj != 0 ||
504 spa_version(dp->dp_spa) < SPA_VERSION_ORIGIN ||
505 dp->dp_origin_snap == NULL || ds == dp->dp_origin_snap);
506 mutex_enter(&ds->ds_lock);
/*
 * Tail of the hold path (function header is outside this chunk;
 * presumably dsl_dataset_hold_ref() — confirm). Syncing context needs
 * no rwlock handshake; everyone else must win ds_rwlock as READER.
 */
529 if (dsl_pool_sync_context(dp)) {
530 ASSERT(!DSL_DATASET_IS_DESTROYED(ds));
531 return (0);
532 }
533
534 /*
535 * Normal users will hold the ds_rwlock as a READER until they
536 * are finished (i.e., call dsl_dataset_rele()). "Owners" will
537 * drop their READER lock after they set the ds_owner field.
538 *
539 * If the dataset is being destroyed, the destroy thread will
540 * obtain a WRITER lock for exclusive access after it's done its
541 * open-context work and then change the ds_owner to
542 * dsl_reaper once destruction is assured. So threads
543 * may block here temporarily, until the "destructability" of
544 * the dataset is determined.
545 */
546 ASSERT(!RW_WRITE_HELD(&dp->dp_config_rwlock));
547 mutex_enter(&ds->ds_lock);
548 while (!rw_tryenter(&ds->ds_rwlock, RW_READER)) {
/*
 * Drop dp_config_rwlock before sleeping so the (writer-holding)
 * destroy thread can make progress and eventually signal us.
 */
549 rw_exit(&dp->dp_config_rwlock);
550 cv_wait(&ds->ds_exclusive_cv, &ds->ds_lock);
/* Woken because destruction completed: drop our ref and bail. */
551 if (DSL_DATASET_IS_DESTROYED(ds)) {
552 mutex_exit(&ds->ds_lock);
553 dsl_dataset_drop_ref(ds, tag);
554 rw_enter(&dp->dp_config_rwlock, RW_READER);
555 return (ENOENT);
556 }
557 /*
558 * The dp_config_rwlock lives above the ds_lock. And
559 * we need to check DSL_DATASET_IS_DESTROYED() while
560 * holding the ds_lock, so we have to drop and reacquire
561 * the ds_lock here.
562 */
563 mutex_exit(&ds->ds_lock);
564 rw_enter(&dp->dp_config_rwlock, RW_READER);
565 mutex_enter(&ds->ds_lock);
566 }
567 mutex_exit(&ds->ds_lock);
568 return (0);
569 }
570
571 int
572 dsl_dataset_hold_obj(dsl_pool_t *dp, uint64_t dsobj, void *tag,
573 dsl_dataset_t **dsp)
574 {
575 int err = dsl_dataset_get_ref(dp, dsobj, tag, dsp);
|
/*
 * Tail of dsl_dataset_evict() (second variant in this chunk; note it
 * no longer touches a ds_recvlock). Tears down all state hanging off
 * the dsl_dataset_t and frees it.
 * NOTE(review): the function header is outside the visible chunk.
 */
272 dmu_objset_evict(ds->ds_objset);
273
/* Drop the cached reference on the previous snapshot, if any. */
274 if (ds->ds_prev) {
275 dsl_dataset_drop_ref(ds->ds_prev, ds);
276 ds->ds_prev = NULL;
277 }
278
279 bplist_destroy(&ds->ds_pending_deadlist);
/* Close the deadlist only if it was opened (db != NULL). */
280 if (db != NULL) {
281 dsl_deadlist_close(&ds->ds_deadlist);
282 } else {
283 ASSERT(ds->ds_deadlist.dl_dbuf == NULL);
284 ASSERT(!ds->ds_deadlist.dl_oldfmt);
285 }
286 if (ds->ds_dir)
287 dsl_dir_close(ds->ds_dir, ds);
288
289 ASSERT(!list_link_active(&ds->ds_synced_link));
290
/*
 * NOTE(review): ds_sendstream_lock (initialized in the open path) and
 * the ds_sendstreams list are not destroyed here — possible teardown
 * omission; confirm against upstream.
 */
291 mutex_destroy(&ds->ds_lock);
292 mutex_destroy(&ds->ds_opening_lock);
293 rw_destroy(&ds->ds_rwlock);
294 cv_destroy(&ds->ds_exclusive_cv);
295
296 kmem_free(ds, sizeof (dsl_dataset_t));
297 }
298
/*
 * Resolve and cache this snapshot's name in ds->ds_snapname.
 * Returns 0 immediately when the name is already cached or when there
 * is no next-snapshot object to resolve against.
 * NOTE(review): the remainder of the body (the actual MOS lookup using
 * headphys/headdbuf) falls outside the visible chunk.
 */
299 static int
300 dsl_dataset_get_snapname(dsl_dataset_t *ds)
301 {
302 dsl_dataset_phys_t *headphys;
303 int err;
304 dmu_buf_t *headdbuf;
305 dsl_pool_t *dp = ds->ds_dir->dd_pool;
306 objset_t *mos = dp->dp_meta_objset;
307
/* Name already resolved on a previous call — nothing to do. */
308 if (ds->ds_snapname[0])
309 return (0);
/* No next-snapshot object: nothing to resolve. */
310 if (ds->ds_phys->ds_next_snap_obj == 0)
311 return (0);
377
/*
 * Fragment of the dataset open path (second variant; presumably
 * dsl_dataset_get_ref() — the function header is outside this chunk).
 * Holds the dataset's bonus buffer, then constructs and registers a
 * dsl_dataset_t as the dbuf's user if one is not already attached.
 */
378 err = dmu_bonus_hold(mos, dsobj, tag, &dbuf);
379 if (err)
380 return (err);
381
382 /* Make sure dsobj has the correct object type. */
383 dmu_object_info_from_db(dbuf, &doi);
/*
 * NOTE(review): this returns without dmu_buf_rele(dbuf, tag), leaking
 * the bonus-buffer hold taken just above — confirm against the
 * upstream fix for this path.
 */
384 if (doi.doi_type != DMU_OT_DSL_DATASET)
385 return (EINVAL);
386
387 ds = dmu_buf_get_user(dbuf);
388 if (ds == NULL) {
389 dsl_dataset_t *winner;
390
/* First opener: build the in-core dataset from the phys data. */
391 ds = kmem_zalloc(sizeof (dsl_dataset_t), KM_SLEEP);
392 ds->ds_dbuf = dbuf;
393 ds->ds_object = dsobj;
394 ds->ds_phys = dbuf->db_data;
395
396 mutex_init(&ds->ds_lock, NULL, MUTEX_DEFAULT, NULL);
397 mutex_init(&ds->ds_opening_lock, NULL, MUTEX_DEFAULT, NULL);
398 mutex_init(&ds->ds_sendstream_lock, NULL, MUTEX_DEFAULT, NULL);
399
400 rw_init(&ds->ds_rwlock, 0, 0, 0);
401 cv_init(&ds->ds_exclusive_cv, NULL, CV_DEFAULT, NULL);
402
403 bplist_create(&ds->ds_pending_deadlist);
404 dsl_deadlist_open(&ds->ds_deadlist,
405 mos, ds->ds_phys->ds_deadlist_obj);
406
407 list_create(&ds->ds_sendstreams, sizeof (dmu_sendarg_t),
408 offsetof(dmu_sendarg_t, dsa_link));
409
410 if (err == 0) {
411 err = dsl_dir_open_obj(dp,
412 ds->ds_phys->ds_dir_obj, NULL, ds, &ds->ds_dir);
413 }
/*
 * Undo construction on failure.
 * NOTE(review): ds_sendstream_lock is not mutex_destroy()ed and
 * ds_sendstreams is not list_destroy()ed on this path — confirm.
 */
414 if (err) {
415 mutex_destroy(&ds->ds_lock);
416 mutex_destroy(&ds->ds_opening_lock);
417 rw_destroy(&ds->ds_rwlock);
418 cv_destroy(&ds->ds_exclusive_cv);
419 bplist_destroy(&ds->ds_pending_deadlist);
420 dsl_deadlist_close(&ds->ds_deadlist);
421 kmem_free(ds, sizeof (dsl_dataset_t));
422 dmu_buf_rele(dbuf, tag);
423 return (err);
424 }
425
/*
 * Heads cache a ref on their previous snapshot; snapshots may eagerly
 * resolve their name when snapname debugging is enabled.
 */
426 if (!dsl_dataset_is_snapshot(ds)) {
427 ds->ds_snapname[0] = '\0';
428 if (ds->ds_phys->ds_prev_snap_obj) {
429 err = dsl_dataset_get_ref(dp,
430 ds->ds_phys->ds_prev_snap_obj,
431 ds, &ds->ds_prev);
432 }
433 } else {
434 if (zfs_flags & ZFS_DEBUG_SNAPNAMES)
435 err = dsl_dataset_get_snapname(ds);
/*
 * NOTE(review): original lines 436-462 are elided from this chunk
 * (presumably the refreservation/refquota property lookup that fills
 * ds_reserved/ds_quota — confirm against the full file).
 */
463 &ds->ds_quota, NULL);
464 }
465
466 if (need_lock)
467 rw_exit(&dp->dp_config_rwlock);
468 } else {
469 ds->ds_reserved = ds->ds_quota = 0;
470 }
471
/*
 * Atomically attach ds as the dbuf's user; a non-NULL winner means
 * another thread raced us and attached its own dsl_dataset_t first.
 */
472 if (err == 0) {
473 winner = dmu_buf_set_user_ie(dbuf, ds, &ds->ds_phys,
474 dsl_dataset_evict);
475 }
/* Lost the race (or failed): dismantle our copy and use the winner's. */
476 if (err || winner) {
477 bplist_destroy(&ds->ds_pending_deadlist);
478 dsl_deadlist_close(&ds->ds_deadlist);
479 if (ds->ds_prev)
480 dsl_dataset_drop_ref(ds->ds_prev, ds);
481 dsl_dir_close(ds->ds_dir, ds);
482 mutex_destroy(&ds->ds_lock);
483 mutex_destroy(&ds->ds_opening_lock);
484 rw_destroy(&ds->ds_rwlock);
485 cv_destroy(&ds->ds_exclusive_cv);
486 kmem_free(ds, sizeof (dsl_dataset_t));
487 if (err) {
488 dmu_buf_rele(dbuf, tag);
489 return (err);
490 }
491 ds = winner;
492 } else {
493 ds->ds_fsid_guid =
494 unique_insert(ds->ds_phys->ds_fsid_guid);
495 }
496 }
497 ASSERT3P(ds->ds_dbuf, ==, dbuf);
498 ASSERT3P(ds->ds_phys, ==, dbuf->db_data);
/* Only the origin snapshot may lack a previous snapshot post-ORIGIN. */
499 ASSERT(ds->ds_phys->ds_prev_snap_obj != 0 ||
500 spa_version(dp->dp_spa) < SPA_VERSION_ORIGIN ||
501 dp->dp_origin_snap == NULL || ds == dp->dp_origin_snap);
502 mutex_enter(&ds->ds_lock);
/*
 * Tail of the hold path (second variant; function header is outside
 * this chunk — presumably dsl_dataset_hold_ref(), confirm). Syncing
 * context needs no rwlock handshake; everyone else must win ds_rwlock
 * as READER. Unlike the other variant, the wait here is interruptible.
 */
525 if (dsl_pool_sync_context(dp)) {
526 ASSERT(!DSL_DATASET_IS_DESTROYED(ds));
527 return (0);
528 }
529
530 /*
531 * Normal users will hold the ds_rwlock as a READER until they
532 * are finished (i.e., call dsl_dataset_rele()). "Owners" will
533 * drop their READER lock after they set the ds_owner field.
534 *
535 * If the dataset is being destroyed, the destroy thread will
536 * obtain a WRITER lock for exclusive access after it's done its
537 * open-context work and then change the ds_owner to
538 * dsl_reaper once destruction is assured. So threads
539 * may block here temporarily, until the "destructability" of
540 * the dataset is determined.
541 */
542 ASSERT(!RW_WRITE_HELD(&dp->dp_config_rwlock));
543 mutex_enter(&ds->ds_lock);
544 while (!rw_tryenter(&ds->ds_rwlock, RW_READER)) {
545 int rc;
546
/*
 * Drop dp_config_rwlock before sleeping so the (writer-holding)
 * destroy thread can make progress and eventually signal us.
 */
547 rw_exit(&dp->dp_config_rwlock);
/*
 * cv_wait_sig() returns 0 when the sleep was interrupted by a
 * signal. Bail out with EINTR on interruption, or ENOENT if we
 * were woken because the dataset was destroyed; either way drop
 * the ref taken by our caller.
 */
548 rc = cv_wait_sig(&ds->ds_exclusive_cv, &ds->ds_lock);
549 if (!rc || DSL_DATASET_IS_DESTROYED(ds)) {
550 mutex_exit(&ds->ds_lock);
551 dsl_dataset_drop_ref(ds, tag);
552 rw_enter(&dp->dp_config_rwlock, RW_READER);
553 return (rc ? ENOENT : EINTR);
554 }
555 /*
556 * The dp_config_rwlock lives above the ds_lock. And
557 * we need to check DSL_DATASET_IS_DESTROYED() while
558 * holding the ds_lock, so we have to drop and reacquire
559 * the ds_lock here.
560 */
561 mutex_exit(&ds->ds_lock);
562 rw_enter(&dp->dp_config_rwlock, RW_READER);
563 mutex_enter(&ds->ds_lock);
564 }
565 mutex_exit(&ds->ds_lock);
566 return (0);
567 }
568
569 int
570 dsl_dataset_hold_obj(dsl_pool_t *dp, uint64_t dsobj, void *tag,
571 dsl_dataset_t **dsp)
572 {
573 int err = dsl_dataset_get_ref(dp, dsobj, tag, dsp);
|