zpool import speedup
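The hunk below, from zil_claim() and zil_check_log_chain() in zil.c, is shown twice: first in its unmodified form and then with the change applied, where dmu_objset_own() and dmu_objset_hold() are replaced by _nolock variants. For context, here is a rough sketch (not part of the change; the wrapper name is made up, the call sites approximate spa_load()/spa_check_logs()) of how these two callbacks are driven during import. Both are run once per dataset via dmu_objset_find(), so any locking inside the objset own/hold path is paid for every dataset in the pool:

    /*
     * Sketch only: approximates the import-time call sites that invoke the
     * two callbacks shown below, once per dataset in the pool.
     */
    static void
    claim_and_check_logs_sketch(spa_t *spa, dmu_tx_t *tx)
    {
            /* claim any uncommitted intent-log blocks, dataset by dataset */
            (void) dmu_objset_find(spa_name(spa), zil_claim, tx,
                DS_FIND_CHILDREN);

            /* walk each dataset's log chain; a failure marks the log missing */
            if (dmu_objset_find(spa_name(spa), zil_check_log_chain, NULL,
                DS_FIND_CHILDREN) != 0)
                    spa_set_log_state(spa, SPA_LOG_MISSING);
    }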


 616 }
 617 
 618 void
 619 zil_destroy_sync(zilog_t *zilog, dmu_tx_t *tx)
 620 {
 621         ASSERT(list_is_empty(&zilog->zl_lwb_list));
 622         (void) zil_parse(zilog, zil_free_log_block,
 623             zil_free_log_record, tx, zilog->zl_header->zh_claim_txg);
 624 }
 625 
 626 int
 627 zil_claim(const char *osname, void *txarg)
 628 {
 629         dmu_tx_t *tx = txarg;
 630         uint64_t first_txg = dmu_tx_get_txg(tx);
 631         zilog_t *zilog;
 632         zil_header_t *zh;
 633         objset_t *os;
 634         int error;
 635 
 636         error = dmu_objset_own(osname, DMU_OST_ANY, B_FALSE, FTAG, &os);
 637         if (error != 0) {
 638                 cmn_err(CE_WARN, "can't open objset for %s", osname);
 639                 return (0);
 640         }
 641 
 642         zilog = dmu_objset_zil(os);
 643         zh = zil_header_in_syncing_context(zilog);
 644 
 645         if (spa_get_log_state(zilog->zl_spa) == SPA_LOG_CLEAR) {
 646                 if (!BP_IS_HOLE(&zh->zh_log))
 647                         zio_free_zil(zilog->zl_spa, first_txg, &zh->zh_log);
 648                 BP_ZERO(&zh->zh_log);
 649                 dsl_dataset_dirty(dmu_objset_ds(os), tx);
 650                 dmu_objset_disown(os, FTAG);
 651                 return (0);
 652         }
 653 
 654         /*
 655          * Claim all log blocks if we haven't already done so, and remember
 656          * the highest claimed sequence number.  This ensures that if we can


 674         ASSERT3U(first_txg, ==, (spa_last_synced_txg(zilog->zl_spa) + 1));
 675         dmu_objset_disown(os, FTAG);
 676         return (0);
 677 }
 678 
 679 /*
 680  * Check the log by walking the log chain.
 681  * Checksum errors are ok as they indicate the end of the chain.
 682  * Any other error (no device or read failure) returns an error.
 683  */
 684 int
 685 zil_check_log_chain(const char *osname, void *tx)
 686 {
 687         zilog_t *zilog;
 688         objset_t *os;
 689         blkptr_t *bp;
 690         int error;
 691 
 692         ASSERT(tx == NULL);
 693 
 694         error = dmu_objset_hold(osname, FTAG, &os);
 695         if (error != 0) {
 696                 cmn_err(CE_WARN, "can't open objset for %s", osname);
 697                 return (0);
 698         }
 699 
 700         zilog = dmu_objset_zil(os);
 701         bp = (blkptr_t *)&zilog->zl_header->zh_log;
 702 
 703         /*
 704          * Check the first block and determine if it's on a log device
 705          * which may have been removed or faulted prior to loading this
 706          * pool.  If so, there's no point in checking the rest of the log
 707          * as its content should have already been synced to the pool.
 708          */
 709         if (!BP_IS_HOLE(bp)) {
 710                 vdev_t *vd;
 711                 boolean_t valid = B_TRUE;
 712 
 713                 spa_config_enter(os->os_spa, SCL_STATE, FTAG, RW_READER);
 714                 vd = vdev_lookup_top(os->os_spa, DVA_GET_VDEV(&bp->blk_dva[0]));




 616 }
 617 
 618 void
 619 zil_destroy_sync(zilog_t *zilog, dmu_tx_t *tx)
 620 {
 621         ASSERT(list_is_empty(&zilog->zl_lwb_list));
 622         (void) zil_parse(zilog, zil_free_log_block,
 623             zil_free_log_record, tx, zilog->zl_header->zh_claim_txg);
 624 }
 625 
 626 int
 627 zil_claim(const char *osname, void *txarg)
 628 {
 629         dmu_tx_t *tx = txarg;
 630         uint64_t first_txg = dmu_tx_get_txg(tx);
 631         zilog_t *zilog;
 632         zil_header_t *zh;
 633         objset_t *os;
 634         int error;
 635 
 636         error = dmu_objset_own_nolock(osname, DMU_OST_ANY, B_FALSE, FTAG, &os);
 637         if (error != 0) {
 638                 cmn_err(CE_WARN, "can't open objset for %s", osname);
 639                 return (0);
 640         }
 641 
 642         zilog = dmu_objset_zil(os);
 643         zh = zil_header_in_syncing_context(zilog);
 644 
 645         if (spa_get_log_state(zilog->zl_spa) == SPA_LOG_CLEAR) {
 646                 if (!BP_IS_HOLE(&zh->zh_log))
 647                         zio_free_zil(zilog->zl_spa, first_txg, &zh->zh_log);
 648                 BP_ZERO(&zh->zh_log);
 649                 dsl_dataset_dirty(dmu_objset_ds(os), tx);
 650                 dmu_objset_disown(os, FTAG);
 651                 return (0);
 652         }
 653 
 654         /*
 655          * Claim all log blocks if we haven't already done so, and remember
 656          * the highest claimed sequence number.  This ensures that if we can


 674         ASSERT3U(first_txg, ==, (spa_last_synced_txg(zilog->zl_spa) + 1));
 675         dmu_objset_disown(os, FTAG);
 676         return (0);
 677 }
 678 
 679 /*
 680  * Check the log by walking the log chain.
 681  * Checksum errors are ok as they indicate the end of the chain.
 682  * Any other error (no device or read failure) returns an error.
 683  */
 684 int
 685 zil_check_log_chain(const char *osname, void *tx)
 686 {
 687         zilog_t *zilog;
 688         objset_t *os;
 689         blkptr_t *bp;
 690         int error;
 691 
 692         ASSERT(tx == NULL);
 693 
 694         error = dmu_objset_hold_nolock(osname, FTAG, &os);
 695         if (error != 0) {
 696                 cmn_err(CE_WARN, "can't open objset for %s", osname);
 697                 return (0);
 698         }
 699 
 700         zilog = dmu_objset_zil(os);
 701         bp = (blkptr_t *)&zilog->zl_header->zh_log;
 702 
 703         /*
 704          * Check the first block and determine if it's on a log device
 705          * which may have been removed or faulted prior to loading this
 706          * pool.  If so, there's no point in checking the rest of the log
 707          * as its content should have already been synced to the pool.
 708          */
 709         if (!BP_IS_HOLE(bp)) {
 710                 vdev_t *vd;
 711                 boolean_t valid = B_TRUE;
 712 
 713                 spa_config_enter(os->os_spa, SCL_STATE, FTAG, RW_READER);
 714                 vd = vdev_lookup_top(os->os_spa, DVA_GET_VDEV(&bp->blk_dva[0]));