From 5a058deb32d10d6d0cc2fbd0110fdc7e9f42cf0e Mon Sep 17 00:00:00 2001
From: rjl493456442
Date: Fri, 11 Sep 2020 15:27:39 +0800
Subject: [PATCH 01/19] core/state/snapshot: introduce snapshot journal version

---
 core/state/snapshot/journal.go  | 20 ++++++++++++++++++++
 core/state/snapshot/snapshot.go |  6 ++++++
 2 files changed, 26 insertions(+)

diff --git a/core/state/snapshot/journal.go b/core/state/snapshot/journal.go
index fc1053f818d6..4f47d7c94097 100644
--- a/core/state/snapshot/journal.go
+++ b/core/state/snapshot/journal.go
@@ -33,6 +33,8 @@ import (
 	"github.com/ethereum/go-ethereum/trie"
 )
 
+const journalVersion uint64 = 0
+
 // journalGenerator is a disk layer entry containing the generator progress marker.
 type journalGenerator struct {
 	Wiping bool // Whether the database was in progress of being wiped
@@ -79,6 +81,9 @@ func loadSnapshot(diskdb ethdb.KeyValueStore, triedb *trie.Database, cache int,
 	// we've already generated the snapshot or are in progress only
 	journal := rawdb.ReadSnapshotJournal(diskdb)
 	if len(journal) == 0 {
+		// Scenario: the disk layer is still generating while there
+		// is no journal persisted at all. In this case, try to
+		// reuse the existing disk layer if possible.
 		return nil, errors.New("missing or corrupted snapshot journal")
 	}
 	r := rlp.NewStream(bytes.NewReader(journal), 0)
@@ -96,6 +101,16 @@ func loadSnapshot(diskdb ethdb.KeyValueStore, triedb *trie.Database, cache int,
 	// Entire snapshot journal loaded, sanity check the head and return
 	// Journal doesn't exist, don't worry if it's not supposed to
 	if head := snapshot.Root(); head != root {
+		// If the loaded snapshot does not match the current
+		// state root, try to recover the "almost broken" snapshot.
+
+		// Scenario a: Geth crashed and was then restarted; in this
+		// case the head is rewound to the point with available
+		// state (trie), so the head block may be lower than the snapshot head
+
+		// Scenario b: Geth crashed and was then restarted, but the
+		// rewound head is higher than the snapshot.
+
 		return nil, fmt.Errorf("head doesn't match snapshot: have %#x, want %#x", head, root)
 	}
 	// Everything loaded correctly, resume any suspended operations
@@ -203,6 +218,11 @@ func (dl *diskLayer) Journal(buffer *bytes.Buffer) (common.Hash, error) {
 	if dl.stale {
 		return common.Hash{}, ErrSnapshotStale
 	}
+	// Firstly write out the root of the disk layer. The
+	// root is the state root of some canonical block.
+	if err := rlp.Encode(buffer, dl.root); err != nil {
+		return common.Hash{}, err
+	}
 	// Write out the generator marker
 	entry := journalGenerator{
 		Done:   dl.genMarker == nil,
diff --git a/core/state/snapshot/snapshot.go b/core/state/snapshot/snapshot.go
index 4f496b4ff64f..c7869d7decf9 100644
--- a/core/state/snapshot/snapshot.go
+++ b/core/state/snapshot/snapshot.go
@@ -29,6 +29,7 @@ import (
 	"github.com/ethereum/go-ethereum/ethdb"
 	"github.com/ethereum/go-ethereum/log"
 	"github.com/ethereum/go-ethereum/metrics"
+	"github.com/ethereum/go-ethereum/rlp"
 	"github.com/ethereum/go-ethereum/trie"
 )
 
@@ -554,7 +555,12 @@ func (t *Tree) Journal(root common.Hash) (common.Hash, error) {
 	t.lock.Lock()
 	defer t.lock.Unlock()
 
+	// Firstly write out the journal metadata
 	journal := new(bytes.Buffer)
+	if err := rlp.Encode(journal, journalVersion); err != nil {
+		return common.Hash{}, err
+	}
+	// Secondly write out the journal of each layer in reverse order.
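+	// (At this point the journal is thus, roughly, an RLP stream of:
+	// the version, then the disk layer's root and generator marker,
+	// then one entry per diff layer from bottom to top.)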
 	base, err := snap.(snapshot).Journal(journal)
 	if err != nil {
 		return common.Hash{}, err

From c0d3cce1354c323cea3d6dd740500b530b4ae332 Mon Sep 17 00:00:00 2001
From: rjl493456442
Date: Mon, 14 Sep 2020 14:50:36 +0800
Subject: [PATCH 02/19] core: update the disk layer in an atomic way

---
 core/blockchain.go              |  5 +++++
 core/state/snapshot/snapshot.go | 23 +++++++++--------------
 2 files changed, 14 insertions(+), 14 deletions(-)

diff --git a/core/blockchain.go b/core/blockchain.go
index 356340ae19df..689e9a7d0f57 100644
--- a/core/blockchain.go
+++ b/core/blockchain.go
@@ -285,6 +285,11 @@ func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, chainConfig *par
 		if err := bc.SetHead(head.NumberU64()); err != nil {
 			return nil, err
 		}
+		// If the blockchain is already rewound to a lower point, in
+		// this case we hold the strong assumption that the snapshot
+		// journal is broken if it exists.
+		//
+		// todo return all deleted blocks.
 	}
 	// Ensure that a previous crash in SetHead doesn't leave extra ancients
 	if frozen, err := bc.db.Ancients(); err == nil && frozen > 0 {
diff --git a/core/state/snapshot/snapshot.go b/core/state/snapshot/snapshot.go
index c7869d7decf9..27a91a33376e 100644
--- a/core/state/snapshot/snapshot.go
+++ b/core/state/snapshot/snapshot.go
@@ -416,6 +416,9 @@ func (t *Tree) cap(diff *diffLayer, layers int) *diskLayer {
 
 // diffToDisk merges a bottom-most diff into the persistent disk layer underneath
 // it. The method will panic if called onto a non-bottom-most diff layer.
+//
+// The disk layer persistence should be operated in an atomic way. All updates should
+// be discarded if the whole transition is not finished.
 func diffToDisk(bottom *diffLayer) *diskLayer {
 	var (
 		base  = bottom.parent.(*diskLayer)
@@ -428,8 +431,7 @@ func diffToDisk(bottom *diffLayer) *diskLayer {
 		base.genAbort <- abort
 		stats = <-abort
 	}
-	// Start by temporarily deleting the current snapshot block marker. This
-	// ensures that in the case of a crash, the entire snapshot is invalidated.
+	// Put the deletion in the batch writer, flush all updates in the final step.
 	rawdb.DeleteSnapshotRoot(batch)
 
 	// Mark the original base as stale as we're going to create a new wrapper
@@ -472,12 +474,6 @@ func diffToDisk(bottom *diffLayer) *diskLayer {
 		base.cache.Set(hash[:], data)
 		snapshotCleanAccountWriteMeter.Mark(int64(len(data)))
 
-		if batch.ValueSize() > ethdb.IdealBatchSize {
-			if err := batch.Write(); err != nil {
-				log.Crit("Failed to write account snapshot", "err", err)
-			}
-			batch.Reset()
-		}
 		snapshotFlushAccountItemMeter.Mark(1)
 		snapshotFlushAccountSizeMeter.Mark(int64(len(data)))
 	}
@@ -506,15 +502,14 @@ func diffToDisk(bottom *diffLayer) *diskLayer {
 			snapshotFlushStorageItemMeter.Mark(1)
 			snapshotFlushStorageSizeMeter.Mark(int64(len(data)))
 		}
-		if batch.ValueSize() > ethdb.IdealBatchSize {
-			if err := batch.Write(); err != nil {
-				log.Crit("Failed to write storage snapshot", "err", err)
-			}
-			batch.Reset()
-		}
 	}
 	// Update the snapshot block marker and write any remainder data
 	rawdb.WriteSnapshotRoot(batch, bottom.root)
+
+	// todo write snapshot generator
+
+	// Flush all the updates in a single db operation. Ensure the
+	// disk layer transition is atomic.
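+	// E.g. a crash after DeleteSnapshotRoot above but before this final
+	// write would otherwise leave snapshot data on disk with no root
+	// marker to validate it against.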
 	if err := batch.Write(); err != nil {
 		log.Crit("Failed to write leftover snapshot", "err", err)
 	}

From c37009fedfe42dbdfc876322dec44a28da8d3979 Mon Sep 17 00:00:00 2001
From: rjl493456442
Date: Mon, 14 Sep 2020 15:21:34 +0800
Subject: [PATCH 03/19] core: persist the disk layer generator periodically

---
 core/blockchain.go               |  56 +++++++++++---
 core/rawdb/accessors_snapshot.go |  23 ++++++
 core/rawdb/schema.go             |   3 +
 core/state/snapshot/journal.go   | 128 ++++++++++++++++++++----------
 core/state/snapshot/snapshot.go  |  26 ++++++-
 5 files changed, 185 insertions(+), 51 deletions(-)

diff --git a/core/blockchain.go b/core/blockchain.go
index 689e9a7d0f57..d28a28d986dc 100644
--- a/core/blockchain.go
+++ b/core/blockchain.go
@@ -281,15 +281,28 @@ func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, chainConfig *par
 	// Make sure the state associated with the block is available
 	head := bc.CurrentBlock()
 	if _, err := state.New(head.Root(), bc.stateCache, bc.snaps); err != nil {
+		// Head state is missing, before the state recovery, find out the
+		// disk layer point of the snapshot (if it's enabled). Make sure the
+		// rewound point is lower then disk layer.
+		var diskRoot common.Hash
+		if bc.cacheConfig.SnapshotLimit > 0 {
+			diskRoot = rawdb.ReadSnapshotRoot(bc.db)
+		}
 		log.Warn("Head state missing, repairing", "number", head.Number(), "hash", head.Hash())
-		if err := bc.SetHead(head.NumberU64()); err != nil {
-			return nil, err
+		if diskRoot != (common.Hash{}) {
+			var marked bool
+			if err := bc.SetHeadWithCondition(head.NumberU64(), func(block *types.Block, canStop *bool) {
+				if block.Root() == diskRoot && !marked {
+					marked, *canStop = true, true
+				}
+			}); err != nil {
+				return nil, err
+			}
+		} else {
+			if err := bc.SetHead(head.NumberU64()); err != nil {
+				return nil, err
+			}
 		}
-		// If the blockchain is already rewound to a lower point, in
-		// this case we hold the strong assumption that the snapshot
-		// journal is broken if it exists.
-		//
-		// todo return all deleted blocks.
 	}
 	// Ensure that a previous crash in SetHead doesn't leave extra ancients
 	if frozen, err := bc.db.Ancients(); err == nil && frozen > 0 {
@@ -445,10 +458,10 @@ func (bc *BlockChain) loadLastState() error {
 	return nil
 }
 
-// SetHead rewinds the local chain to a new head. Depending on whether the node
+// setHead rewinds the local chain to a new head. Depending on whether the node
 // was fast synced or full synced and in which state, the method will try to
 // delete minimal data from disk whilst retaining chain consistency.
-func (bc *BlockChain) SetHead(head uint64) error {
+func (bc *BlockChain) setHead(head uint64, onDelete func(block *types.Block, canStop *bool)) error {
 	bc.chainmu.Lock()
 	defer bc.chainmu.Unlock()
 
@@ -468,8 +481,14 @@ func (bc *BlockChain) setHead(head uint64, onDelete func(block *types.Block, can
 			newHeadBlock = bc.genesisBlock
 		} else {
 			// Block exists, keep rewinding until we find one with state
+			var canStop = onDelete == nil
 			for {
 				if _, err := state.New(newHeadBlock.Root(), bc.stateCache, bc.snaps); err != nil {
+					// If a callback was passed in for determining
+					// whether the rewinding should stop, invoke it.
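+					// The callback may flip canStop to true once it has
+					// seen the block it cares about, e.g. NewBlockChain
+					// above stops at the block whose root matches the
+					// snapshot's disk layer root.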
+					if onDelete != nil {
+						onDelete(newHeadBlock, &canStop)
+					}
 					log.Trace("Block state missing, rewinding further", "number", newHeadBlock.NumberU64(), "hash", newHeadBlock.Hash())
 					if pivot == nil || newHeadBlock.NumberU64() > *pivot {
 						newHeadBlock = bc.GetBlock(newHeadBlock.ParentHash(), newHeadBlock.NumberU64()-1)
 						continue
 					} else {
 						log.Trace("Rewind passed pivot, aiming genesis", "number", newHeadBlock.NumberU64(), "hash", newHeadBlock.Hash(), "pivot", *pivot)
 						newHeadBlock = bc.genesisBlock
+						canStop = true
 					}
 				}
-				log.Debug("Rewound to block with state", "number", newHeadBlock.NumberU64(), "hash", newHeadBlock.Hash())
-				break
+				if canStop {
+					log.Debug("Rewound to block with state", "number", newHeadBlock.NumberU64(), "hash", newHeadBlock.Hash())
+					break
+				}
+				log.Debug("Skipping block with state", "number", newHeadBlock.NumberU64(), "hash", newHeadBlock.Hash())
+				newHeadBlock = bc.GetBlock(newHeadBlock.ParentHash(), newHeadBlock.NumberU64()-1) // Keep rewinding
 			}
 		}
 		rawdb.WriteHeadBlockHash(db, newHeadBlock.Hash())
@@ -563,6 +587,16 @@ func (bc *BlockChain) SetHead(head uint64) error {
 	return bc.loadLastState()
 }
 
+// SetHead is a wrapper of setHead for external usage.
+func (bc *BlockChain) SetHead(head uint64) error {
+	return bc.setHead(head, nil)
+}
+
+// SetHeadWithCondition is a wrapper of setHead with an onDelete callback for external usage.
+func (bc *BlockChain) SetHeadWithCondition(head uint64, onDelete func(block *types.Block, canStop *bool)) error {
+	return bc.setHead(head, onDelete)
+}
+
 // FastSyncCommitHead sets the current head block to the one defined by the hash
 // irrelevant what the chain contents were prior.
 func (bc *BlockChain) FastSyncCommitHead(hash common.Hash) error {
diff --git a/core/rawdb/accessors_snapshot.go b/core/rawdb/accessors_snapshot.go
index ecd4e65978ee..a83e7cd69eb6 100644
--- a/core/rawdb/accessors_snapshot.go
+++ b/core/rawdb/accessors_snapshot.go
@@ -118,3 +118,26 @@ func DeleteSnapshotJournal(db ethdb.KeyValueWriter) {
 		log.Crit("Failed to remove snapshot journal", "err", err)
 	}
 }
+
+// ReadSnapshotGenerator retrieves the serialized snapshot generator saved at
+// the last shutdown.
+func ReadSnapshotGenerator(db ethdb.KeyValueReader) []byte {
+	data, _ := db.Get(snapshotGeneratorKey)
+	return data
+}
+
+// WriteSnapshotGenerator stores the serialized snapshot generator to save at
+// shutdown.
+func WriteSnapshotGenerator(db ethdb.KeyValueWriter, generator []byte) {
+	if err := db.Put(snapshotGeneratorKey, generator); err != nil {
+		log.Crit("Failed to store snapshot generator", "err", err)
+	}
+}
+
+// DeleteSnapshotGenerator deletes the serialized snapshot generator saved at
+// the last shutdown.
+func DeleteSnapshotGenerator(db ethdb.KeyValueWriter) {
+	if err := db.Delete(snapshotGeneratorKey); err != nil {
+		log.Crit("Failed to remove snapshot generator", "err", err)
+	}
+}
diff --git a/core/rawdb/schema.go b/core/rawdb/schema.go
index e2b093a34a6c..7d43e7189881 100644
--- a/core/rawdb/schema.go
+++ b/core/rawdb/schema.go
@@ -51,6 +51,9 @@ var (
 	// snapshotJournalKey tracks the in-memory diff layers across restarts.
 	snapshotJournalKey = []byte("SnapshotJournal")
 
+	// snapshotGeneratorKey tracks the snapshot generation marker across restarts.
+	snapshotGeneratorKey = []byte("SnapshotGenerator")
+
 	// txIndexTailKey tracks the oldest block whose transactions have been indexed.
 	txIndexTailKey = []byte("TransactionIndexTail")
 
diff --git a/core/state/snapshot/journal.go b/core/state/snapshot/journal.go
index 4f47d7c94097..4e7058b2caa7 100644
--- a/core/state/snapshot/journal.go
+++ b/core/state/snapshot/journal.go
@@ -63,6 +63,80 @@ type journalStorage struct {
 	Vals [][]byte
 }
 
+// loadAndParseLegacyJournal tries to parse the snapshot journal in legacy format.
+func loadAndParseLegacyJournal(db ethdb.KeyValueStore, base *diskLayer) (snapshot, journalGenerator, error) {
+	// Retrieve the journal; for a legacy journal it must exist, since even
+	// with zero diff layers it stores whether we've already generated the
+	// snapshot or are in progress only.
+	journal := rawdb.ReadSnapshotJournal(db)
+	if len(journal) == 0 {
+		return nil, journalGenerator{}, errors.New("missing or corrupted snapshot journal")
+	}
+	r := rlp.NewStream(bytes.NewReader(journal), 0)
+
+	// Read the snapshot generation progress for the disk layer
+	var generator journalGenerator
+	if err := r.Decode(&generator); err != nil {
+		return nil, journalGenerator{}, fmt.Errorf("failed to load snapshot progress marker: %v", err)
+	}
+	// Load all the snapshot diffs from the journal
+	snapshot, err := loadDiffLayer(base, r)
+	if err != nil {
+		return nil, generator, err
+	}
+	return snapshot, generator, nil
+}
+
+// loadAndParseJournal tries to parse the snapshot journal in the latest format.
+func loadAndParseJournal(db ethdb.KeyValueStore, base *diskLayer) (snapshot, journalGenerator, error) {
+	// Retrieve the disk layer generator. It must exist, no matter whether the
+	// snapshot is fully generated or not. Otherwise the entire disk
+	// layer should be discarded.
+	generatorBlob := rawdb.ReadSnapshotGenerator(db)
+	if len(generatorBlob) == 0 {
+		return nil, journalGenerator{}, errors.New("missing snapshot generator")
+	}
+	var generator journalGenerator
+	if err := rlp.DecodeBytes(generatorBlob, &generator); err != nil {
+		return nil, journalGenerator{}, fmt.Errorf("failed to decode snapshot generator: %v", err)
+	}
+	// Retrieve the diff layer journal. It's possible that the journal is
+	// not existent, e.g. the disk layer is generating when Geth
+	// crashes without persisting the diff journal.
+	// So if there is no journal, we just discard all diffs and try to recover
+	// them later.
+	journal := rawdb.ReadSnapshotJournal(db)
+	if len(journal) == 0 {
+		return base, generator, nil
+	}
+	r := rlp.NewStream(bytes.NewReader(journal), 0)
+
+	// Firstly, resolve the first element as the journal version
+	version, err := r.Uint()
+	if err != nil {
+		return nil, journalGenerator{}, err
+	}
+	if version != journalVersion {
+		return nil, journalGenerator{}, errors.New("version mismatch")
+	}
+	// Secondly, resolve the disk layer root and ensure the journal is
+	// continuous with the disk layer.
+	var root common.Hash
+	if err := r.Decode(&root); err != nil {
+		return nil, journalGenerator{}, errors.New("missing disk layer root")
+	}
+	// If they are not continuous, keep the disk layer only.
+	if !bytes.Equal(root.Bytes(), base.root.Bytes()) {
+		return base, generator, nil
+	}
+	// Load all the snapshot diffs from the journal
+	snapshot, err := loadDiffLayer(base, r)
+	if err != nil {
+		return nil, journalGenerator{}, err
+	}
+	return snapshot, generator, nil
+}
+
 // loadSnapshot loads a pre-existing state snapshot backed by a key-value store.
 func loadSnapshot(diskdb ethdb.KeyValueStore, triedb *trie.Database, cache int, root common.Hash) (snapshot, error) {
 	// Retrieve the block number and hash of the snapshot, failing if no snapshot
@@ -77,41 +151,22 @@ func loadSnapshot(diskdb ethdb.KeyValueStore, triedb *trie.Database, cache int,
 		cache:  fastcache.New(cache * 1024 * 1024),
 		root:   baseRoot,
 	}
-	// Retrieve the journal, it must exist since even for 0 layer it stores whether
-	// we've already generated the snapshot or are in progress only
-	journal := rawdb.ReadSnapshotJournal(diskdb)
-	if len(journal) == 0 {
-		// Scenario: the disk layer is still generating while there
-		// is no journal persisted at all. In this case, try to
-		// reuse the existing disk layer if possible.
-		return nil, errors.New("missing or corrupted snapshot journal")
-	}
-	r := rlp.NewStream(bytes.NewReader(journal), 0)
-
-	// Read the snapshot generation progress for the disk layer
-	var generator journalGenerator
-	if err := r.Decode(&generator); err != nil {
-		return nil, fmt.Errorf("failed to load snapshot progress marker: %v", err)
+	snapshot, generator, err := loadAndParseJournal(diskdb, base)
+	if err != nil {
+		snapshot, generator, err = loadAndParseLegacyJournal(diskdb, base)
 	}
-	// Load all the snapshot diffs from the journal
-	snapshot, err := loadDiffLayer(base, r)
 	if err != nil {
 		return nil, err
 	}
-	// Entire snapshot journal loaded, sanity check the head and return
-	// Journal doesn't exist, don't worry if it's not supposed to
+	// Entire snapshot journal loaded, sanity check the head. If the loaded
+	// snapshot does not match the current state root, ensure the state is
+	// below the snapshot so that the broken snapshot can be recovered.
+	//
+	// Possible scenario: Geth crashed without persisting the journal and then
+	// restarted, the head is rewound to the point with available state (trie) which
+	// is below the snapshot.
 	if head := snapshot.Root(); head != root {
-		// If the loaded snapshot does not match the current
-		// state root, try to recover the "almost broken" snapshot.
-
-		// Scenario a: Geth crashed and was then restarted; in this
-		// case the head is rewound to the point with available
-		// state (trie), so the head block may be lower than the snapshot head
-
-		// Scenario b: Geth crashed and was then restarted, but the
-		// rewound head is higher than the snapshot.
-
-		return nil, fmt.Errorf("head doesn't match snapshot: have %#x, want %#x", head, root)
+		log.Warn("Snapshot is not synced, wait recovery")
 	}
 	// Everything loaded correctly, resume any suspended operations
 	if !generator.Done {
@@ -218,12 +273,9 @@ func (dl *diskLayer) Journal(buffer *bytes.Buffer) (common.Hash, error) {
 	if dl.stale {
 		return common.Hash{}, ErrSnapshotStale
 	}
-	// Firstly write out the root of the disk layer. The
-	// root is the state root of some canonical block.
-	if err := rlp.Encode(buffer, dl.root); err != nil {
-		return common.Hash{}, err
-	}
-	// Write out the generator marker
+	// Write out the generator marker. Note it's a standalone disk layer generator
+	// which is not mixed with the journal. It's ok if the generator is persisted
+	// while the journal is not.
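+	// (The persisted layout is thus, roughly: the generator lives under its
+	// own database key and is rewritten as generation progresses, while the
+	// journal — version, disk root, diff layers — is only written on
+	// shutdown and may be missing after a crash.)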
 	entry := journalGenerator{
 		Done:   dl.genMarker == nil,
 		Marker: dl.genMarker,
@@ -234,9 +286,11 @@ func (dl *diskLayer) Journal(buffer *bytes.Buffer) (common.Hash, error) {
 		entry.Slots = stats.slots
 		entry.Storage = uint64(stats.storage)
 	}
-	if err := rlp.Encode(buffer, entry); err != nil {
+	blob, err := rlp.EncodeToBytes(entry)
+	if err != nil {
 		return common.Hash{}, err
 	}
+	rawdb.WriteSnapshotGenerator(dl.diskdb, blob)
 	return dl.root, nil
 }
 
diff --git a/core/state/snapshot/snapshot.go b/core/state/snapshot/snapshot.go
index 27a91a33376e..a703a3172ce0 100644
--- a/core/state/snapshot/snapshot.go
+++ b/core/state/snapshot/snapshot.go
@@ -199,7 +199,7 @@ func New(diskdb ethdb.KeyValueStore, triedb *trie.Database, cache int, root comm
 }
 
 // waitBuild blocks until the snapshot finishes rebuilding. This method is meant
-// to be used by tests to ensure we're testing what we believe we are. 
+// to be used by tests to ensure we're testing what we believe we are.
 func (t *Tree) waitBuild() {
 	// Find the rebuild termination channel
 	var done chan struct{}
@@ -506,7 +506,22 @@ func diffToDisk(bottom *diffLayer) *diskLayer {
 	// Update the snapshot block marker and write any remainder data
 	rawdb.WriteSnapshotRoot(batch, bottom.root)
 
-	// todo write snapshot generator
+	// Write out the generator marker
+	entry := journalGenerator{
+		Done:   base.genMarker == nil,
+		Marker: base.genMarker,
+	}
+	if stats != nil {
+		entry.Wiping = (stats.wiping != nil)
+		entry.Accounts = stats.accounts
+		entry.Slots = stats.slots
+		entry.Storage = uint64(stats.storage)
+	}
+	blob, err := rlp.EncodeToBytes(entry)
+	if err != nil {
+		panic(fmt.Sprintf("Failed to RLP encode generator %v", err))
+	}
+	rawdb.WriteSnapshotGenerator(batch, blob)
 
 	// Flush all the updates in a single db operation. Ensure the
 	// disk layer transition is atomic.
@@ -555,7 +570,12 @@ func (t *Tree) Journal(root common.Hash) (common.Hash, error) {
 	if err := rlp.Encode(journal, journalVersion); err != nil {
 		return common.Hash{}, err
 	}
-	// Secondly write out the journal of each layer in reverse order.
+	// Secondly write out the disk layer root, ensure the
+	// diff journal is continuous with the disk layer.
+	if err := rlp.Encode(journal, t.disklayer().root); err != nil {
+		return common.Hash{}, err
+	}
+	// Finally write out the journal of each layer in reverse order.
 	base, err := snap.(snapshot).Journal(journal)
 	if err != nil {
 		return common.Hash{}, err

From 08bc64ecca10fab522d43d21f0ae4d828496fc31 Mon Sep 17 00:00:00 2001
From: rjl493456442
Date: Tue, 15 Sep 2020 19:32:08 +0800
Subject: [PATCH 04/19] core/state/snapshot: improve logging

---
 core/state/snapshot/disklayer_test.go | 76 +++++++++++++++++++++++++++
 core/state/snapshot/journal.go        | 22 ++++----
 core/state/snapshot/snapshot.go       |  7 +--
 3 files changed, 92 insertions(+), 13 deletions(-)

diff --git a/core/state/snapshot/disklayer_test.go b/core/state/snapshot/disklayer_test.go
index 8460cd332f99..232bf7e1a2bc 100644
--- a/core/state/snapshot/disklayer_test.go
+++ b/core/state/snapshot/disklayer_test.go
@@ -18,6 +18,7 @@ package snapshot
 
 import (
 	"bytes"
+	"github.com/ethereum/go-ethereum/rlp"
 	"io/ioutil"
 	"os"
 	"testing"
@@ -429,6 +430,81 @@ func TestDiskPartialMerge(t *testing.T) {
 	}
 }
 
+// Tests whether the corresponding generator is persisted correctly when the
+// bottom-most diff layer is merged into the disk layer.
+func TestDiskGeneratorPersistence(t *testing.T) {
+	var (
+		accOne        = randomHash()
+		accTwo        = randomHash()
+		accOneSlotOne = randomHash()
+		accOneSlotTwo = randomHash()
+
+		accThree     = randomHash()
+		accThreeSlot = randomHash()
+		baseRoot     = randomHash()
+		diffRoot     = randomHash()
+		diffTwoRoot  = randomHash()
+		genMarker    = append(randomHash().Bytes(), randomHash().Bytes()...)
+	)
+	// Testing scenario 1, the disk layer is still under construction.
+	db := rawdb.NewMemoryDatabase()
+
+	rawdb.WriteAccountSnapshot(db, accOne, accOne[:])
+	rawdb.WriteStorageSnapshot(db, accOne, accOneSlotOne, accOneSlotOne[:])
+	rawdb.WriteStorageSnapshot(db, accOne, accOneSlotTwo, accOneSlotTwo[:])
+	rawdb.WriteSnapshotRoot(db, baseRoot)
+
+	// Create a disk layer based on all above updates
+	snaps := &Tree{
+		layers: map[common.Hash]snapshot{
+			baseRoot: &diskLayer{
+				diskdb:    db,
+				cache:     fastcache.New(500 * 1024),
+				root:      baseRoot,
+				genMarker: genMarker,
+			},
+		},
+	}
+	// Modify or delete some accounts, flatten everything onto disk
+	if err := snaps.Update(diffRoot, baseRoot, nil, map[common.Hash][]byte{
+		accTwo: accTwo[:],
+	}, nil); err != nil {
+		t.Fatalf("failed to update snapshot tree: %v", err)
+	}
+	if err := snaps.Cap(diffRoot, 0); err != nil {
+		t.Fatalf("failed to flatten snapshot tree: %v", err)
+	}
+	blob := rawdb.ReadSnapshotGenerator(db)
+	var generator journalGenerator
+	if err := rlp.DecodeBytes(blob, &generator); err != nil {
+		t.Fatalf("Failed to decode snapshot generator %v", err)
+	}
+	if bytes.Compare(generator.Marker, genMarker) != 0 {
+		t.Fatalf("Generator marker mismatch")
+	}
+	// Test scenario 2, the disk layer is fully generated
+	// Modify or delete some accounts, flatten everything onto disk
+	if err := snaps.Update(diffTwoRoot, diffRoot, nil, map[common.Hash][]byte{
+		accThree: accThree.Bytes(),
+	}, map[common.Hash]map[common.Hash][]byte{
+		accThree: {accThreeSlot: accThreeSlot.Bytes()},
+	}); err != nil {
+		t.Fatalf("failed to update snapshot tree: %v", err)
+	}
+	diskLayer := snaps.layers[snaps.diskRoot()].(*diskLayer)
+	diskLayer.genMarker = nil // Construction finished
+	if err := snaps.Cap(diffTwoRoot, 0); err != nil {
+		t.Fatalf("failed to flatten snapshot tree: %v", err)
+	}
+	blob = rawdb.ReadSnapshotGenerator(db)
+	if err := rlp.DecodeBytes(blob, &generator); err != nil {
+		t.Fatalf("Failed to decode snapshot generator %v", err)
+	}
+	if len(generator.Marker) != 0 {
+		t.Fatalf("Failed to update snapshot generator")
+	}
+}
+
 // Tests that merging something into a disk layer persists it into the database
 // and invalidates any previously written and cached values, discarding anything
 // after the in-progress generation marker.
diff --git a/core/state/snapshot/journal.go b/core/state/snapshot/journal.go
index 4e7058b2caa7..932e440a16cf 100644
--- a/core/state/snapshot/journal.go
+++ b/core/state/snapshot/journal.go
@@ -91,7 +91,7 @@ func loadAndParseLegacyJournal(db ethdb.KeyValueStore, base *diskLayer) (snapsho
 func loadAndParseJournal(db ethdb.KeyValueStore, base *diskLayer) (snapshot, journalGenerator, error) {
 	// Retrieve the disk layer generator. It must exist, no matter whether the
 	// snapshot is fully generated or not. Otherwise the entire disk
-	// layer should be discarded.
+	// layer is invalid.
 	generatorBlob := rawdb.ReadSnapshotGenerator(db)
 	if len(generatorBlob) == 0 {
 		return nil, journalGenerator{}, errors.New("missing snapshot generator")
@@ -103,8 +103,8 @@ func loadAndParseJournal(db ethdb.KeyValueStore, base *diskLayer) (snapshot, jou
 	// Retrieve the diff layer journal. It's possible that the journal is
 	// not existent, e.g. the disk layer is generating when Geth
 	// crashes without persisting the diff journal.
-	// So if there is no journal, we just discard all diffs and try to recover
-	// them later.
+	// So if there is no journal, or the journal does not match the disk
+	// layer, we just discard all diffs and try to recover them later.
 	journal := rawdb.ReadSnapshotJournal(db)
 	if len(journal) == 0 {
 		return base, generator, nil
@@ -117,7 +117,7 @@ func loadAndParseJournal(db ethdb.KeyValueStore, base *diskLayer) (snapshot, jou
 		return nil, journalGenerator{}, err
 	}
 	if version != journalVersion {
-		return nil, journalGenerator{}, errors.New("version mismatch")
+		return nil, journalGenerator{}, fmt.Errorf("journal version mismatch, want %d got %v", journalVersion, version)
 	}
 	// Secondly, resolve the disk layer root and ensure the journal is
 	// continuous with the disk layer.
@@ -125,7 +125,9 @@ func loadAndParseJournal(db ethdb.KeyValueStore, base *diskLayer) (snapshot, jou
 	if err := r.Decode(&root); err != nil {
 		return nil, journalGenerator{}, errors.New("missing disk layer root")
 	}
-	// If they are not continuous, keep the disk layer only.
+	// The diff journal does not match the disk layer, discard it.
+	// It can happen that Geth crashes without persisting the latest
+	// diff journal.
 	if !bytes.Equal(root.Bytes(), base.root.Bytes()) {
 		return base, generator, nil
 	}
@@ -159,14 +161,14 @@ func loadSnapshot(diskdb ethdb.KeyValueStore, triedb *trie.Database, cache int,
 		return nil, err
 	}
 	// Entire snapshot journal loaded, sanity check the head. If the loaded
-	// snapshot does not match the current state root, ensure the state is
-	// below the snapshot so that the broken snapshot can be recovered.
+	// snapshot does not match the current state root, print a warning log.
 	//
 	// Possible scenario: Geth crashed without persisting the journal and then
-	// restarted, the head is rewound to the point with available state (trie) which
-	// is below the snapshot.
+	// restarted, the head is rewound to the point with available state (trie)
+	// which is below the snapshot. In this case the snapshot can be recovered
+	// by re-executing blocks but right now it's unavailable.
 	if head := snapshot.Root(); head != root {
-		log.Warn("Snapshot is not synced, wait recovery")
+		log.Warn("Snapshot is not continuous with chain", "snaproot", head, "chainroot", root)
 	}
 	// Everything loaded correctly, resume any suspended operations
 	if !generator.Done {
diff --git a/core/state/snapshot/snapshot.go b/core/state/snapshot/snapshot.go
index a703a3172ce0..98f4f4448e36 100644
--- a/core/state/snapshot/snapshot.go
+++ b/core/state/snapshot/snapshot.go
@@ -169,9 +169,10 @@ type Tree struct {
 // store (with a number of memory layers from a journal), ensuring that the head
 // of the snapshot matches the expected one.
 //
-// If the snapshot is missing or inconsistent, the entirety is deleted and will
-// be reconstructed from scratch based on the tries in the key-value store, on a
-// background thread.
+// If the snapshot is missing or the disk layer is broken, the entirety is deleted
+// and will be reconstructed from scratch based on the tries in the key-value
+// store, on a background thread.
If the memory layers from the journal are not
+// continuous with the disk layer or the journal is missing, all diffs will be discarded.
 func New(diskdb ethdb.KeyValueStore, triedb *trie.Database, cache int, root common.Hash, async bool) *Tree {
 	// Create a new, empty snapshot tree
 	snap := &Tree{

From abe54d9a6ac6abffe83d3d443fada9e0a1c2fb90 Mon Sep 17 00:00:00 2001
From: rjl493456442
Date: Tue, 22 Sep 2020 14:36:27 +0800
Subject: [PATCH 05/19] core/state/snapshot: forcibly ensure the legacy snapshot is matched

---
 core/state/snapshot/disklayer_test.go | 2 +-
 core/state/snapshot/journal.go        | 8 +++++++-
 2 files changed, 8 insertions(+), 2 deletions(-)

diff --git a/core/state/snapshot/disklayer_test.go b/core/state/snapshot/disklayer_test.go
index 232bf7e1a2bc..aa4df9df4461 100644
--- a/core/state/snapshot/disklayer_test.go
+++ b/core/state/snapshot/disklayer_test.go
@@ -18,7 +18,6 @@ package snapshot
 
 import (
 	"bytes"
-	"github.com/ethereum/go-ethereum/rlp"
 	"io/ioutil"
 	"os"
 	"testing"
@@ -29,6 +28,7 @@
 	"github.com/ethereum/go-ethereum/ethdb"
 	"github.com/ethereum/go-ethereum/ethdb/leveldb"
 	"github.com/ethereum/go-ethereum/ethdb/memorydb"
+	"github.com/ethereum/go-ethereum/rlp"
 )
 
 // reverse reverses the contents of a byte slice. It's used to update random accs
diff --git a/core/state/snapshot/journal.go b/core/state/snapshot/journal.go
index 932e440a16cf..e0b2eb8a05c9 100644
--- a/core/state/snapshot/journal.go
+++ b/core/state/snapshot/journal.go
@@ -153,21 +153,27 @@ func loadSnapshot(diskdb ethdb.KeyValueStore, triedb *trie.Database, cache int,
 		cache:  fastcache.New(cache * 1024 * 1024),
 		root:   baseRoot,
 	}
+	var legacy bool
 	snapshot, generator, err := loadAndParseJournal(diskdb, base)
 	if err != nil {
 		snapshot, generator, err = loadAndParseLegacyJournal(diskdb, base)
+		legacy = true
 	}
 	if err != nil {
 		return nil, err
 	}
 	// Entire snapshot journal loaded, sanity check the head. If the loaded
-	// snapshot does not match the current state root, print a warning log.
+	// snapshot does not match the current state root, print a warning log
+	// or discard the entire snapshot if it's a legacy snapshot.
 	//
 	// Possible scenario: Geth crashed without persisting the journal and then
 	// restarted, the head is rewound to the point with available state (trie)
 	// which is below the snapshot. In this case the snapshot can be recovered
 	// by re-executing blocks but right now it's unavailable.
 	if head := snapshot.Root(); head != root {
+		if legacy {
+			return nil, fmt.Errorf("head doesn't match snapshot: have %#x, want %#x", head, root)
+		}
 		log.Warn("Snapshot is not continuous with chain", "snaproot", head, "chainroot", root)
 	}
 	// Everything loaded correctly, resume any suspended operations

From 5b23601276281bc737d3b7be3597fadb65f2ed0e Mon Sep 17 00:00:00 2001
From: rjl493456442
Date: Tue, 22 Sep 2020 15:21:27 +0800
Subject: [PATCH 06/19] core/state/snapshot: add debug logs

---
 core/state/snapshot/generate.go | 1 +
 core/state/snapshot/journal.go  | 6 ++++++
 core/state/snapshot/snapshot.go | 1 +
 3 files changed, 8 insertions(+)

diff --git a/core/state/snapshot/generate.go b/core/state/snapshot/generate.go
index a6b3e4420d8c..566f7d94a81e 100644
--- a/core/state/snapshot/generate.go
+++ b/core/state/snapshot/generate.go
@@ -112,6 +112,7 @@ func generateSnapshot(diskdb ethdb.KeyValueStore, triedb *trie.Database, cache i
 		genAbort: make(chan chan *generatorStats),
 	}
 	go base.generate(&generatorStats{wiping: wiper, start: time.Now()})
+	log.Debug("Start snapshot generation", "root", root)
 	return base
 }
 
diff --git a/core/state/snapshot/journal.go b/core/state/snapshot/journal.go
index e0b2eb8a05c9..831abd3788ea 100644
--- a/core/state/snapshot/journal.go
+++ b/core/state/snapshot/journal.go
@@ -107,6 +107,7 @@ func loadAndParseJournal(db ethdb.KeyValueStore, base *diskLayer) (snapshot, jou
 	journal := rawdb.ReadSnapshotJournal(db)
 	if len(journal) == 0 {
+		log.Debug("Loaded snapshot journal", "diskroot", base.root, "diff", "missing")
 		return base, generator, nil
 	}
 	r := rlp.NewStream(bytes.NewReader(journal), 0)
@@ -129,6 +130,7 @@ func loadAndParseJournal(db ethdb.KeyValueStore, base *diskLayer) (snapshot, jou
 	// It can happen that Geth crashes without persisting the latest
 	// diff journal.
 	if !bytes.Equal(root.Bytes(), base.root.Bytes()) {
+		log.Debug("Loaded snapshot journal", "diskroot", base.root, "diff", "unmatched")
 		return base, generator, nil
 	}
 	// Load all the snapshot diffs from the journal
@@ -136,6 +138,7 @@ func loadAndParseJournal(db ethdb.KeyValueStore, base *diskLayer) (snapshot, jou
 	if err != nil {
 		return nil, journalGenerator{}, err
 	}
+	log.Debug("Loaded snapshot journal", "diskroot", base.root, "diffhead", snapshot.Root())
 	return snapshot, generator, nil
 }
 
@@ -156,6 +159,7 @@ func loadSnapshot(diskdb ethdb.KeyValueStore, triedb *trie.Database, cache int,
 	var legacy bool
 	snapshot, generator, err := loadAndParseJournal(diskdb, base)
 	if err != nil {
+		log.Debug("Failed to load latest-format journal", "error", err)
 		snapshot, generator, err = loadAndParseLegacyJournal(diskdb, base)
 		legacy = true
 	}
@@ -298,6 +302,7 @@ func (dl *diskLayer) Journal(buffer *bytes.Buffer) (common.Hash, error) {
 	if err != nil {
 		return common.Hash{}, err
 	}
+	log.Debug("Journalled disk layer", "root", dl.root, "complete", dl.genMarker == nil)
 	rawdb.WriteSnapshotGenerator(dl.diskdb, blob)
 	return dl.root, nil
 }
@@ -348,5 +353,6 @@ func (dl *diffLayer) Journal(buffer *bytes.Buffer) (common.Hash, error) {
 	if err := rlp.Encode(buffer, storage); err != nil {
 		return common.Hash{}, err
 	}
+	log.Debug("Journalled diff layer", "root", dl.root, "parent", dl.parent.Root())
 	return base, nil
 }
diff --git a/core/state/snapshot/snapshot.go b/core/state/snapshot/snapshot.go
index 98f4f4448e36..6fc879c31b49 100644
--- a/core/state/snapshot/snapshot.go
+++ b/core/state/snapshot/snapshot.go
@@ -529,6 +529,7 @@ func diffToDisk(bottom *diffLayer) *diskLayer {
 	if err := batch.Write(); err != nil {
 		log.Crit("Failed to write leftover snapshot", "err", err)
 	}
+	log.Debug("Journalled disk layer", "root", bottom.root, "complete", base.genMarker == nil)
 	res := &diskLayer{
 		root:  bottom.root,
 		cache: base.cache,

From d05d8954aadb46fe7474a2eb41481c94838ab379 Mon Sep 17 00:00:00 2001
From: rjl493456442
Date: Tue, 22 Sep 2020 19:39:47 +0800
Subject: [PATCH 07/19] core, tests: fix tests and special recovery case

---
 core/blockchain.go              |  8 ++++----
 core/blockchain_repair_test.go  | 14 ++++++++++----
 core/blockchain_sethead_test.go | 17 ++++++++++++-----
 core/state/snapshot/journal.go  | 11 +++++++++--
 core/state/snapshot/snapshot.go |  7 ++++---
 tests/state_test_util.go        |  2 +-
 6 files changed, 40 insertions(+), 19 deletions(-)

diff --git a/core/blockchain.go b/core/blockchain.go
index d28a28d986dc..eda9a0eb1919 100644
--- a/core/blockchain.go
+++ b/core/blockchain.go
@@ -279,7 +279,7 @@ func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, chainConfig *par
 		return nil, err
 	}
 	// Make sure the state associated with the block is available
-	head := bc.CurrentBlock()
+	head, recoverSnapshot := bc.CurrentBlock(), false
 	if _, err := state.New(head.Root(), bc.stateCache, bc.snaps); err != nil {
 		// Head state is missing, before the state recovery, find out the
 		// disk layer point of the snapshot (if it's enabled).
Make sure the
 		// rewound point is lower then disk layer.
 		var diskRoot common.Hash
 		if bc.cacheConfig.SnapshotLimit > 0 {
 			diskRoot = rawdb.ReadSnapshotRoot(bc.db)
+			recoverSnapshot = true
 		}
 		log.Warn("Head state missing, repairing", "number", head.Number(), "hash", head.Hash())
 		if diskRoot != (common.Hash{}) {
@@ -357,7 +358,7 @@ func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, chainConfig *par
 	}
 	// Load any existing snapshot, regenerating it if loading failed
 	if bc.cacheConfig.SnapshotLimit > 0 {
-		bc.snaps = snapshot.New(bc.db, bc.stateCache.TrieDB(), bc.cacheConfig.SnapshotLimit, bc.CurrentBlock().Root(), !bc.cacheConfig.SnapshotWait)
+		bc.snaps = snapshot.New(bc.db, bc.stateCache.TrieDB(), bc.cacheConfig.SnapshotLimit, bc.CurrentBlock().Root(), !bc.cacheConfig.SnapshotWait, recoverSnapshot)
 	}
 	// Take ownership of this particular state
 	go bc.update()
@@ -496,10 +497,9 @@ func (bc *BlockChain) setHead(head uint64, onDelete func(block *types.Block, can
 			} else {
 				log.Trace("Rewind passed pivot, aiming genesis", "number", newHeadBlock.NumberU64(), "hash", newHeadBlock.Hash(), "pivot", *pivot)
 				newHeadBlock = bc.genesisBlock
-				canStop = true
 			}
 		}
-		if canStop {
+		if canStop || newHeadBlock.NumberU64() == 0 {
 			log.Debug("Rewound to block with state", "number", newHeadBlock.NumberU64(), "hash", newHeadBlock.Hash())
 			break
 		}
diff --git a/core/blockchain_repair_test.go b/core/blockchain_repair_test.go
index 27903dd06b35..d201bae14615 100644
--- a/core/blockchain_repair_test.go
+++ b/core/blockchain_repair_test.go
@@ -21,6 +21,7 @@
 package core
 
 import (
+	"fmt"
 	"io/ioutil"
 	"math/big"
 	"os"
@@ -1556,7 +1557,7 @@ func TestLongReorgedFastSyncingDeepRepair(t *testing.T) {
 func testRepair(t *testing.T, tt *rewindTest) {
 	// It's hard to follow the test case, visualize the input
 	//log.Root().SetHandler(log.LvlFilterHandler(log.LvlTrace, log.StreamHandler(os.Stderr, log.TerminalFormat(true))))
-	//fmt.Println(tt.dump(true))
+	fmt.Println(tt.dump(true))
 
 	// Create a temporary persistent database
 	datadir, err := ioutil.TempDir("", "")
@@ -1573,10 +1574,15 @@ func testRepair(t *testing.T, tt *rewindTest) {
 
 	// Initialize a fresh chain
 	var (
-		genesis = new(Genesis).MustCommit(db)
-		engine  = ethash.NewFullFaker()
+		genesis     = new(Genesis).MustCommit(db)
+		engine      = ethash.NewFullFaker()
+		cacheConfig = defaultCacheConfig
 	)
-	chain, err := NewBlockChain(db, nil, params.AllEthashProtocolChanges, engine, vm.Config{}, nil, nil)
+	if !tt.enableSnapshot {
+		cacheConfig.SnapshotLimit = 0
+		cacheConfig.SnapshotWait = false
+	}
+	chain, err := NewBlockChain(db, cacheConfig, params.AllEthashProtocolChanges, engine, vm.Config{}, nil, nil)
 	if err != nil {
 		t.Fatalf("Failed to create chain: %v", err)
 	}
diff --git a/core/blockchain_sethead_test.go b/core/blockchain_sethead_test.go
index dc1368ff4b48..1f605e60144e 100644
--- a/core/blockchain_sethead_test.go
+++ b/core/blockchain_sethead_test.go
@@ -37,6 +37,7 @@ import (
 
 // rewindTest is a test case for chain rollback upon user request.
 type rewindTest struct {
+	enableSnapshot  bool   // Flag whether the snapshot is enabled
 	canonicalBlocks int    // Number of blocks to generate for the canonical chain (heavier)
 	sidechainBlocks int    // Number of blocks to generate for the side chain (lighter)
 	freezeThreshold uint64 // Block number until which to move things into the freezer
@@ -55,7 +56,8 @@ type rewindTest struct {
 
 func (tt *rewindTest) dump(crash bool) string {
 	buffer := new(strings.Builder)
-	fmt.Fprint(buffer, "Chain:\n G")
+	fmt.Fprint(buffer, fmt.Sprintf("Chain(snapshot: %v):", tt.enableSnapshot))
+	fmt.Fprint(buffer, "\n G")
 	for i := 0; i < tt.canonicalBlocks; i++ {
 		fmt.Fprintf(buffer, "->C%d", i+1)
 	}
@@ -1747,7 +1749,7 @@ func TestLongReorgedFastSyncingDeepSetHead(t *testing.T) {
 func testSetHead(t *testing.T, tt *rewindTest) {
 	// It's hard to follow the test case, visualize the input
 	//log.Root().SetHandler(log.LvlFilterHandler(log.LvlTrace, log.StreamHandler(os.Stderr, log.TerminalFormat(true))))
-	//fmt.Println(tt.dump(false))
+	fmt.Println(tt.dump(false))
 
 	// Create a temporary persistent database
 	datadir, err := ioutil.TempDir("", "")
@@ -1764,10 +1766,15 @@ func testSetHead(t *testing.T, tt *rewindTest) {
 
 	// Initialize a fresh chain
 	var (
-		genesis = new(Genesis).MustCommit(db)
-		engine  = ethash.NewFullFaker()
+		genesis     = new(Genesis).MustCommit(db)
+		engine      = ethash.NewFullFaker()
+		cacheConfig = defaultCacheConfig
 	)
-	chain, err := NewBlockChain(db, nil, params.AllEthashProtocolChanges, engine, vm.Config{}, nil, nil)
+	if !tt.enableSnapshot {
+		cacheConfig.SnapshotLimit = 0
+		cacheConfig.SnapshotWait = false
+	}
+	chain, err := NewBlockChain(db, cacheConfig, params.AllEthashProtocolChanges, engine, vm.Config{}, nil, nil)
 	if err != nil {
 		t.Fatalf("Failed to create chain: %v", err)
 	}
diff --git a/core/state/snapshot/journal.go b/core/state/snapshot/journal.go
index 831abd3788ea..2b12ecca0de6 100644
--- a/core/state/snapshot/journal.go
+++ b/core/state/snapshot/journal.go
@@ -143,7 +143,7 @@ func loadAndParseJournal(db ethdb.KeyValueStore, base *diskLayer) (snapshot, jou
 }
 
 // loadSnapshot loads a pre-existing state snapshot backed by a key-value store.
-func loadSnapshot(diskdb ethdb.KeyValueStore, triedb *trie.Database, cache int, root common.Hash) (snapshot, error) {
+func loadSnapshot(diskdb ethdb.KeyValueStore, triedb *trie.Database, cache int, root common.Hash, recovery bool) (snapshot, error) {
 	// Retrieve the block number and hash of the snapshot, failing if no snapshot
 	// is present in the database (or crashed mid-update).
 	baseRoot := rawdb.ReadSnapshotRoot(diskdb)
@@ -175,9 +175,16 @@ func loadSnapshot(diskdb ethdb.KeyValueStore, triedb *trie.Database, cache int,
 	// which is below the snapshot. In this case the snapshot can be recovered
 	// by re-executing blocks but right now it's unavailable.
 	if head := snapshot.Root(); head != root {
-		if legacy {
+		// If it's a legacy snapshot, or a new-format snapshot that is
+		// not in recovery mode, return the error here to force the
+		// entire snapshot to be rebuilt.
+		if legacy || !recovery {
 			return nil, fmt.Errorf("head doesn't match snapshot: have %#x, want %#x", head, root)
 		}
+		// It's in snapshot recovery, the assumption is held that
+		// the disk layer is higher than chain head. It can be
+		// eventually recovered when the chain head is beyond the
+		// disk layer.
log.Warn("Snapshot is not continous with chain", "snaproot", head, "chainroot", root) } // Everything loaded correctly, resume any suspended operations diff --git a/core/state/snapshot/snapshot.go b/core/state/snapshot/snapshot.go index 6fc879c31b49..fd1daeab835d 100644 --- a/core/state/snapshot/snapshot.go +++ b/core/state/snapshot/snapshot.go @@ -172,8 +172,9 @@ type Tree struct { // If the snapshot is missing or the disk layer is broken, the entire is deleted // and will be reconstructed from scratch based on the tries in the key-value // store, on a background thread. If the memory layers from the journal is not -// continuous with disk layer or the journal is missing, all diffs will be discarded. -func New(diskdb ethdb.KeyValueStore, triedb *trie.Database, cache int, root common.Hash, async bool) *Tree { +// continuous with disk layer or the journal is missing, all diffs will be discarded +// iff it's in "recovery" mode, otherwise rebuild is mandatory. +func New(diskdb ethdb.KeyValueStore, triedb *trie.Database, cache int, root common.Hash, async bool, recovery bool) *Tree { // Create a new, empty snapshot tree snap := &Tree{ diskdb: diskdb, @@ -185,7 +186,7 @@ func New(diskdb ethdb.KeyValueStore, triedb *trie.Database, cache int, root comm defer snap.waitBuild() } // Attempt to load a previously persisted snapshot and rebuild one if failed - head, err := loadSnapshot(diskdb, triedb, cache, root) + head, err := loadSnapshot(diskdb, triedb, cache, root, recovery) if err != nil { log.Warn("Failed to load snapshot, regenerating", "err", err) snap.Rebuild(root) diff --git a/tests/state_test_util.go b/tests/state_test_util.go index 238d204745f1..28a5313129df 100644 --- a/tests/state_test_util.go +++ b/tests/state_test_util.go @@ -235,7 +235,7 @@ func MakePreState(db ethdb.Database, accounts core.GenesisAlloc, snapshotter boo var snaps *snapshot.Tree if snapshotter { - snaps = snapshot.New(db, sdb.TrieDB(), 1, root, false) + snaps = snapshot.New(db, sdb.TrieDB(), 1, root, false, false) } statedb, _ = state.New(root, sdb, snaps) return snaps, statedb From 103c3ea453ace052b52a774e525ab6e3f385fa4e Mon Sep 17 00:00:00 2001 From: rjl493456442 Date: Wed, 23 Sep 2020 14:36:56 +0800 Subject: [PATCH 08/19] core: polish --- core/blockchain.go | 5 +++-- core/state/snapshot/journal.go | 8 ++++---- 2 files changed, 7 insertions(+), 6 deletions(-) diff --git a/core/blockchain.go b/core/blockchain.go index eda9a0eb1919..4aad0a051fef 100644 --- a/core/blockchain.go +++ b/core/blockchain.go @@ -283,14 +283,14 @@ func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, chainConfig *par if _, err := state.New(head.Root(), bc.stateCache, bc.snaps); err != nil { // Head state is missing, before the state recovery, find out the // disk layer point of snapshot(if it's enabled). Make sure the - // rewound point is lower then disk layer. + // rewound point is lower than disk layer. 
 		var diskRoot common.Hash
 		if bc.cacheConfig.SnapshotLimit > 0 {
 			diskRoot = rawdb.ReadSnapshotRoot(bc.db)
 			recoverSnapshot = true
 		}
-		log.Warn("Head state missing, repairing", "number", head.Number(), "hash", head.Hash())
 		if diskRoot != (common.Hash{}) {
+			log.Warn("Head state missing, repairing", "number", head.Number(), "hash", head.Hash(), "snaproot", diskRoot)
 			var marked bool
 			if err := bc.SetHeadWithCondition(head.NumberU64(), func(block *types.Block, canStop *bool) {
 				if block.Root() == diskRoot && !marked {
 					marked, *canStop = true, true
 				}
 			}); err != nil {
 				return nil, err
 			}
 		} else {
+			log.Warn("Head state missing, repairing", "number", head.Number(), "hash", head.Hash())
 			if err := bc.SetHead(head.NumberU64()); err != nil {
 				return nil, err
 			}
diff --git a/core/state/snapshot/journal.go b/core/state/snapshot/journal.go
index 2b12ecca0de6..70ddecbb7400 100644
--- a/core/state/snapshot/journal.go
+++ b/core/state/snapshot/journal.go
@@ -159,7 +159,7 @@ func loadSnapshot(diskdb ethdb.KeyValueStore, triedb *trie.Database, cache int,
 	var legacy bool
 	snapshot, generator, err := loadAndParseJournal(diskdb, base)
 	if err != nil {
-		log.Debug("Failed to load latest-format journal", "error", err)
+		log.Debug("Failed to load new-format journal", "error", err)
 		snapshot, generator, err = loadAndParseLegacyJournal(diskdb, base)
 		legacy = true
 	}
@@ -182,10 +182,10 @@ func loadSnapshot(diskdb ethdb.KeyValueStore, triedb *trie.Database, cache int,
 			return nil, fmt.Errorf("head doesn't match snapshot: have %#x, want %#x", head, root)
 		}
 		// It's in snapshot recovery, the assumption is held that
-		// the disk layer is higher than chain head. It can be
-		// eventually recovered when the chain head is beyond the
+		// the disk layer is always higher than the chain head. It can
+		// be eventually recovered when the chain head moves beyond the
 		// disk layer.
-		log.Warn("Snapshot is not continuous with chain", "snaproot", head, "chainroot", root)
+		log.Warn("Snapshot is not continuous with chain, wait recovery", "snaproot", head, "chainroot", root)
 	}
 	// Everything loaded correctly, resume any suspended operations
 	if !generator.Done {

From 211407e36e7a4a0d81d7715ba30f7a98b64b0c18 Mon Sep 17 00:00:00 2001
From: rjl493456442
Date: Wed, 23 Sep 2020 20:22:16 +0800
Subject: [PATCH 09/19] core: add more blockchain tests for snapshot recovery

---
 core/blockchain.go               |  17 +-
 core/blockchain_repair_test.go   |  17 +-
 core/blockchain_sethead_test.go  |  20 +-
 core/blockchain_snapshot_test.go | 459 +++++++++++++++++++++++++++++++
 core/state/snapshot/journal.go   |  47 ++++
 core/state/snapshot/snapshot.go  |  35 +++
 6 files changed, 572 insertions(+), 23 deletions(-)
 create mode 100644 core/blockchain_snapshot_test.go

diff --git a/core/blockchain.go b/core/blockchain.go
index 4aad0a051fef..a51821c23c40 100644
--- a/core/blockchain.go
+++ b/core/blockchain.go
@@ -207,9 +207,10 @@ type BlockChain struct {
 	processor Processor // Block transaction processor interface
 	vmConfig  vm.Config
 
-	badBlocks       *lru.Cache                     // Bad block cache
-	shouldPreserve  func(*types.Block) bool        // Function used to determine whether should preserve the given block.
-	terminateInsert func(common.Hash, uint64) bool // Testing hook used to terminate ancient receipt chain insertion.
+	badBlocks          *lru.Cache                     // Bad block cache
+	shouldPreserve     func(*types.Block) bool        // Function used to determine whether should preserve the given block.
+ terminateInsert func(common.Hash, uint64) bool // Testing hook used to terminate ancient receipt chain insertion. + writeLegacyJournal bool // Testing flag used to flush the snapshot journal in legacy format. } // NewBlockChain returns a fully initialised block chain using information @@ -980,8 +981,14 @@ func (bc *BlockChain) Stop() { var snapBase common.Hash if bc.snaps != nil { var err error - if snapBase, err = bc.snaps.Journal(bc.CurrentBlock().Root()); err != nil { - log.Error("Failed to journal state snapshot", "err", err) + if bc.writeLegacyJournal { + if snapBase, err = bc.snaps.LegacyJournal(bc.CurrentBlock().Root()); err != nil { + log.Error("Failed to journal state snapshot", "err", err) + } + } else { + if snapBase, err = bc.snaps.Journal(bc.CurrentBlock().Root()); err != nil { + log.Error("Failed to journal state snapshot", "err", err) + } } } // Ensure the state of a recent block is also stored to disk before exiting. diff --git a/core/blockchain_repair_test.go b/core/blockchain_repair_test.go index d201bae14615..7ab2e9652e44 100644 --- a/core/blockchain_repair_test.go +++ b/core/blockchain_repair_test.go @@ -21,11 +21,11 @@ package core import ( - "fmt" "io/ioutil" "math/big" "os" "testing" + "time" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/consensus/ethash" @@ -1556,8 +1556,8 @@ func TestLongReorgedFastSyncingDeepRepair(t *testing.T) { func testRepair(t *testing.T, tt *rewindTest) { // It's hard to follow the test case, visualize the input - //log.Root().SetHandler(log.LvlFilterHandler(log.LvlTrace, log.StreamHandler(os.Stderr, log.TerminalFormat(true)))) - fmt.Println(tt.dump(true)) + // log.Root().SetHandler(log.LvlFilterHandler(log.LvlTrace, log.StreamHandler(os.Stderr, log.TerminalFormat(true)))) + // fmt.Println(tt.dump(true)) // Create a temporary persistent database datadir, err := ioutil.TempDir("", "") @@ -1576,12 +1576,13 @@ func testRepair(t *testing.T, tt *rewindTest) { var ( genesis = new(Genesis).MustCommit(db) engine = ethash.NewFullFaker() - cacheConfig = defaultCacheConfig + cacheConfig = &CacheConfig{ + TrieCleanLimit: 256, + TrieDirtyLimit: 256, + TrieTimeLimit: 5 * time.Minute, + SnapshotLimit: 0, // Disable snapshot + } ) - if !tt.enableSnapshot { - cacheConfig.SnapshotLimit = 0 - cacheConfig.SnapshotWait = false - } chain, err := NewBlockChain(db, cacheConfig, params.AllEthashProtocolChanges, engine, vm.Config{}, nil, nil) if err != nil { t.Fatalf("Failed to create chain: %v", err) diff --git a/core/blockchain_sethead_test.go b/core/blockchain_sethead_test.go index 1f605e60144e..e4f81c9c9daf 100644 --- a/core/blockchain_sethead_test.go +++ b/core/blockchain_sethead_test.go @@ -26,6 +26,7 @@ import ( "os" "strings" "testing" + "time" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/consensus/ethash" @@ -37,7 +38,6 @@ import ( // rewindTest is a test case for chain rollback upon user request. 
 type rewindTest struct {
-	enableSnapshot  bool   // Flag whether the snapshot is enabled
 	canonicalBlocks int    // Number of blocks to generate for the canonical chain (heavier)
 	sidechainBlocks int    // Number of blocks to generate for the side chain (lighter)
 	freezeThreshold uint64 // Block number until which to move things into the freezer
@@ -55,8 +56,7 @@ type rewindTest struct {
 
 func (tt *rewindTest) dump(crash bool) string {
 	buffer := new(strings.Builder)
-	fmt.Fprint(buffer, fmt.Sprintf("Chain(snapshot: %v):", tt.enableSnapshot))
-	fmt.Fprint(buffer, "\n G")
+	fmt.Fprint(buffer, "Chain:\n G")
 	for i := 0; i < tt.canonicalBlocks; i++ {
 		fmt.Fprintf(buffer, "->C%d", i+1)
 	}
@@ -1748,8 +1747,8 @@ func TestLongReorgedFastSyncingDeepSetHead(t *testing.T) {
 func testSetHead(t *testing.T, tt *rewindTest) {
 	// It's hard to follow the test case, visualize the input
-	//log.Root().SetHandler(log.LvlFilterHandler(log.LvlTrace, log.StreamHandler(os.Stderr, log.TerminalFormat(true))))
-	fmt.Println(tt.dump(false))
+	// log.Root().SetHandler(log.LvlFilterHandler(log.LvlTrace, log.StreamHandler(os.Stderr, log.TerminalFormat(true))))
+	// fmt.Println(tt.dump(false))
 
 	// Create a temporary persistent database
 	datadir, err := ioutil.TempDir("", "")
@@ -1764,10 +1766,15 @@ func testSetHead(t *testing.T, tt *rewindTest) {
 
 	// Initialize a fresh chain
 	var (
-		genesis     = new(Genesis).MustCommit(db)
-		engine      = ethash.NewFullFaker()
-		cacheConfig = defaultCacheConfig
+		genesis = new(Genesis).MustCommit(db)
+		engine  = ethash.NewFullFaker()
+		cacheConfig = &CacheConfig{
+			TrieCleanLimit: 256,
+			TrieDirtyLimit: 256,
+			TrieTimeLimit:  5 * time.Minute,
+			SnapshotLimit:  0, // Disable snapshot
+		}
 	)
-	if !tt.enableSnapshot {
-		cacheConfig.SnapshotLimit = 0
-		cacheConfig.SnapshotWait = false
-	}
 	chain, err := NewBlockChain(db, cacheConfig, params.AllEthashProtocolChanges, engine, vm.Config{}, nil, nil)
 	if err != nil {
 		t.Fatalf("Failed to create chain: %v", err)
 	}
diff --git a/core/blockchain_snapshot_test.go b/core/blockchain_snapshot_test.go
new file mode 100644
index 000000000000..8ea27eb37053
--- /dev/null
+++ b/core/blockchain_snapshot_test.go
@@ -0,0 +1,459 @@
+// Copyright 2020 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+// Tests that abnormal program termination (i.e. crash) and restart can recover
+// the snapshot properly if the snapshot is enabled.
+
+package core
+
+import (
+	"bytes"
+	"io/ioutil"
+	"os"
+	"testing"
+	"time"
+
+	"github.com/ethereum/go-ethereum/consensus/ethash"
+	"github.com/ethereum/go-ethereum/core/rawdb"
+	"github.com/ethereum/go-ethereum/core/vm"
+	"github.com/ethereum/go-ethereum/params"
+)
+
+// snapshotTest is a test case for snapshot recovery.
It can be used for
+// simulating these scenarios:
+// (i)   Geth restarts normally with valid legacy snapshot
+// (ii)  Geth restarts normally with valid new-format snapshot
+// (iii) Geth restarts after the crash, with broken legacy snapshot
+// (iv)  Geth restarts after the crash, with broken new-format snapshot
+// (v)   Geth restarts normally, but it's requested to be rewound to a lower point via SetHead
+// (vi)  Geth restarts normally with a stale snapshot
+type snapshotTest struct {
+	legacy  bool   // Flag whether the loaded snapshot is in legacy format
+	crash   bool   // Flag whether the Geth restarts from the previous crash
+	gapped  int    // Number of blocks to insert without enabling snapshot
+	setHead uint64 // Block number to set head back to
+
+	chainBlocks   int    // Number of blocks to generate for the canonical chain
+	snapshotBlock uint64 // Block number of the relevant snapshot disk layer
+	commitBlock   uint64 // Block number for which to commit the state to disk
+
+	expCanonicalBlocks int    // Number of canonical blocks expected to remain in the database (excl. genesis)
+	expHeadHeader      uint64 // Block number of the expected head header
+	expHeadFastBlock   uint64 // Block number of the expected head fast sync block
+	expHeadBlock       uint64 // Block number of the expected head full block
+	expSnapshotBottom  uint64 // The block height corresponding to the snapshot disk layer
+}
+
+// Tests a Geth restart with a valid snapshot. Before the shutdown, the whole
+// snapshot journal will be persisted correctly. In this case no snapshot
+// recovery is required.
+func TestRestartWithNewSnapshot(t *testing.T) {
+	testSnapshot(t, &snapshotTest{
+		legacy:             false,
+		crash:              false,
+		gapped:             0,
+		setHead:            0,
+		chainBlocks:        8,
+		snapshotBlock:      0,
+		commitBlock:        0,
+		expCanonicalBlocks: 8,
+		expHeadHeader:      8,
+		expHeadFastBlock:   8,
+		expHeadBlock:       8,
+		expSnapshotBottom:  0, // Initial disk layer built from genesis
+	})
+}
+
+// Tests a Geth restart with a valid but "legacy" snapshot. Before the shutdown,
+// the whole snapshot journal will be persisted correctly. In this case no
+// snapshot recovery is required.
+func TestRestartWithLegacySnapshot(t *testing.T) {
+	testSnapshot(t, &snapshotTest{
+		legacy:             true,
+		crash:              false,
+		gapped:             0,
+		setHead:            0,
+		chainBlocks:        8,
+		snapshotBlock:      0,
+		commitBlock:        0,
+		expCanonicalBlocks: 8,
+		expHeadHeader:      8,
+		expHeadFastBlock:   8,
+		expHeadBlock:       8,
+		expSnapshotBottom:  0, // Initial disk layer built from genesis
+	})
+}
+
+// Tests a Geth that crashed and restarts with a broken snapshot.
+// In this case the chain head should be rewound to the point
+// with available state. Also the new head must be lower
+// than the disk layer. But there is no committed point so the chain
+// should be rewound to genesis.
+func TestNoCommitCrashWithNewSnapshot(t *testing.T) {
+	testSnapshot(t, &snapshotTest{
+		legacy:             false,
+		crash:              true,
+		gapped:             0,
+		setHead:            0,
+		chainBlocks:        8,
+		snapshotBlock:      4,
+		commitBlock:        0,
+		expCanonicalBlocks: 8,
+		expHeadHeader:      8,
+		expHeadFastBlock:   8,
+		expHeadBlock:       0,
+		expSnapshotBottom:  4, // Last committed disk layer, wait recovery
+	})
+}
+
+// Tests a Geth that crashed and restarts with a broken snapshot.
+// In this case the chain head should be rewound to the point
+// with available state. Also the new head must be lower
+// than the disk layer. But there is only a low committed point so the
+// chain should be rewound to the committed point.
+func TestLowCommitCrashWithNewSnapshot(t *testing.T) {
+	testSnapshot(t, &snapshotTest{
+		legacy:             false,
+		crash:              true,
+		gapped:             0,
+		setHead:            0,
+		chainBlocks:        8,
+		snapshotBlock:      4,
+		commitBlock:        2,
+		expCanonicalBlocks: 8,
+		expHeadHeader:      8,
+		expHeadFastBlock:   8,
+		expHeadBlock:       2,
+		expSnapshotBottom:  4, // Last committed disk layer, wait recovery
+	})
+}
+
+// Tests a Geth was crashed and restarts with a broken snapshot.
+// In this case the chain head should be rewound to the point
+// with available state. And also the new head must be lower
+// than disk layer. But there is only a high committed point so the
+// chain should be rewound to genesis.
+func TestHighCommitCrashWithNewSnapshot(t *testing.T) {
+	testSnapshot(t, &snapshotTest{
+		legacy:             false,
+		crash:              true,
+		gapped:             0,
+		setHead:            0,
+		chainBlocks:        8,
+		snapshotBlock:      4,
+		commitBlock:        6,
+		expCanonicalBlocks: 8,
+		expHeadHeader:      8,
+		expHeadFastBlock:   8,
+		expHeadBlock:       0,
+		expSnapshotBottom:  4, // Last committed disk layer, wait recovery
+	})
+}
+
+// Tests a Geth was crashed and restarts with a broken and "legacy format"
+// snapshot. In this case the entire legacy snapshot should be discarded
+// and rebuilt from the new chain head.
+func TestNoCommitCrashWithLegacySnapshot(t *testing.T) {
+	testSnapshot(t, &snapshotTest{
+		legacy:             true,
+		crash:              true,
+		gapped:             0,
+		setHead:            0,
+		chainBlocks:        8,
+		snapshotBlock:      4,
+		commitBlock:        0,
+		expCanonicalBlocks: 8,
+		expHeadHeader:      8,
+		expHeadFastBlock:   8,
+		expHeadBlock:       0,
+		expSnapshotBottom:  4, // Last committed disk layer, wait recovery
+	})
+}
+
+// Tests a Geth was crashed and restarts with a broken and "legacy format"
+// snapshot. In this case the entire legacy snapshot should be discarded
+// and rebuilt from the new chain head.
+//
+// The special thing in this test case is: we only persist the snapshot
+// journal during the shutdown previously. In this case after the crash
+// there is no journal at all.
+func TestLowCommitCrashWithLegacySnapshot(t *testing.T) {
+	testSnapshot(t, &snapshotTest{
+		legacy:             true,
+		crash:              true,
+		gapped:             0,
+		setHead:            0,
+		chainBlocks:        8,
+		snapshotBlock:      4,
+		commitBlock:        2,
+		expCanonicalBlocks: 8,
+		expHeadHeader:      8,
+		expHeadFastBlock:   8,
+		expHeadBlock:       2,
+		expSnapshotBottom:  4, // Last committed disk layer, wait recovery
+	})
+}
+
+// Tests a Geth was crashed and restarts with a broken and "legacy format"
+// snapshot. In this case the entire legacy snapshot should be discarded
+// and rebuilt from the new chain head.
+func TestHighCommitCrashWithLegacySnapshot(t *testing.T) {
+	testSnapshot(t, &snapshotTest{
+		legacy:             true,
+		crash:              true,
+		gapped:             0,
+		setHead:            0,
+		chainBlocks:        8,
+		snapshotBlock:      4,
+		commitBlock:        6,
+		expCanonicalBlocks: 8,
+		expHeadHeader:      8,
+		expHeadFastBlock:   8,
+		expHeadBlock:       0,
+		expSnapshotBottom:  4, // Last committed disk layer, wait recovery
+	})
+}
+
+// Tests a Geth was running with snapshot enabled. Then restarts without
+// enabling snapshot and after that re-enable the snapshot again. In this
+// case the snapshot should be rebuilt with latest chain head.
+func TestGappedNewSnapshot(t *testing.T) {
+	testSnapshot(t, &snapshotTest{
+		legacy:             false,
+		crash:              false,
+		gapped:             2,
+		setHead:            0,
+		chainBlocks:        8,
+		snapshotBlock:      0,
+		commitBlock:        0,
+		expCanonicalBlocks: 10,
+		expHeadHeader:      10,
+		expHeadFastBlock:   10,
+		expHeadBlock:       10,
+		expSnapshotBottom:  10, // rebuild from the head
+	})
+}
+
+// Tests a Geth was running with legacy snapshot enabled.
Then restarts
+// without enabling snapshot and after that re-enable the snapshot again.
+// In this case the snapshot should be rebuilt with latest chain head.
+func TestGappedLegacySnapshot(t *testing.T) {
+	testSnapshot(t, &snapshotTest{
+		legacy:             true,
+		crash:              false,
+		gapped:             2,
+		setHead:            0,
+		chainBlocks:        8,
+		snapshotBlock:      0,
+		commitBlock:        0,
+		expCanonicalBlocks: 10,
+		expHeadHeader:      10,
+		expHeadFastBlock:   10,
+		expHeadBlock:       10,
+		expSnapshotBottom:  10, // rebuild from the head
+	})
+}
+
+// Tests the Geth was running with snapshot enabled and then a resetHead
+// is required. Check after the reset everything is rolled back correctly.
+func TestSetHeadWithNewSnapshot(t *testing.T) {
+	testSnapshot(t, &snapshotTest{
+		legacy:             false,
+		crash:              false,
+		gapped:             0,
+		setHead:            4,
+		chainBlocks:        8,
+		snapshotBlock:      0,
+		commitBlock:        2,
+		expCanonicalBlocks: 4,
+		expHeadHeader:      4,
+		expHeadFastBlock:   4,
+		expHeadBlock:       2,
+		expSnapshotBottom:  2, // rebuild from the head
+	})
+}
+
+// Tests the Geth was running with legacy snapshot enabled and then a resetHead
+// is required. Check after the reset everything is rolled back correctly.
+func TestSetHeadWithLegacySnapshot(t *testing.T) {
+	testSnapshot(t, &snapshotTest{
+		legacy:             true,
+		crash:              false,
+		gapped:             0,
+		setHead:            4,
+		chainBlocks:        8,
+		snapshotBlock:      0,
+		commitBlock:        2,
+		expCanonicalBlocks: 4,
+		expHeadHeader:      4,
+		expHeadFastBlock:   4,
+		expHeadBlock:       2,
+		expSnapshotBottom:  2, // rebuild from the head
+	})
+}
+
+func testSnapshot(t *testing.T, tt *snapshotTest) {
+	// It's hard to follow the test case, visualize the input
+	// log.Root().SetHandler(log.LvlFilterHandler(log.LvlTrace, log.StreamHandler(os.Stderr, log.TerminalFormat(true))))
+	// fmt.Println(tt.dump(true))
+
+	// Create a temporary persistent database
+	datadir, err := ioutil.TempDir("", "")
+	if err != nil {
+		t.Fatalf("Failed to create temporary datadir: %v", err)
+	}
+	os.RemoveAll(datadir)
+
+	db, err := rawdb.NewLevelDBDatabaseWithFreezer(datadir, 0, 0, datadir, "")
+	if err != nil {
+		t.Fatalf("Failed to create persistent database: %v", err)
+	}
+	defer db.Close() // Might double close, should be fine
+
+	// Initialize a fresh chain
+	var (
+		genesis = new(Genesis).MustCommit(db)
+		engine  = ethash.NewFullFaker()
+		gendb   = rawdb.NewMemoryDatabase()
+
+		// Snapshot is enabled, the first snapshot is created from the Genesis.
+		// The snapshot memory allowance is 256MB, which means no snapshot flush
+		// will happen during the block insertion.
+		cacheConfig = defaultCacheConfig
+	)
+	chain, err := NewBlockChain(db, cacheConfig, params.AllEthashProtocolChanges, engine, vm.Config{}, nil, nil)
+	if err != nil {
+		t.Fatalf("Failed to create chain: %v", err)
+	}
+	blocks, _ := GenerateChain(params.TestChainConfig, genesis, engine, gendb, tt.chainBlocks, func(i int, b *BlockGen) {})
+
+	// Insert the blocks with configured settings.
+	var breakpoints []uint64
+	if tt.commitBlock > tt.snapshotBlock {
+		breakpoints = append(breakpoints, tt.snapshotBlock, tt.commitBlock)
+	} else {
+		breakpoints = append(breakpoints, tt.commitBlock, tt.snapshotBlock)
+	}
+	var startPoint uint64
+	for _, point := range breakpoints {
+		if _, err := chain.InsertChain(blocks[startPoint:point]); err != nil {
+			t.Fatalf("Failed to import canonical chain start: %v", err)
+		}
+		startPoint = point
+
+		if tt.commitBlock > 0 && tt.commitBlock == point {
+			chain.stateCache.TrieDB().Commit(blocks[point-1].Root(), true, nil)
+		}
+		if tt.snapshotBlock > 0 && tt.snapshotBlock == point {
+			chain.snaps.Cap(blocks[point-1].Root(), 0)
+
+			diskRoot, blockRoot := chain.snaps.DiskRoot(), blocks[point-1].Root()
+			if !bytes.Equal(diskRoot.Bytes(), blockRoot.Bytes()) {
+				t.Fatalf("Failed to flush disk layer change, want %x, got %x", blockRoot, diskRoot)
+			}
+		}
+	}
+	if _, err := chain.InsertChain(blocks[startPoint:]); err != nil {
+		t.Fatalf("Failed to import canonical chain tail: %v", err)
+	}
+	// Set the flag for writing legacy journal if necessary
+	if tt.legacy {
+		chain.writeLegacyJournal = true
+	}
+	// Pull the plug on the database, simulating a hard crash
+	if tt.crash {
+		db.Close()
+
+		// Start a new blockchain back up and see where the repait leads us
+		db, err = rawdb.NewLevelDBDatabaseWithFreezer(datadir, 0, 0, datadir, "")
+		if err != nil {
+			t.Fatalf("Failed to reopen persistent database: %v", err)
+		}
+		defer db.Close()
+
+		chain, err = NewBlockChain(db, nil, params.AllEthashProtocolChanges, engine, vm.Config{}, nil, nil)
+		if err != nil {
+			t.Fatalf("Failed to recreate chain: %v", err)
+		}
+		defer chain.Stop()
+	} else {
+		// Stop the chain normally, then restart
+		chain.Stop()
+
+		// Insert blocks without enabling snapshot if gapping is required.
+		if tt.gapped > 0 {
+			gappedBlocks, _ := GenerateChain(params.TestChainConfig, blocks[len(blocks)-1], engine, gendb, tt.gapped, func(i int, b *BlockGen) {})
+
+			// Insert a few more blocks without enabling snapshot
+			var cacheConfig = &CacheConfig{
+				TrieCleanLimit: 256,
+				TrieDirtyLimit: 256,
+				TrieTimeLimit:  5 * time.Minute,
+				SnapshotLimit:  0,
+			}
+			chain, err = NewBlockChain(db, cacheConfig, params.AllEthashProtocolChanges, engine, vm.Config{}, nil, nil)
+			if err != nil {
+				t.Fatalf("Failed to recreate chain: %v", err)
+			}
+			chain.InsertChain(gappedBlocks)
+			chain.Stop()
+
+			chain, err = NewBlockChain(db, nil, params.AllEthashProtocolChanges, engine, vm.Config{}, nil, nil)
+			if err != nil {
+				t.Fatalf("Failed to recreate chain: %v", err)
+			}
+			defer chain.Stop()
+		} else if tt.setHead != 0 {
+			// Rewind the chain if setHead operation is required.
+			chain.SetHead(tt.setHead)
+			chain.Stop()
+
+			chain, err = NewBlockChain(db, nil, params.AllEthashProtocolChanges, engine, vm.Config{}, nil, nil)
+			if err != nil {
+				t.Fatalf("Failed to recreate chain: %v", err)
+			}
+			defer chain.Stop()
+		} else {
+			// Restart the chain normally
+			chain, err = NewBlockChain(db, nil, params.AllEthashProtocolChanges, engine, vm.Config{}, nil, nil)
+			if err != nil {
+				t.Fatalf("Failed to recreate chain: %v", err)
+			}
+			defer chain.Stop()
+		}
+	}
+	// Iterate over all the remaining blocks and ensure there are no gaps
+	verifyNoGaps(t, chain, true, blocks)
+	verifyCutoff(t, chain, true, blocks, tt.expCanonicalBlocks)
+
+	if head := chain.CurrentHeader(); head.Number.Uint64() != tt.expHeadHeader {
+		t.Errorf("Head header mismatch: have %d, want %d", head.Number, tt.expHeadHeader)
+	}
+	if head := chain.CurrentFastBlock(); head.NumberU64() != tt.expHeadFastBlock {
+		t.Errorf("Head fast block mismatch: have %d, want %d", head.NumberU64(), tt.expHeadFastBlock)
+	}
+	if head := chain.CurrentBlock(); head.NumberU64() != tt.expHeadBlock {
+		t.Errorf("Head block mismatch: have %d, want %d", head.NumberU64(), tt.expHeadBlock)
+	}
+	if tt.expSnapshotBottom != 0 {
+		block := chain.GetBlockByNumber(tt.expSnapshotBottom)
+		if block == nil {
+			t.Errorf("The corresponding block[%d] of snapshot disk layer is missing", tt.expSnapshotBottom)
+		} else if !bytes.Equal(chain.snaps.DiskRoot().Bytes(), block.Root().Bytes()) {
+			t.Errorf("The snapshot disk layer root is incorrect, want %x, got %x", block.Root(), chain.snaps.DiskRoot())
+		}
+	}
+}
diff --git a/core/state/snapshot/journal.go b/core/state/snapshot/journal.go
index 70ddecbb7400..de2458e22b82 100644
--- a/core/state/snapshot/journal.go
+++ b/core/state/snapshot/journal.go
@@ -363,3 +363,50 @@ func (dl *diffLayer) Journal(buffer *bytes.Buffer) (common.Hash, error) {
 	log.Debug("Journalled diff layer", "root", dl.root, "parent", dl.parent.Root())
 	return base, nil
 }
+
+// LegacyJournal writes the persistent layer generator stats into a buffer
+// to be stored in the database as the snapshot journal.
+//
+// Note it's the legacy version which is only used in testing right now.
+func (dl *diskLayer) LegacyJournal(buffer *bytes.Buffer) (common.Hash, error) {
+	// If the snapshot is currently being generated, abort it
+	var stats *generatorStats
+	if dl.genAbort != nil {
+		abort := make(chan *generatorStats)
+		dl.genAbort <- abort
+
+		if stats = <-abort; stats != nil {
+			stats.Log("Journalling in-progress snapshot", dl.root, dl.genMarker)
+		}
+	}
+	// Ensure the layer didn't get stale
+	dl.lock.RLock()
+	defer dl.lock.RUnlock()
+
+	if dl.stale {
+		return common.Hash{}, ErrSnapshotStale
+	}
+	// Write out the generator marker
+	entry := journalGenerator{
+		Done:   dl.genMarker == nil,
+		Marker: dl.genMarker,
+	}
+	if stats != nil {
+		entry.Wiping = (stats.wiping != nil)
+		entry.Accounts = stats.accounts
+		entry.Slots = stats.slots
+		entry.Storage = uint64(stats.storage)
+	}
+	if err := rlp.Encode(buffer, entry); err != nil {
+		return common.Hash{}, err
+	}
+	return dl.root, nil
+}
+
+// Journal writes the memory layer contents into a buffer to be stored in the
+// database as the snapshot journal.
+//
+// Note it's the legacy version which is only used in testing right now.
+func (dl *diffLayer) LegacyJournal(buffer *bytes.Buffer) (common.Hash, error) {
+	return dl.Journal(buffer)
+}
diff --git a/core/state/snapshot/snapshot.go b/core/state/snapshot/snapshot.go
index fd1daeab835d..d23199d5bda8 100644
--- a/core/state/snapshot/snapshot.go
+++ b/core/state/snapshot/snapshot.go
@@ -137,6 +137,10 @@ type snapshot interface {
 	// flattening everything down (bad for reorgs).
 	Journal(buffer *bytes.Buffer) (common.Hash, error)
 
+	// LegacyJournal is basically identical to Journal. It's the legacy version for
+	// flushing legacy journal. Now the only purpose of this function is for testing.
+	LegacyJournal(buffer *bytes.Buffer) (common.Hash, error)
+
 	// Stale return whether this layer has become stale (was flattened across) or
 	// if it's still live.
 	Stale() bool
@@ -588,6 +592,29 @@ func (t *Tree) Journal(root common.Hash) (common.Hash, error) {
 	return base, nil
 }
 
+// LegacyJournal is basically identical to Journal. It's the legacy
+// version for flushing legacy journal. Now the only purpose of this
+// function is for testing.
+func (t *Tree) LegacyJournal(root common.Hash) (common.Hash, error) {
+	// Retrieve the head snapshot to journal from
+	snap := t.Snapshot(root)
+	if snap == nil {
+		return common.Hash{}, fmt.Errorf("snapshot [%#x] missing", root)
+	}
+	// Run the journaling
+	t.lock.Lock()
+	defer t.lock.Unlock()
+
+	journal := new(bytes.Buffer)
+	base, err := snap.(snapshot).LegacyJournal(journal)
+	if err != nil {
+		return common.Hash{}, err
+	}
+	// Store the journal into the database and return
+	rawdb.WriteSnapshotJournal(t.diskdb, journal.Bytes())
+	return base, nil
+}
+
 // Rebuild wipes all available snapshot data from the persistent database and
 // discard all caches and diff layers. Afterwards, it starts a new snapshot
 // generator with the given root hash.
@@ -695,3 +722,11 @@ func (t *Tree) generating() (bool, error) {
 	defer layer.lock.RUnlock()
 	return layer.genMarker != nil, nil
 }
+
+// DiskRoot is an external helper function to return the disk layer root.
+func (t *Tree) DiskRoot() common.Hash {
+	t.lock.Lock()
+	defer t.lock.Unlock()
+
+	return t.diskRoot()
+}

From 959f3931dccdccd77c7ae9929fd88f22c1589956 Mon Sep 17 00:00:00 2001
From: rjl493456442
Date: Thu, 24 Sep 2020 14:40:43 +0800
Subject: [PATCH 10/19] core/state: fix comment

---
 core/state/snapshot/journal.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/core/state/snapshot/journal.go b/core/state/snapshot/journal.go
index de2458e22b82..befee0012be7 100644
--- a/core/state/snapshot/journal.go
+++ b/core/state/snapshot/journal.go
@@ -185,7 +185,7 @@ func loadSnapshot(diskdb ethdb.KeyValueStore, triedb *trie.Database, cache int,
 		// the disk layer is always higher than chain head. It can
 		// be eventually recovered when the chain head beyonds the
 		// disk layer.
- log.Warn("Snapshot is not continous with chain, wait recovery", "snaproot", head, "chainroot", root) + log.Warn("Snapshot is not continous with chain", "snaproot", head, "chainroot", root) } // Everything loaded correctly, resume any suspended operations if !generator.Done { From 84af27ee6e56d50703bd4e2b87fb052afbb9859c Mon Sep 17 00:00:00 2001 From: rjl493456442 Date: Thu, 24 Sep 2020 15:55:54 +0800 Subject: [PATCH 11/19] core: add recovery flag for snapshot --- core/blockchain.go | 29 ++++++++++++++++++++++------ core/rawdb/accessors_snapshot.go | 33 ++++++++++++++++++++++++++++++++ core/rawdb/schema.go | 3 +++ core/state/snapshot/snapshot.go | 4 ++++ 4 files changed, 63 insertions(+), 6 deletions(-) diff --git a/core/blockchain.go b/core/blockchain.go index a51821c23c40..0ed59bf864f8 100644 --- a/core/blockchain.go +++ b/core/blockchain.go @@ -280,7 +280,7 @@ func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, chainConfig *par return nil, err } // Make sure the state associated with the block is available - head, recoverSnapshot := bc.CurrentBlock(), false + head := bc.CurrentBlock() if _, err := state.New(head.Root(), bc.stateCache, bc.snaps); err != nil { // Head state is missing, before the state recovery, find out the // disk layer point of snapshot(if it's enabled). Make sure the @@ -288,18 +288,23 @@ func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, chainConfig *par var diskRoot common.Hash if bc.cacheConfig.SnapshotLimit > 0 { diskRoot = rawdb.ReadSnapshotRoot(bc.db) - recoverSnapshot = true } if diskRoot != (common.Hash{}) { log.Warn("Head state missing, repairing", "number", head.Number(), "hash", head.Hash(), "snaproot", diskRoot) - var marked bool + + var snapDisk uint64 if err := bc.SetHeadWithCondition(head.NumberU64(), func(block *types.Block, canStop *bool) { - if block.Root() == diskRoot && !marked { - marked, *canStop = true, true + if block.Root() == diskRoot && snapDisk == 0 { + snapDisk, *canStop = block.NumberU64(), true } }); err != nil { return nil, err } + // Chain is rewound, persist a flag in the disk to indicate + // the snapshot is in recovery procedure. + if snapDisk != 0 { + rawdb.WriteSnapshotRecoveryFlag(bc.db, snapDisk) + } } else { log.Warn("Head state missing, repairing", "number", head.Number(), "hash", head.Hash()) if err := bc.SetHead(head.NumberU64()); err != nil { @@ -360,7 +365,19 @@ func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, chainConfig *par } // Load any existing snapshot, regenerating it if loading failed if bc.cacheConfig.SnapshotLimit > 0 { - bc.snaps = snapshot.New(bc.db, bc.stateCache.TrieDB(), bc.cacheConfig.SnapshotLimit, bc.CurrentBlock().Root(), !bc.cacheConfig.SnapshotWait, recoverSnapshot) + var ( + recovery bool + flag = rawdb.ReadSnapshotRecoveryFlag(bc.db) + ) + // The persisted snapshot recovery height is higher than + // current chain head. At least it means the snapshot disk + // layer is higher than current head. Enable the snapshot + // recovery mode. 
+ if flag != nil && *flag > bc.CurrentBlock().NumberU64() { + recovery = true + log.Info("Enable snapshot recovery", "chainhead", bc.CurrentBlock().NumberU64(), "disklayer", *flag) + } + bc.snaps = snapshot.New(bc.db, bc.stateCache.TrieDB(), bc.cacheConfig.SnapshotLimit, bc.CurrentBlock().Root(), !bc.cacheConfig.SnapshotWait, recovery) } // Take ownership of this particular state go bc.update() diff --git a/core/rawdb/accessors_snapshot.go b/core/rawdb/accessors_snapshot.go index a83e7cd69eb6..656b3c22a7fa 100644 --- a/core/rawdb/accessors_snapshot.go +++ b/core/rawdb/accessors_snapshot.go @@ -17,6 +17,7 @@ package rawdb import ( + "encoding/binary" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/log" @@ -141,3 +142,35 @@ func DeleteSnapshotGenerator(db ethdb.KeyValueWriter) { log.Crit("Failed to remove snapshot generator", "err", err) } } + +// ReadSnapshotRecoveryFlag retrieves the serialized snapshot recovery flag +// saved at the last recovery procedure. +func ReadSnapshotRecoveryFlag(db ethdb.KeyValueReader) *uint64 { + data, _ := db.Get(snapshotRecoveryKey) + if len(data) == 0 { + return nil + } + if len(data) != 8 { + return nil + } + number := binary.BigEndian.Uint64(data) + return &number +} + +// WriteSnapshotRecoveryFlag stores the serialized snapshot recovery flag to +// save in the recovery procedure. +func WriteSnapshotRecoveryFlag(db ethdb.KeyValueWriter, number uint64) { + var buf [8]byte + binary.BigEndian.PutUint64(buf[:], number) + if err := db.Put(snapshotRecoveryKey, buf[:]); err != nil { + log.Crit("Failed to store snapshot recovery flag", "err", err) + } +} + +// DeleteSnapshotRecoveryFlag deletes the serialized snapshot recovery flag +// saved at the last recovery procedure. +func DeleteSnapshotRecoveryFlag(db ethdb.KeyValueWriter) { + if err := db.Delete(snapshotRecoveryKey); err != nil { + log.Crit("Failed to remove snapshot recovery flag", "err", err) + } +} diff --git a/core/rawdb/schema.go b/core/rawdb/schema.go index 7d43e7189881..dbc5025d5d2c 100644 --- a/core/rawdb/schema.go +++ b/core/rawdb/schema.go @@ -54,6 +54,9 @@ var ( // snapshotGeneratorKey tracks the snapshot generation marker across restarts. snapshotGeneratorKey = []byte("SnapshotGenerator") + // snapshotRecoveryKey tracks the snapshot recovery marker across restarts. + snapshotRecoveryKey = []byte("SnapshotRecovery") + // txIndexTailKey tracks the oldest block whose transactions have been indexed. txIndexTailKey = []byte("TransactionIndexTail") diff --git a/core/state/snapshot/snapshot.go b/core/state/snapshot/snapshot.go index d23199d5bda8..732433dda147 100644 --- a/core/state/snapshot/snapshot.go +++ b/core/state/snapshot/snapshot.go @@ -622,6 +622,10 @@ func (t *Tree) Rebuild(root common.Hash) { t.lock.Lock() defer t.lock.Unlock() + // Firstly delete any recovery flag in the database. Becasue now we are + // building a brand new snapshot. 
+	rawdb.DeleteSnapshotRecoveryFlag(t.diskdb)
+
 	// Track whether there's a wipe currently running and keep it alive if so
 	var wiper chan struct{}

From 95bf1a4cfc9988f604089883d021c67a58789e0d Mon Sep 17 00:00:00 2001
From: rjl493456442
Date: Thu, 24 Sep 2020 20:25:01 +0800
Subject: [PATCH 12/19] core: add restart after start-after-crash tests

---
 core/blockchain_snapshot_test.go | 12 +++++++++++-
 1 file changed, 11 insertions(+), 1 deletion(-)

diff --git a/core/blockchain_snapshot_test.go b/core/blockchain_snapshot_test.go
index 8ea27eb37053..bf5fb9ba2cc3 100644
--- a/core/blockchain_snapshot_test.go
+++ b/core/blockchain_snapshot_test.go
@@ -377,13 +377,23 @@ func testSnapshot(t *testing.T, tt *snapshotTest) {
 	if tt.crash {
 		db.Close()
 
-		// Start a new blockchain back up and see where the repait leads us
+		// Start a new blockchain back up and see where the repair leads us
 		db, err = rawdb.NewLevelDBDatabaseWithFreezer(datadir, 0, 0, datadir, "")
 		if err != nil {
 			t.Fatalf("Failed to reopen persistent database: %v", err)
 		}
 		defer db.Close()
 
+		// The interesting thing is: instead of starting the blockchain after
+		// the crash, we do restart twice here: one after the crash and one
+		// after the normal stop. It's used to ensure the broken snapshot
+		// can be detected all the time.
+		chain, err = NewBlockChain(db, nil, params.AllEthashProtocolChanges, engine, vm.Config{}, nil, nil)
+		if err != nil {
+			t.Fatalf("Failed to recreate chain: %v", err)
+		}
+		chain.Stop()
+
 		chain, err = NewBlockChain(db, nil, params.AllEthashProtocolChanges, engine, vm.Config{}, nil, nil)
 		if err != nil {
 			t.Fatalf("Failed to recreate chain: %v", err)

From e6355e254bdbab832c3b7915a2afb4c5a9b41aac Mon Sep 17 00:00:00 2001
From: rjl493456442
Date: Thu, 24 Sep 2020 20:27:53 +0800
Subject: [PATCH 13/19] core/rawdb: fix imports

---
 core/rawdb/accessors_snapshot.go | 1 +
 1 file changed, 1 insertion(+)

diff --git a/core/rawdb/accessors_snapshot.go b/core/rawdb/accessors_snapshot.go
index 656b3c22a7fa..adf0ef9e8008 100644
--- a/core/rawdb/accessors_snapshot.go
+++ b/core/rawdb/accessors_snapshot.go
@@ -18,6 +18,7 @@ package rawdb
 
 import (
 	"encoding/binary"
+
 	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/ethdb"
 	"github.com/ethereum/go-ethereum/log"

From 64faa3b68c5f3e708969a04edea8faad97bb9988 Mon Sep 17 00:00:00 2001
From: rjl493456442
Date: Wed, 28 Oct 2020 19:50:46 +0800
Subject: [PATCH 14/19] core: fix tests

---
 core/blockchain_snapshot_test.go | 168 +++++++++++++++++--------------
 core/state/snapshot/journal.go   |  46 ++++++++-
 2 files changed, 135 insertions(+), 79 deletions(-)

diff --git a/core/blockchain_snapshot_test.go b/core/blockchain_snapshot_test.go
index bf5fb9ba2cc3..92517fe0e76b 100644
--- a/core/blockchain_snapshot_test.go
+++ b/core/blockchain_snapshot_test.go
@@ -29,6 +29,7 @@ import (
 	"github.com/ethereum/go-ethereum/consensus/ethash"
 	"github.com/ethereum/go-ethereum/core/rawdb"
 	"github.com/ethereum/go-ethereum/core/vm"
+	"github.com/ethereum/go-ethereum/log"
 	"github.com/ethereum/go-ethereum/params"
 )
 
@@ -101,7 +102,8 @@ func TestRestartWithLegacySnapshot(t *testing.T) {
 // In this case the chain head should be rewound to the point
 // with available state. And also the new head must be lower
 // than disk layer. But there is no committed point so the chain
-// should be rewound to genesis.
+// should be rewound to genesis and the disk layer should be left
+// for recovery.
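As an aside, the recovery marker that these accessor functions read and write is nothing more than a fixed 8-byte big-endian block number stored under a dedicated key. The following is a minimal, self-contained round-trip sketch, not the real implementation: the map stands in for the on-disk key-value store, and the key literal merely mirrors snapshotRecoveryKey.

    package main

    import (
    	"encoding/binary"
    	"fmt"
    )

    // store is an illustrative stand-in for the on-disk key-value database.
    var store = map[string][]byte{}

    // writeRecoveryNumber mirrors the write accessor: fixed-width big-endian.
    func writeRecoveryNumber(number uint64) {
    	var buf [8]byte
    	binary.BigEndian.PutUint64(buf[:], number)
    	store["SnapshotRecovery"] = buf[:]
    }

    // readRecoveryNumber mirrors the read accessor: a missing or wrongly-sized
    // entry is reported as "no marker" via a nil pointer.
    func readRecoveryNumber() *uint64 {
    	data := store["SnapshotRecovery"]
    	if len(data) != 8 {
    		return nil
    	}
    	number := binary.BigEndian.Uint64(data)
    	return &number
    }

    func main() {
    	writeRecoveryNumber(4)
    	if number := readRecoveryNumber(); number != nil {
    		fmt.Println("snapshot disk layer persisted at block", *number) // block 4
    	}
    }

The fixed width makes the marker trivially validatable on read, which is why a malformed entry is simply treated as absent rather than as an error.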
 func TestNoCommitCrashWithNewSnapshot(t *testing.T) {
 	testSnapshot(t, &snapshotTest{
 		legacy:             false,
@@ -123,7 +125,8 @@ func TestNoCommitCrashWithNewSnapshot(t *testing.T) {
 // In this case the chain head should be rewound to the point
 // with available state. And also the new head must be lower
 // than disk layer. But there is only a low committed point so the
-// chain should be rewound to committed point.
+// chain should be rewound to committed point and the disk layer
+// should be left for recovery.
 func TestLowCommitCrashWithNewSnapshot(t *testing.T) {
 	testSnapshot(t, &snapshotTest{
 		legacy:             false,
@@ -145,7 +148,8 @@ func TestLowCommitCrashWithNewSnapshot(t *testing.T) {
 // In this case the chain head should be rewound to the point
 // with available state. And also the new head must be lower
 // than disk layer. But there is only a high committed point so the
-// chain should be rewound to genesis.
+// chain should be rewound to genesis and the disk layer should be
+// left for recovery.
 func TestHighCommitCrashWithNewSnapshot(t *testing.T) {
 	testSnapshot(t, &snapshotTest{
 		legacy:             false,
@@ -165,7 +169,8 @@ func TestHighCommitCrashWithNewSnapshot(t *testing.T) {
 
 // Tests a Geth was crashed and restarts with a broken and "legacy format"
 // snapshot. In this case the entire legacy snapshot should be discarded
-// and rebuilt from the new chain head.
+// and rebuilt from the new chain head. The new head here refers to the
+// genesis because there is no committed point.
 func TestNoCommitCrashWithLegacySnapshot(t *testing.T) {
 	testSnapshot(t, &snapshotTest{
 		legacy:             true,
@@ -179,17 +184,14 @@ func TestNoCommitCrashWithLegacySnapshot(t *testing.T) {
 		expHeadHeader:      8,
 		expHeadFastBlock:   8,
 		expHeadBlock:       0,
-		expSnapshotBottom:  4, // Last committed disk layer, wait recovery
+		expSnapshotBottom:  0, // Rebuilt snapshot from the latest HEAD(genesis)
 	})
 }
 
 // Tests a Geth was crashed and restarts with a broken and "legacy format"
 // snapshot. In this case the entire legacy snapshot should be discarded
-// and rebuilt from the new chain head.
-//
-// The special thing in this test case is: we only persist the snapshot
-// journal during the shutdown previously. In this case after the crash
-// there is no journal at all.
+// and rebuilt from the new chain head. The new head here refers to the
+// block-2 because it's committed into the disk.
 func TestLowCommitCrashWithLegacySnapshot(t *testing.T) {
 	testSnapshot(t, &snapshotTest{
 		legacy:             true,
@@ -203,13 +205,18 @@ func TestLowCommitCrashWithLegacySnapshot(t *testing.T) {
 		expHeadHeader:      8,
 		expHeadFastBlock:   8,
 		expHeadBlock:       2,
-		expSnapshotBottom:  4, // Last committed disk layer, wait recovery
+		expSnapshotBottom:  2, // Rebuilt snapshot from the latest HEAD
 	})
 }
 
 // Tests a Geth was crashed and restarts with a broken and "legacy format"
 // snapshot. In this case the entire legacy snapshot should be discarded
 // and rebuilt from the new chain head.
+// The new head here refers to the genesis, the reason is:
+// - the state of block-6 is committed into the disk
+// - the legacy disk layer of block-4 is committed into the disk
+// - the head is rewound to the genesis in order to find an available
+//   state lower than disk layer
 func TestHighCommitCrashWithLegacySnapshot(t *testing.T) {
 	testSnapshot(t, &snapshotTest{
 		legacy:             true,
@@ -223,7 +230,7 @@ func TestHighCommitCrashWithLegacySnapshot(t *testing.T) {
 		expHeadHeader:      8,
 		expHeadFastBlock:   8,
 		expHeadBlock:       0,
-		expSnapshotBottom:  4, // Last committed disk layer, wait recovery
+		expSnapshotBottom:  0, // Rebuilt snapshot from the latest HEAD(genesis)
 	})
 }
 
@@ -243,7 +250,7 @@ func TestGappedNewSnapshot(t *testing.T) {
 		expHeadHeader:      10,
 		expHeadFastBlock:   10,
 		expHeadBlock:       10,
-		expSnapshotBottom:  10, // rebuild from the head
+		expSnapshotBottom:  10, // Rebuilt snapshot from the latest HEAD
 	})
 }
 
@@ -263,12 +270,13 @@ func TestGappedLegacySnapshot(t *testing.T) {
 		expHeadHeader:      10,
 		expHeadFastBlock:   10,
 		expHeadBlock:       10,
-		expSnapshotBottom:  10, // rebuild from the head
+		expSnapshotBottom:  10, // Rebuilt snapshot from the latest HEAD
 	})
 }
 
-// Tests the Geth was running with snapshot enabled and then a resetHead
-// is required. Check after the reset everything is rolled back correctly.
+// Tests the Geth was running with snapshot enabled and resetHead is applied.
+// In this case the head is rewound to the target(with state available). After
+// that the chain is restarted and the original disk layer is kept.
 func TestSetHeadWithNewSnapshot(t *testing.T) {
 	testSnapshot(t, &snapshotTest{
 		legacy:             false,
@@ -277,17 +285,18 @@ func TestSetHeadWithNewSnapshot(t *testing.T) {
 		setHead:            4,
 		chainBlocks:        8,
 		snapshotBlock:      0,
-		commitBlock:        2,
+		commitBlock:        0,
 		expCanonicalBlocks: 4,
 		expHeadHeader:      4,
 		expHeadFastBlock:   4,
-		expHeadBlock:       2,
-		expSnapshotBottom:  2, // rebuild from the head
+		expHeadBlock:       4,
+		expSnapshotBottom:  0, // The initial disk layer is built from the genesis
 	})
 }
 
-// Tests the Geth was running with legacy snapshot enabled and then a resetHead
-// is required. Check after the reset everything is rolled back correctly.
+// Tests the Geth was running with snapshot(legacy-format) enabled and resetHead
+// is applied. In this case the head is rewound to the target(with state available).
+// After that the chain is restarted and the original disk layer is kept.
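The expectations in these crash cases all follow one rule: after a crash the head must land on a committed state, and for the head to stay off genesis that committed state has to sit strictly below the snapshot disk layer. A toy encoding of that rule, purely illustrative and under the stated assumption that only genesis and commitBlock have state persisted:

    package main

    import "fmt"

    // expectedHead captures the rewind rule exercised by the crash tests above:
    // the post-crash head is the committed block if it sits strictly below the
    // snapshot disk layer, otherwise the rewind falls all the way back to genesis.
    func expectedHead(commitBlock, snapshotBlock uint64) uint64 {
    	if commitBlock > 0 && commitBlock < snapshotBlock {
    		return commitBlock
    	}
    	return 0
    }

    func main() {
    	fmt.Println(expectedHead(0, 4)) // 0: no commit, rewound to genesis
    	fmt.Println(expectedHead(2, 4)) // 2: low commit, rewound to the committed point
    	fmt.Println(expectedHead(6, 4)) // 0: high commit, still rewound to genesis
    }

The three outputs match the expHeadBlock values of the NoCommit, LowCommit and HighCommit cases respectively, for both the new-format and legacy variants.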
 func TestSetHeadWithLegacySnapshot(t *testing.T) {
 	testSnapshot(t, &snapshotTest{
 		legacy:             true,
@@ -296,18 +305,18 @@ func TestSetHeadWithLegacySnapshot(t *testing.T) {
 		setHead:            4,
 		chainBlocks:        8,
 		snapshotBlock:      0,
-		commitBlock:        2,
+		commitBlock:        0,
 		expCanonicalBlocks: 4,
 		expHeadHeader:      4,
 		expHeadFastBlock:   4,
-		expHeadBlock:       2,
-		expSnapshotBottom:  2, // rebuild from the head
+		expHeadBlock:       4,
+		expSnapshotBottom:  0, // The initial disk layer is built from the genesis
 	})
 }
 
 func testSnapshot(t *testing.T, tt *snapshotTest) {
 	// It's hard to follow the test case, visualize the input
-	// log.Root().SetHandler(log.LvlFilterHandler(log.LvlTrace, log.StreamHandler(os.Stderr, log.TerminalFormat(true))))
+	log.Root().SetHandler(log.LvlFilterHandler(log.LvlTrace, log.StreamHandler(os.Stderr, log.TerminalFormat(true))))
 	// fmt.Println(tt.dump(true))
 
 	// Create a temporary persistent database
@@ -358,11 +367,16 @@ func testSnapshot(t *testing.T, tt *snapshotTest) {
 			chain.stateCache.TrieDB().Commit(blocks[point-1].Root(), true, nil)
 		}
 		if tt.snapshotBlock > 0 && tt.snapshotBlock == point {
-			chain.snaps.Cap(blocks[point-1].Root(), 0)
-
-			diskRoot, blockRoot := chain.snaps.DiskRoot(), blocks[point-1].Root()
-			if !bytes.Equal(diskRoot.Bytes(), blockRoot.Bytes()) {
-				t.Fatalf("Failed to flush disk layer change, want %x, got %x", blockRoot, diskRoot)
+			if tt.legacy {
+				// Here we commit the snapshot disk root to simulate
+				// committing the legacy snapshot.
+				rawdb.WriteSnapshotRoot(db, blocks[point-1].Root())
+			} else {
+				chain.snaps.Cap(blocks[point-1].Root(), 0)
+				diskRoot, blockRoot := chain.snaps.DiskRoot(), blocks[point-1].Root()
+				if !bytes.Equal(diskRoot.Bytes(), blockRoot.Bytes()) {
+					t.Fatalf("Failed to flush disk layer change, want %x, got %x", blockRoot, diskRoot)
+				}
 			}
 		}
 	}
@@ -399,52 +413,51 @@ func testSnapshot(t *testing.T, tt *snapshotTest) {
 			t.Fatalf("Failed to recreate chain: %v", err)
 		}
 		defer chain.Stop()
-	} else {
-		// Stop the chain normally, then restart
+	} else if tt.gapped > 0 {
+		// Insert blocks without enabling snapshot if gapping is required.
 		chain.Stop()
 
-		// Insert blocks without enabling snapshot if gapping is required.
-		if tt.gapped > 0 {
-			gappedBlocks, _ := GenerateChain(params.TestChainConfig, blocks[len(blocks)-1], engine, gendb, tt.gapped, func(i int, b *BlockGen) {})
-
-			// Insert a few more blocks without enabling snapshot
-			var cacheConfig = &CacheConfig{
-				TrieCleanLimit: 256,
-				TrieDirtyLimit: 256,
-				TrieTimeLimit:  5 * time.Minute,
-				SnapshotLimit:  0,
-			}
-			chain, err = NewBlockChain(db, cacheConfig, params.AllEthashProtocolChanges, engine, vm.Config{}, nil, nil)
-			if err != nil {
-				t.Fatalf("Failed to recreate chain: %v", err)
-			}
-			chain.InsertChain(gappedBlocks)
-			chain.Stop()
-
-			chain, err = NewBlockChain(db, nil, params.AllEthashProtocolChanges, engine, vm.Config{}, nil, nil)
-			if err != nil {
-				t.Fatalf("Failed to recreate chain: %v", err)
-			}
-			defer chain.Stop()
-		} else if tt.setHead != 0 {
-			// Rewind the chain if setHead operation is required.
-			chain.SetHead(tt.setHead)
-			chain.Stop()
-
-			chain, err = NewBlockChain(db, nil, params.AllEthashProtocolChanges, engine, vm.Config{}, nil, nil)
-			if err != nil {
-				t.Fatalf("Failed to recreate chain: %v", err)
-			}
-			defer chain.Stop()
-		} else {
-			// Restart the chain normally
-			chain, err = NewBlockChain(db, nil, params.AllEthashProtocolChanges, engine, vm.Config{}, nil, nil)
-			if err != nil {
-				t.Fatalf("Failed to recreate chain: %v", err)
-			}
-			defer chain.Stop()
-		}
 	}
+		gappedBlocks, _ := GenerateChain(params.TestChainConfig, blocks[len(blocks)-1], engine, gendb, tt.gapped, func(i int, b *BlockGen) {})
+
+		// Insert a few more blocks without enabling snapshot
+		var cacheConfig = &CacheConfig{
+			TrieCleanLimit: 256,
+			TrieDirtyLimit: 256,
+			TrieTimeLimit:  5 * time.Minute,
+			SnapshotLimit:  0,
+		}
+		chain, err = NewBlockChain(db, cacheConfig, params.AllEthashProtocolChanges, engine, vm.Config{}, nil, nil)
+		if err != nil {
+			t.Fatalf("Failed to recreate chain: %v", err)
+		}
+		chain.InsertChain(gappedBlocks)
+		chain.Stop()
+
+		chain, err = NewBlockChain(db, nil, params.AllEthashProtocolChanges, engine, vm.Config{}, nil, nil)
+		if err != nil {
+			t.Fatalf("Failed to recreate chain: %v", err)
+		}
+		defer chain.Stop()
+	} else if tt.setHead != 0 {
+		// Rewind the chain if setHead operation is required.
+		chain.SetHead(tt.setHead)
+		chain.Stop()
+
+		chain, err = NewBlockChain(db, nil, params.AllEthashProtocolChanges, engine, vm.Config{}, nil, nil)
+		if err != nil {
+			t.Fatalf("Failed to recreate chain: %v", err)
+		}
+		defer chain.Stop()
+	} else {
+		chain.Stop()
+		// Restart the chain normally
+		chain, err = NewBlockChain(db, nil, params.AllEthashProtocolChanges, engine, vm.Config{}, nil, nil)
+		if err != nil {
+			t.Fatalf("Failed to recreate chain: %v", err)
+		}
+		defer chain.Stop()
 	}
+
 	// Iterate over all the remaining blocks and ensure there are no gaps
 	verifyNoGaps(t, chain, true, blocks)
 	verifyCutoff(t, chain, true, blocks, tt.expCanonicalBlocks)
@@ -458,12 +471,11 @@ func testSnapshot(t *testing.T, tt *snapshotTest) {
 		t.Errorf("Head block mismatch: have %d, want %d", head.NumberU64(), tt.expHeadBlock)
 	}
-	if tt.expSnapshotBottom != 0 {
-		block := chain.GetBlockByNumber(tt.expSnapshotBottom)
-		if block == nil {
-			t.Errorf("The corresponding block[%d] of snapshot disk layer is missing", tt.expSnapshotBottom)
-		} else if !bytes.Equal(chain.snaps.DiskRoot().Bytes(), block.Root().Bytes()) {
-			t.Errorf("The snapshot disk layer root is incorrect, want %x, got %x", block.Root(), chain.snaps.DiskRoot())
-		}
+	// Check the disk layer, ensure they are matched
+	block := chain.GetBlockByNumber(tt.expSnapshotBottom)
+	if block == nil {
+		t.Errorf("The corresponding block[%d] of snapshot disk layer is missing", tt.expSnapshotBottom)
+	} else if !bytes.Equal(chain.snaps.DiskRoot().Bytes(), block.Root().Bytes()) {
+		t.Errorf("The snapshot disk layer root is incorrect, want %x, got %x", block.Root(), chain.snaps.DiskRoot())
 	}
 }
diff --git a/core/state/snapshot/journal.go b/core/state/snapshot/journal.go
index befee0012be7..155e4801ac71 100644
--- a/core/state/snapshot/journal.go
+++ b/core/state/snapshot/journal.go
@@ -408,5 +408,49 @@ func (dl *diskLayer) LegacyJournal(buffer *bytes.Buffer) (common.Hash, error) {
 //
 // Note it's the legacy version which is only used in testing right now.
func (dl *diffLayer) LegacyJournal(buffer *bytes.Buffer) (common.Hash, error) { - return dl.Journal(buffer) + // Journal the parent first + base, err := dl.parent.LegacyJournal(buffer) + if err != nil { + return common.Hash{}, err + } + // Ensure the layer didn't get stale + dl.lock.RLock() + defer dl.lock.RUnlock() + + if dl.Stale() { + return common.Hash{}, ErrSnapshotStale + } + // Everything below was journalled, persist this layer too + if err := rlp.Encode(buffer, dl.root); err != nil { + return common.Hash{}, err + } + destructs := make([]journalDestruct, 0, len(dl.destructSet)) + for hash := range dl.destructSet { + destructs = append(destructs, journalDestruct{Hash: hash}) + } + if err := rlp.Encode(buffer, destructs); err != nil { + return common.Hash{}, err + } + accounts := make([]journalAccount, 0, len(dl.accountData)) + for hash, blob := range dl.accountData { + accounts = append(accounts, journalAccount{Hash: hash, Blob: blob}) + } + if err := rlp.Encode(buffer, accounts); err != nil { + return common.Hash{}, err + } + storage := make([]journalStorage, 0, len(dl.storageData)) + for hash, slots := range dl.storageData { + keys := make([]common.Hash, 0, len(slots)) + vals := make([][]byte, 0, len(slots)) + for key, val := range slots { + keys = append(keys, key) + vals = append(vals, val) + } + storage = append(storage, journalStorage{Hash: hash, Keys: keys, Vals: vals}) + } + if err := rlp.Encode(buffer, storage); err != nil { + return common.Hash{}, err + } + log.Debug("Journalled diff layer", "root", dl.root, "parent", dl.parent.Root()) + return base, nil } From 6d1f63db7e845291dce659301298e5073c5bede9 Mon Sep 17 00:00:00 2001 From: rjl493456442 Date: Wed, 28 Oct 2020 20:02:03 +0800 Subject: [PATCH 15/19] core: remove log --- core/blockchain_snapshot_test.go | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/core/blockchain_snapshot_test.go b/core/blockchain_snapshot_test.go index 92517fe0e76b..69a2f8109325 100644 --- a/core/blockchain_snapshot_test.go +++ b/core/blockchain_snapshot_test.go @@ -29,7 +29,6 @@ import ( "github.com/ethereum/go-ethereum/consensus/ethash" "github.com/ethereum/go-ethereum/core/rawdb" "github.com/ethereum/go-ethereum/core/vm" - "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/params" ) @@ -316,7 +315,7 @@ func TestSetHeadWithLegacySnapshot(t *testing.T) { func testSnapshot(t *testing.T, tt *snapshotTest) { // It's hard to follow the test case, visualize the input - log.Root().SetHandler(log.LvlFilterHandler(log.LvlTrace, log.StreamHandler(os.Stderr, log.TerminalFormat(true)))) + // log.Root().SetHandler(log.LvlFilterHandler(log.LvlTrace, log.StreamHandler(os.Stderr, log.TerminalFormat(true)))) // fmt.Println(tt.dump(true)) // Create a temporary persistent database From de9e24e3d7b7c628d21204ba551f1f861166eac4 Mon Sep 17 00:00:00 2001 From: rjl493456442 Date: Wed, 28 Oct 2020 20:46:57 +0800 Subject: [PATCH 16/19] core/state/snapshot: fix snapshot --- core/state/snapshot/snapshot.go | 16 +++++++++++++++- 1 file changed, 15 insertions(+), 1 deletion(-) diff --git a/core/state/snapshot/snapshot.go b/core/state/snapshot/snapshot.go index 732433dda147..f1825fb66ed9 100644 --- a/core/state/snapshot/snapshot.go +++ b/core/state/snapshot/snapshot.go @@ -577,9 +577,13 @@ func (t *Tree) Journal(root common.Hash) (common.Hash, error) { if err := rlp.Encode(journal, journalVersion); err != nil { return common.Hash{}, err } + diskroot := t.diskRoot() + if diskroot == (common.Hash{}) { + return common.Hash{}, 
errors.New("invalid disk root") + } // Secondly write out the disk layer root, ensure the // diff journal is continuous with disk. - if err := rlp.Encode(journal, t.disklayer().root); err != nil { + if err := rlp.Encode(journal, diskroot); err != nil { return common.Hash{}, err } // Finally write out the journal of each layer in reverse order. @@ -712,6 +716,16 @@ func (t *Tree) disklayer() *diskLayer { } } +// diskRoot is a internal helper function to return the disk layer root. +// The lock of snapTree is assumed to be held already. +func (t *Tree) diskRoot() common.Hash { + disklayer := t.disklayer() + if disklayer == nil { + return common.Hash{} + } + return disklayer.Root() +} + // generating is an internal helper function which reports whether the snapshot // is still under the construction. func (t *Tree) generating() (bool, error) { From ff3dc395ae89cc9003af2d31e1756b70cff61327 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?P=C3=A9ter=20Szil=C3=A1gyi?= Date: Wed, 7 Oct 2020 19:09:15 +0300 Subject: [PATCH 17/19] core: avoid callbacks in SetHead --- core/blockchain.go | 63 ++++++++++++++------------- core/rawdb/accessors_snapshot.go | 22 +++++----- core/state/snapshot/disklayer_test.go | 2 +- core/state/snapshot/journal.go | 2 +- core/state/snapshot/snapshot.go | 4 +- 5 files changed, 48 insertions(+), 45 deletions(-) diff --git a/core/blockchain.go b/core/blockchain.go index 0ed59bf864f8..fb780fcf5285 100644 --- a/core/blockchain.go +++ b/core/blockchain.go @@ -292,18 +292,13 @@ func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, chainConfig *par if diskRoot != (common.Hash{}) { log.Warn("Head state missing, repairing", "number", head.Number(), "hash", head.Hash(), "snaproot", diskRoot) - var snapDisk uint64 - if err := bc.SetHeadWithCondition(head.NumberU64(), func(block *types.Block, canStop *bool) { - if block.Root() == diskRoot && snapDisk == 0 { - snapDisk, *canStop = block.NumberU64(), true - } - }); err != nil { + snapDisk, err := bc.SetHeadBeyondRoot(head.NumberU64(), diskRoot) + if err != nil { return nil, err } - // Chain is rewound, persist a flag in the disk to indicate - // the snapshot is in recovery procedure. + // Chain rewound, persist old snapshot number to indicate recovery procedure if snapDisk != 0 { - rawdb.WriteSnapshotRecoveryFlag(bc.db, snapDisk) + rawdb.WriteSnapshotRecoveryNumber(bc.db, snapDisk) } } else { log.Warn("Head state missing, repairing", "number", head.Number(), "hash", head.Hash()) @@ -367,7 +362,7 @@ func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, chainConfig *par if bc.cacheConfig.SnapshotLimit > 0 { var ( recovery bool - flag = rawdb.ReadSnapshotRecoveryFlag(bc.db) + flag = rawdb.ReadSnapshotRecoveryNumber(bc.db) ) // The persisted snapshot recovery height is higher than // current chain head. At least it means the snapshot disk @@ -478,13 +473,29 @@ func (bc *BlockChain) loadLastState() error { return nil } -// setHead rewinds the local chain to a new head. Depending on whether the node +// SetHead rewinds the local chain to a new head. Depending on whether the node // was fast synced or full synced and in which state, the method will try to // delete minimal data from disk whilst retaining chain consistency. 
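To visualize the journal framing that the change above enforces (the version number first, then the disk layer root the diff journal must be continuous with, then the per-layer entries), here is a minimal encode/decode sketch. It is an illustration only: it assumes the go-ethereum rlp and common packages and uses a dummy root value in place of a real state root.

    package main

    import (
    	"bytes"
    	"fmt"

    	"github.com/ethereum/go-ethereum/common"
    	"github.com/ethereum/go-ethereum/rlp"
    )

    func main() {
    	journal := new(bytes.Buffer)

    	// Writer side: version first, then the disk layer root. The layer
    	// entries would follow in the real journal.
    	if err := rlp.Encode(journal, uint64(0)); err != nil { // journalVersion
    		panic(err)
    	}
    	diskRoot := common.HexToHash("0xdead")
    	if err := rlp.Encode(journal, diskRoot); err != nil {
    		panic(err)
    	}

    	// Reader side mirrors the writes in the same order.
    	r := rlp.NewStream(bytes.NewReader(journal.Bytes()), 0)
    	var version uint64
    	if err := r.Decode(&version); err != nil {
    		panic(err)
    	}
    	var root common.Hash
    	if err := r.Decode(&root); err != nil {
    		panic(err)
    	}
    	fmt.Println(version, root == diskRoot) // 0 true
    }

Because the reader consumes fields in the exact order the writer emitted them, any mismatch between chain head and journaled disk root is detected immediately at load time rather than after replaying diff layers.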
-func (bc *BlockChain) setHead(head uint64, onDelete func(block *types.Block, canStop *bool)) error {
+func (bc *BlockChain) SetHead(head uint64) error {
+	_, err := bc.SetHeadBeyondRoot(head, common.Hash{})
+	return err
+}
+
+// SetHeadBeyondRoot rewinds the local chain to a new head with the extra condition
+// that the rewind must pass the specified state root. This method is meant to be
+// used when rewinding with snapshots enabled to ensure that we go back further than
+// persistent disk layer. Depending on whether the node was fast synced or full, and
+// in which state, the method will try to delete minimal data from disk whilst
+// retaining chain consistency.
+//
+// The method returns the block number where the requested root cap was found.
+func (bc *BlockChain) SetHeadBeyondRoot(head uint64, root common.Hash) (uint64, error) {
 	bc.chainmu.Lock()
 	defer bc.chainmu.Unlock()
 
+	// Track the block number of the requested root hash
+	var rootNumber uint64 // (no root == always 0)
+
 	// Retrieve the last pivot block to short circuit rollbacks beyond it and the
 	// current freezer limit to start nuking if underflown
 	pivot := rawdb.ReadLastPivotNumber(bc.db)
@@ -500,14 +511,16 @@ func (bc *BlockChain) setHead(head uint64, onDelete func(block *types.Block, can
 			log.Error("Gap in the chain, rewinding to genesis", "number", header.Number, "hash", header.Hash())
 			newHeadBlock = bc.genesisBlock
 		} else {
-			// Block exists, keep rewinding until we find one with state
-			var canStop = onDelete == nil
+			// Block exists, keep rewinding until we find one with state,
+			// or until we exceed the optional threshold root hash
+			beyondRoot := (root == common.Hash{}) // Flag whether we're beyond the requested root (no root, always true)
+
 			for {
 				if _, err := state.New(newHeadBlock.Root(), bc.stateCache, bc.snaps); err != nil {
-					// If there is a passed callback for determining
-					// whether the rewinding should stop, call it.
-					if onDelete != nil {
-						onDelete(newHeadBlock, &canStop)
+					// If a root threshold was requested but not yet crossed, check
+					if root != (common.Hash{}) && !beyondRoot && newHeadBlock.Root() == root {
+						beyondRoot, rootNumber = true, newHeadBlock.NumberU64()
 					}
 					log.Trace("Block state missing, rewinding further", "number", newHeadBlock.NumberU64(), "hash", newHeadBlock.Hash())
 					if pivot == nil || newHeadBlock.NumberU64() > *pivot {
@@ -518,7 +531,7 @@ func (bc *BlockChain) setHead(head uint64, onDelete func(block *types.Block, can
 						newHeadBlock = bc.genesisBlock
 					}
 				}
-				if canStop || newHeadBlock.NumberU64() == 0 {
+				if beyondRoot || newHeadBlock.NumberU64() == 0 {
 					log.Debug("Rewound to block with state", "number", newHeadBlock.NumberU64(), "hash", newHeadBlock.Hash())
 					break
 				}
@@ -603,17 +616,7 @@ func (bc *BlockChain) setHead(head uint64, onDelete func(block *types.Block, can
 	bc.txLookupCache.Purge()
 	bc.futureBlocks.Purge()
 
-	return bc.loadLastState()
-}
-
-// SetHead is a wrapper of setHead for external usage.
-func (bc *BlockChain) SetHead(head uint64) error {
-	return bc.setHead(head, nil)
-}
-
-// SetHead is a wrapper of setHead for external usage.
-func (bc *BlockChain) SetHeadWithCondition(head uint64, onDelete func(block *types.Block, canStop *bool)) error { - return bc.setHead(head, onDelete) + return rootNumber, bc.loadLastState() } // FastSyncCommitHead sets the current head block to the one defined by the hash diff --git a/core/rawdb/accessors_snapshot.go b/core/rawdb/accessors_snapshot.go index adf0ef9e8008..5bd48ad5fad5 100644 --- a/core/rawdb/accessors_snapshot.go +++ b/core/rawdb/accessors_snapshot.go @@ -144,9 +144,9 @@ func DeleteSnapshotGenerator(db ethdb.KeyValueWriter) { } } -// ReadSnapshotRecoveryFlag retrieves the serialized snapshot recovery flag -// saved at the last recovery procedure. -func ReadSnapshotRecoveryFlag(db ethdb.KeyValueReader) *uint64 { +// ReadSnapshotRecoveryNumber retrieves the block number of the last persisted +// snapshot layer. +func ReadSnapshotRecoveryNumber(db ethdb.KeyValueReader) *uint64 { data, _ := db.Get(snapshotRecoveryKey) if len(data) == 0 { return nil @@ -158,20 +158,20 @@ func ReadSnapshotRecoveryFlag(db ethdb.KeyValueReader) *uint64 { return &number } -// WriteSnapshotRecoveryFlag stores the serialized snapshot recovery flag to -// save in the recovery procedure. -func WriteSnapshotRecoveryFlag(db ethdb.KeyValueWriter, number uint64) { +// WriteSnapshotRecoveryNumber stores the block number of the last persisted +// snapshot layer. +func WriteSnapshotRecoveryNumber(db ethdb.KeyValueWriter, number uint64) { var buf [8]byte binary.BigEndian.PutUint64(buf[:], number) if err := db.Put(snapshotRecoveryKey, buf[:]); err != nil { - log.Crit("Failed to store snapshot recovery flag", "err", err) + log.Crit("Failed to store snapshot recovery number", "err", err) } } -// DeleteSnapshotRecoveryFlag deletes the serialized snapshot recovery flag -// saved at the last recovery procedure. -func DeleteSnapshotRecoveryFlag(db ethdb.KeyValueWriter) { +// DeleteSnapshotRecoveryNumber deletes the block number of the last persisted +// snapshot layer. +func DeleteSnapshotRecoveryNumber(db ethdb.KeyValueWriter) { if err := db.Delete(snapshotRecoveryKey); err != nil { - log.Crit("Failed to remove snapshot recovery flag", "err", err) + log.Crit("Failed to remove snapshot recovery number", "err", err) } } diff --git a/core/state/snapshot/disklayer_test.go b/core/state/snapshot/disklayer_test.go index aa4df9df4461..40ff5ade4c37 100644 --- a/core/state/snapshot/disklayer_test.go +++ b/core/state/snapshot/disklayer_test.go @@ -479,7 +479,7 @@ func TestDiskGeneratorPersistence(t *testing.T) { if err := rlp.DecodeBytes(blob, &generator); err != nil { t.Fatalf("Failed to decode snapshot generator %v", err) } - if bytes.Compare(generator.Marker, genMarker) != 0 { + if !bytes.Equal(generator.Marker, genMarker) { t.Fatalf("Generator marker is not matched") } // Test senario 2, the disk layer is fully generated diff --git a/core/state/snapshot/journal.go b/core/state/snapshot/journal.go index 155e4801ac71..94c194a9f5d9 100644 --- a/core/state/snapshot/journal.go +++ b/core/state/snapshot/journal.go @@ -185,7 +185,7 @@ func loadSnapshot(diskdb ethdb.KeyValueStore, triedb *trie.Database, cache int, // the disk layer is always higher than chain head. It can // be eventually recovered when the chain head beyonds the // disk layer. 
- log.Warn("Snapshot is not continous with chain", "snaproot", head, "chainroot", root) + log.Warn("Snapshot is not continuous with chain", "snaproot", head, "chainroot", root) } // Everything loaded correctly, resume any suspended operations if !generator.Done { diff --git a/core/state/snapshot/snapshot.go b/core/state/snapshot/snapshot.go index f1825fb66ed9..6ad4451ea34f 100644 --- a/core/state/snapshot/snapshot.go +++ b/core/state/snapshot/snapshot.go @@ -626,9 +626,9 @@ func (t *Tree) Rebuild(root common.Hash) { t.lock.Lock() defer t.lock.Unlock() - // Firstly delete any recovery flag in the database. Becasue now we are + // Firstly delete any recovery flag in the database. Because now we are // building a brand new snapshot. - rawdb.DeleteSnapshotRecoveryFlag(t.diskdb) + rawdb.DeleteSnapshotRecoveryNumber(t.diskdb) // Track whether there's a wipe currently running and keep it alive if so var wiper chan struct{} From 64b5566083a802728c304d0caf8d0034c63e8d47 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?P=C3=A9ter=20Szil=C3=A1gyi?= Date: Wed, 7 Oct 2020 21:22:36 +0300 Subject: [PATCH 18/19] core: fix setHead cornercase where the threshold root has state --- core/blockchain.go | 33 ++-- core/blockchain_repair_test.go | 319 ++++++++++++++++++++++++++------ core/blockchain_sethead_test.go | 319 ++++++++++++++++++++++++++------ 3 files changed, 544 insertions(+), 127 deletions(-) diff --git a/core/blockchain.go b/core/blockchain.go index fb780fcf5285..1a058a4ff75d 100644 --- a/core/blockchain.go +++ b/core/blockchain.go @@ -360,19 +360,18 @@ func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, chainConfig *par } // Load any existing snapshot, regenerating it if loading failed if bc.cacheConfig.SnapshotLimit > 0 { - var ( - recovery bool - flag = rawdb.ReadSnapshotRecoveryNumber(bc.db) - ) - // The persisted snapshot recovery height is higher than - // current chain head. At least it means the snapshot disk - // layer is higher than current head. Enable the snapshot - // recovery mode. - if flag != nil && *flag > bc.CurrentBlock().NumberU64() { - recovery = true - log.Info("Enable snapshot recovery", "chainhead", bc.CurrentBlock().NumberU64(), "disklayer", *flag) + // If the chain was rewound past the snapshot persistent layer (causing + // a recovery block number to be persisted to disk), check if we're still + // in recovery mode and in that case, don't invalidate the snapshot on a + // head mismatch. 
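In other words, the load-time decision described in the comment above reduces to a single comparison between the persisted recovery number and the current head. A condensed, self-contained model of that rule, with hypothetical names mirroring the surrounding hunk:

    package main

    import "fmt"

    // inRecovery mirrors the check below: a persisted recovery number strictly
    // above the current head means the snapshot disk layer is ahead of the
    // chain, so the snapshot must be loaded in recovery mode rather than
    // invalidated and rebuilt.
    func inRecovery(recoveryNumber *uint64, headNumber uint64) bool {
    	return recoveryNumber != nil && *recoveryNumber > headNumber
    }

    func main() {
    	layer := uint64(4)
    	fmt.Println(inRecovery(&layer, 0)) // true: head rewound to genesis, disk layer at 4
    	fmt.Println(inRecovery(&layer, 8)) // false: chain has grown past the disk layer
    	fmt.Println(inRecovery(nil, 8))    // false: no recovery marker persisted
    }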
+	var recover bool
+
+	head := bc.CurrentBlock()
+	if layer := rawdb.ReadSnapshotRecoveryNumber(bc.db); layer != nil && *layer > head.NumberU64() {
+		log.Warn("Enabling snapshot recovery", "chainhead", head.NumberU64(), "diskbase", *layer)
+		recover = true
 	}
-	bc.snaps = snapshot.New(bc.db, bc.stateCache.TrieDB(), bc.cacheConfig.SnapshotLimit, bc.CurrentBlock().Root(), !bc.cacheConfig.SnapshotWait, recovery)
+	bc.snaps = snapshot.New(bc.db, bc.stateCache.TrieDB(), bc.cacheConfig.SnapshotLimit, head.Root(), !bc.cacheConfig.SnapshotWait, recover)
 }
 // Take ownership of this particular state
 go bc.update()
@@ -517,11 +516,11 @@ func (bc *BlockChain) SetHeadBeyondRoot(head uint64, root common.Hash) (uint64,
 			beyondRoot := (root == common.Hash{}) // Flag whether we're beyond the requested root (no root, always true)
 
 			for {
+				// If a root threshold was requested but not yet crossed, check
+				if root != (common.Hash{}) && !beyondRoot && newHeadBlock.Root() == root {
+					beyondRoot, rootNumber = true, newHeadBlock.NumberU64()
+				}
 				if _, err := state.New(newHeadBlock.Root(), bc.stateCache, bc.snaps); err != nil {
-					// If a root threshold was requested but not yet crossed, check
-					if root != (common.Hash{}) && !beyondRoot && newHeadBlock.Root() == root {
-						beyondRoot, rootNumber = true, newHeadBlock.NumberU64()
-					}
 					log.Trace("Block state missing, rewinding further", "number", newHeadBlock.NumberU64(), "hash", newHeadBlock.Hash())
 					if pivot == nil || newHeadBlock.NumberU64() > *pivot {
 						newHeadBlock = bc.GetBlock(newHeadBlock.ParentHash(), newHeadBlock.NumberU64()-1)
@@ -535,7 +534,7 @@ func (bc *BlockChain) SetHeadBeyondRoot(head uint64, root common.Hash) (uint64,
 					log.Debug("Rewound to block with state", "number", newHeadBlock.NumberU64(), "hash", newHeadBlock.Hash())
 					break
 				}
-				log.Debug("Skip the block with state", "number", newHeadBlock.NumberU64(), "hash", newHeadBlock.Hash())
+				log.Debug("Skipping block with threshold state", "number", newHeadBlock.NumberU64(), "hash", newHeadBlock.Hash(), "root", newHeadBlock.Root())
 				newHeadBlock = bc.GetBlock(newHeadBlock.ParentHash(), newHeadBlock.NumberU64()-1) // Keep rewinding
 			}
 		}
diff --git a/core/blockchain_repair_test.go b/core/blockchain_repair_test.go
index 7ab2e9652e44..b5cd232a9c4f 100644
--- a/core/blockchain_repair_test.go
+++ b/core/blockchain_repair_test.go
@@ -39,7 +39,10 @@ import (
 // committed to disk and then the process crashed. In this case we expect the full
 // chain to be rolled back to the committed block, but the chain data itself left
 // in the database for replaying.
-func TestShortRepair(t *testing.T) {
+func TestShortRepair(t *testing.T)              { testShortRepair(t, false) }
+func TestShortRepairWithSnapshots(t *testing.T) { testShortRepair(t, true) }
+
+func testShortRepair(t *testing.T, snapshots bool) {
 	// Chain:
 	//   G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
 	//
@@ -69,14 +72,17 @@ func TestShortRepair(t *testing.T) {
 		expHeadHeader:      8,
 		expHeadFastBlock:   8,
 		expHeadBlock:       4,
-	})
+	}, snapshots)
 }
 
 // Tests a recovery for a short canonical chain where the fast sync pivot point was
 // already committed, after which the process crashed. In this case we expect the full
 // chain to be rolled back to the committed block, but the chain data itself left in
 // the database for replaying.
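The reordering in the hunk above (testing the threshold root before testing for available state) matters precisely when the threshold root itself still has state: the old order would only record the root inside the missing-state branch and therefore skip it. A compact, purely illustrative model of the corrected loop, with toy types standing in for blocks and the state database:

    package main

    import "fmt"

    type block struct {
    	number   uint64
    	root     string
    	hasState bool
    }

    // setHeadBeyondRoot models the corrected rewind: the threshold check runs
    // before the state check, so a threshold root that still has state is
    // recorded instead of being skipped over.
    func setHeadBeyondRoot(chain []block, root string) (head, rootNumber uint64) {
    	beyondRoot := root == "" // no threshold: stop at the first available state
    	for i := len(chain) - 1; i >= 0; i-- {
    		if !beyondRoot && chain[i].root == root {
    			beyondRoot, rootNumber = true, chain[i].number
    		}
    		if beyondRoot && chain[i].hasState {
    			return chain[i].number, rootNumber
    		}
    	}
    	return 0, rootNumber // fell back to genesis
    }

    func main() {
    	chain := []block{
    		{0, "g", true}, {1, "a", false}, {2, "b", true}, {3, "c", false}, {4, "d", true},
    	}
    	// The snapshot disk layer sits at block 4 and that root still has
    	// state: the rewind must stop right there, not at an earlier block.
    	head, disk := setHeadBeyondRoot(chain, "d")
    	fmt.Println(head, disk) // 4 4
    }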
-func TestShortFastSyncedRepair(t *testing.T) { +func TestShortFastSyncedRepair(t *testing.T) { testShortFastSyncedRepair(t, false) } +func TestShortFastSyncedRepairWithSnapshots(t *testing.T) { testShortFastSyncedRepair(t, true) } + +func testShortFastSyncedRepair(t *testing.T, snapshots bool) { // Chain: // G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD) // @@ -106,14 +112,17 @@ func TestShortFastSyncedRepair(t *testing.T) { expHeadHeader: 8, expHeadFastBlock: 8, expHeadBlock: 4, - }) + }, snapshots) } // Tests a recovery for a short canonical chain where the fast sync pivot point was // not yet committed, but the process crashed. In this case we expect the chain to // detect that it was fast syncing and not delete anything, since we can just pick // up directly where we left off. -func TestShortFastSyncingRepair(t *testing.T) { +func TestShortFastSyncingRepair(t *testing.T) { testShortFastSyncingRepair(t, false) } +func TestShortFastSyncingRepairWithSnapshots(t *testing.T) { testShortFastSyncingRepair(t, true) } + +func testShortFastSyncingRepair(t *testing.T, snapshots bool) { // Chain: // G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD) // @@ -143,7 +152,7 @@ func TestShortFastSyncingRepair(t *testing.T) { expHeadHeader: 8, expHeadFastBlock: 8, expHeadBlock: 0, - }) + }, snapshots) } // Tests a recovery for a short canonical chain and a shorter side chain, where a @@ -151,7 +160,10 @@ func TestShortFastSyncingRepair(t *testing.T) { // test scenario the side chain is below the committed block. In this case we expect // the canonical chain to be rolled back to the committed block, but the chain data // itself left in the database for replaying. -func TestShortOldForkedRepair(t *testing.T) { +func TestShortOldForkedRepair(t *testing.T) { testShortOldForkedRepair(t, false) } +func TestShortOldForkedRepairWithSnapshots(t *testing.T) { testShortOldForkedRepair(t, true) } + +func testShortOldForkedRepair(t *testing.T, snapshots bool) { // Chain: // G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD) // └->S1->S2->S3 @@ -183,7 +195,7 @@ func TestShortOldForkedRepair(t *testing.T) { expHeadHeader: 8, expHeadFastBlock: 8, expHeadBlock: 4, - }) + }, snapshots) } // Tests a recovery for a short canonical chain and a shorter side chain, where @@ -192,6 +204,13 @@ func TestShortOldForkedRepair(t *testing.T) { // this case we expect the canonical chain to be rolled back to the committed block, // but the chain data itself left in the database for replaying. func TestShortOldForkedFastSyncedRepair(t *testing.T) { + testShortOldForkedFastSyncedRepair(t, false) +} +func TestShortOldForkedFastSyncedRepairWithSnapshots(t *testing.T) { + testShortOldForkedFastSyncedRepair(t, true) +} + +func testShortOldForkedFastSyncedRepair(t *testing.T, snapshots bool) { // Chain: // G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD) // └->S1->S2->S3 @@ -223,7 +242,7 @@ func TestShortOldForkedFastSyncedRepair(t *testing.T) { expHeadHeader: 8, expHeadFastBlock: 8, expHeadBlock: 4, - }) + }, snapshots) } // Tests a recovery for a short canonical chain and a shorter side chain, where @@ -232,6 +251,13 @@ func TestShortOldForkedFastSyncedRepair(t *testing.T) { // the chain to detect that it was fast syncing and not delete anything, since we // can just pick up directly where we left off. 
func TestShortOldForkedFastSyncingRepair(t *testing.T) { + testShortOldForkedFastSyncingRepair(t, false) +} +func TestShortOldForkedFastSyncingRepairWithSnapshots(t *testing.T) { + testShortOldForkedFastSyncingRepair(t, true) +} + +func testShortOldForkedFastSyncingRepair(t *testing.T, snapshots bool) { // Chain: // G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD) // └->S1->S2->S3 @@ -263,7 +289,7 @@ func TestShortOldForkedFastSyncingRepair(t *testing.T) { expHeadHeader: 8, expHeadFastBlock: 8, expHeadBlock: 0, - }) + }, snapshots) } // Tests a recovery for a short canonical chain and a shorter side chain, where a @@ -271,7 +297,10 @@ func TestShortOldForkedFastSyncingRepair(t *testing.T) { // test scenario the side chain reaches above the committed block. In this case we // expect the canonical chain to be rolled back to the committed block, but the // chain data itself left in the database for replaying. -func TestShortNewlyForkedRepair(t *testing.T) { +func TestShortNewlyForkedRepair(t *testing.T) { testShortNewlyForkedRepair(t, false) } +func TestShortNewlyForkedRepairWithSnapshots(t *testing.T) { testShortNewlyForkedRepair(t, true) } + +func testShortNewlyForkedRepair(t *testing.T, snapshots bool) { // Chain: // G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD) // └->S1->S2->S3->S4->S5->S6 @@ -303,7 +332,7 @@ func TestShortNewlyForkedRepair(t *testing.T) { expHeadHeader: 8, expHeadFastBlock: 8, expHeadBlock: 4, - }) + }, snapshots) } // Tests a recovery for a short canonical chain and a shorter side chain, where @@ -312,6 +341,13 @@ func TestShortNewlyForkedRepair(t *testing.T) { // In this case we expect the canonical chain to be rolled back to the committed // block, but the chain data itself left in the database for replaying. func TestShortNewlyForkedFastSyncedRepair(t *testing.T) { + testShortNewlyForkedFastSyncedRepair(t, false) +} +func TestShortNewlyForkedFastSyncedRepairWithSnapshots(t *testing.T) { + testShortNewlyForkedFastSyncedRepair(t, true) +} + +func testShortNewlyForkedFastSyncedRepair(t *testing.T, snapshots bool) { // Chain: // G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD) // └->S1->S2->S3->S4->S5->S6 @@ -343,7 +379,7 @@ func TestShortNewlyForkedFastSyncedRepair(t *testing.T) { expHeadHeader: 8, expHeadFastBlock: 8, expHeadBlock: 4, - }) + }, snapshots) } // Tests a recovery for a short canonical chain and a shorter side chain, where @@ -352,6 +388,13 @@ func TestShortNewlyForkedFastSyncedRepair(t *testing.T) { // case we expect the chain to detect that it was fast syncing and not delete // anything, since we can just pick up directly where we left off. func TestShortNewlyForkedFastSyncingRepair(t *testing.T) { + testShortNewlyForkedFastSyncingRepair(t, false) +} +func TestShortNewlyForkedFastSyncingRepairWithSnapshots(t *testing.T) { + testShortNewlyForkedFastSyncingRepair(t, true) +} + +func testShortNewlyForkedFastSyncingRepair(t *testing.T, snapshots bool) { // Chain: // G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD) // └->S1->S2->S3->S4->S5->S6 @@ -383,14 +426,17 @@ func TestShortNewlyForkedFastSyncingRepair(t *testing.T) { expHeadHeader: 8, expHeadFastBlock: 8, expHeadBlock: 0, - }) + }, snapshots) } // Tests a recovery for a short canonical chain and a longer side chain, where a // recent block was already committed to disk and then the process crashed. In this // case we expect the canonical chain to be rolled back to the committed block, but // the chain data itself left in the database for replaying. 
-func TestShortReorgedRepair(t *testing.T) {
+func TestShortReorgedRepair(t *testing.T)              { testShortReorgedRepair(t, false) }
+func TestShortReorgedRepairWithSnapshots(t *testing.T) { testShortReorgedRepair(t, true) }
+
+func testShortReorgedRepair(t *testing.T, snapshots bool) {
 	// Chain:
 	//   G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
 	//   └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10
@@ -422,7 +468,7 @@ func TestShortReorgedRepair(t *testing.T) {
 		expHeadHeader:      8,
 		expHeadFastBlock:   8,
 		expHeadBlock:       4,
-	})
+	}, snapshots)
 }
 
 // Tests a recovery for a short canonical chain and a longer side chain, where
@@ -430,6 +476,13 @@ func TestShortReorgedRepair(t *testing.T) {
 // crashed. In this case we expect the canonical chain to be rolled back to the
 // committed block, but the chain data itself left in the database for replaying.
 func TestShortReorgedFastSyncedRepair(t *testing.T) {
+	testShortReorgedFastSyncedRepair(t, false)
+}
+func TestShortReorgedFastSyncedRepairWithSnapshots(t *testing.T) {
+	testShortReorgedFastSyncedRepair(t, true)
+}
+
+func testShortReorgedFastSyncedRepair(t *testing.T, snapshots bool) {
 	// Chain:
 	//   G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
 	//   └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10
@@ -461,7 +514,7 @@ func TestShortReorgedFastSyncedRepair(t *testing.T) {
 		expHeadHeader:      8,
 		expHeadFastBlock:   8,
 		expHeadBlock:       4,
-	})
+	}, snapshots)
 }
 
 // Tests a recovery for a short canonical chain and a longer side chain, where
@@ -469,6 +522,13 @@ func TestShortReorgedFastSyncedRepair(t *testing.T) {
 // this case we expect the chain to detect that it was fast syncing and not delete
 // anything, since we can just pick up directly where we left off.
 func TestShortReorgedFastSyncingRepair(t *testing.T) {
+	testShortReorgedFastSyncingRepair(t, false)
+}
+func TestShortReorgedFastSyncingRepairWithSnapshots(t *testing.T) {
+	testShortReorgedFastSyncingRepair(t, true)
+}
+
+func testShortReorgedFastSyncingRepair(t *testing.T, snapshots bool) {
 	// Chain:
 	//   G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
 	//   └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10
@@ -500,14 +560,17 @@ func TestShortReorgedFastSyncingRepair(t *testing.T) {
 		expHeadHeader:      8,
 		expHeadFastBlock:   8,
 		expHeadBlock:       0,
-	})
+	}, snapshots)
 }
 
 // Tests a recovery for a long canonical chain with frozen blocks where a recent
 // block - newer than the ancient limit - was already committed to disk and then
 // the process crashed. In this case we expect the chain to be rolled back to the
 // committed block, with everything afterwards kept as fast sync data.
-func TestLongShallowRepair(t *testing.T) {
+func TestLongShallowRepair(t *testing.T)              { testLongShallowRepair(t, false) }
+func TestLongShallowRepairWithSnapshots(t *testing.T) { testLongShallowRepair(t, true) }
+
+func testLongShallowRepair(t *testing.T, snapshots bool) {
 	// Chain:
 	//   G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD)
 	//
@@ -542,14 +605,17 @@ func TestLongShallowRepair(t *testing.T) {
 		expHeadHeader:      18,
 		expHeadFastBlock:   18,
 		expHeadBlock:       4,
-	})
+	}, snapshots)
 }
 
 // Tests a recovery for a long canonical chain with frozen blocks where a recent
 // block - older than the ancient limit - was already committed to disk and then
 // the process crashed. In this case we expect the chain to be rolled back to the
 // committed block, with everything afterwards deleted.
-func TestLongDeepRepair(t *testing.T) {
+func TestLongDeepRepair(t *testing.T)              { testLongDeepRepair(t, false) }
+func TestLongDeepRepairWithSnapshots(t *testing.T) { testLongDeepRepair(t, true) }
+
+func testLongDeepRepair(t *testing.T, snapshots bool) {
 	// Chain:
 	//   G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD)
 	//
@@ -583,7 +649,7 @@ func TestLongDeepRepair(t *testing.T) {
 		expHeadHeader:      4,
 		expHeadFastBlock:   4,
 		expHeadBlock:       4,
-	})
+	}, snapshots)
 }
 
 // Tests a recovery for a long canonical chain with frozen blocks where the fast
@@ -591,6 +657,13 @@ func TestLongDeepRepair(t *testing.T) {
 // which the process crashed. In this case we expect the chain to be rolled back
 // to the committed block, with everything afterwards kept as fast sync data.
 func TestLongFastSyncedShallowRepair(t *testing.T) {
+	testLongFastSyncedShallowRepair(t, false)
+}
+func TestLongFastSyncedShallowRepairWithSnapshots(t *testing.T) {
+	testLongFastSyncedShallowRepair(t, true)
+}
+
+func testLongFastSyncedShallowRepair(t *testing.T, snapshots bool) {
 	// Chain:
 	//   G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD)
 	//
@@ -625,14 +698,17 @@ func TestLongFastSyncedShallowRepair(t *testing.T) {
 		expHeadHeader:      18,
 		expHeadFastBlock:   18,
 		expHeadBlock:       4,
-	})
+	}, snapshots)
 }
 
 // Tests a recovery for a long canonical chain with frozen blocks where the fast
 // sync pivot point - older than the ancient limit - was already committed, after
 // which the process crashed. In this case we expect the chain to be rolled back
 // to the committed block, with everything afterwards deleted.
-func TestLongFastSyncedDeepRepair(t *testing.T) {
+func TestLongFastSyncedDeepRepair(t *testing.T)              { testLongFastSyncedDeepRepair(t, false) }
+func TestLongFastSyncedDeepRepairWithSnapshots(t *testing.T) { testLongFastSyncedDeepRepair(t, true) }
+
+func testLongFastSyncedDeepRepair(t *testing.T, snapshots bool) {
 	// Chain:
 	//   G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD)
 	//
@@ -666,7 +742,7 @@ func TestLongFastSyncedDeepRepair(t *testing.T) {
 		expHeadHeader:      4,
 		expHeadFastBlock:   4,
 		expHeadBlock:       4,
-	})
+	}, snapshots)
 }
 
 // Tests a recovery for a long canonical chain with frozen blocks where the fast
@@ -675,6 +751,13 @@ func TestLongFastSyncedDeepRepair(t *testing.T) {
 // syncing and not delete anything, since we can just pick up directly where we
 // left off.
 func TestLongFastSyncingShallowRepair(t *testing.T) {
+	testLongFastSyncingShallowRepair(t, false)
+}
+func TestLongFastSyncingShallowRepairWithSnapshots(t *testing.T) {
+	testLongFastSyncingShallowRepair(t, true)
+}
+
+func testLongFastSyncingShallowRepair(t *testing.T, snapshots bool) {
 	// Chain:
 	//   G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD)
 	//
@@ -709,7 +792,7 @@ func TestLongFastSyncingShallowRepair(t *testing.T) {
 		expHeadHeader:      18,
 		expHeadFastBlock:   18,
 		expHeadBlock:       0,
-	})
+	}, snapshots)
 }
 
 // Tests a recovery for a long canonical chain with frozen blocks where the fast
 // sync pivot point - older than the ancient limit - was not yet committed, but the
 // process crashed. In this case we expect the chain to detect that it was fast
 // syncing and not delete anything, since we can just pick up directly where we
 // left off.
-func TestLongFastSyncingDeepRepair(t *testing.T) {
+func TestLongFastSyncingDeepRepair(t *testing.T)              { testLongFastSyncingDeepRepair(t, false) }
+func TestLongFastSyncingDeepRepairWithSnapshots(t *testing.T) { testLongFastSyncingDeepRepair(t, true) }
+
+func testLongFastSyncingDeepRepair(t *testing.T, snapshots bool) {
 	// Chain:
 	//   G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD)
 	//
@@ -752,7 +838,7 @@ func TestLongFastSyncingDeepRepair(t *testing.T) {
 		expHeadHeader:      24,
 		expHeadFastBlock:   24,
 		expHeadBlock:       0,
-	})
+	}, snapshots)
 }
 
 // Tests a recovery for a long canonical chain with frozen blocks and a shorter
@@ -762,6 +848,13 @@ func TestLongFastSyncingDeepRepair(t *testing.T) {
 // rolled back to the committed block, with everything afterwards kept as fast
 // sync data; the side chain completely nuked by the freezer.
 func TestLongOldForkedShallowRepair(t *testing.T) {
+	testLongOldForkedShallowRepair(t, false)
+}
+func TestLongOldForkedShallowRepairWithSnapshots(t *testing.T) {
+	testLongOldForkedShallowRepair(t, true)
+}
+
+func testLongOldForkedShallowRepair(t *testing.T, snapshots bool) {
 	// Chain:
 	//   G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD)
 	//   └->S1->S2->S3
@@ -797,7 +890,7 @@ func TestLongOldForkedShallowRepair(t *testing.T) {
 		expHeadHeader:      18,
 		expHeadFastBlock:   18,
 		expHeadBlock:       4,
-	})
+	}, snapshots)
 }
 
 // Tests a recovery for a long canonical chain with frozen blocks and a shorter
 // side chain, where a recent block - older than the ancient limit - was already
 // committed to disk and then the process crashed. In this test scenario the side
 // chain is below the committed block. In this case we expect the canonical chain
 // to be rolled back to the committed block, with everything afterwards deleted;
 // the side chain completely nuked by the freezer.
-func TestLongOldForkedDeepRepair(t *testing.T) {
+func TestLongOldForkedDeepRepair(t *testing.T)              { testLongOldForkedDeepRepair(t, false) }
+func TestLongOldForkedDeepRepairWithSnapshots(t *testing.T) { testLongOldForkedDeepRepair(t, true) }
+
+func testLongOldForkedDeepRepair(t *testing.T, snapshots bool) {
 	// Chain:
 	//   G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD)
 	//   └->S1->S2->S3
@@ -841,7 +937,7 @@ func TestLongOldForkedDeepRepair(t *testing.T) {
 		expHeadHeader:      4,
 		expHeadFastBlock:   4,
 		expHeadBlock:       4,
-	})
+	}, snapshots)
 }
 
 // Tests a recovery for a long canonical chain with frozen blocks and a shorter
 // side chain, where the fast sync pivot point - newer than the ancient limit -
 // was already committed to disk and then the process crashed. In this test scenario
 // the side chain is below the committed block. In this case we expect the chain
 // to be rolled back to the committed block, with everything afterwards kept as
 // fast sync data; the side chain completely nuked by the freezer.
 func TestLongOldForkedFastSyncedShallowRepair(t *testing.T) {
+	testLongOldForkedFastSyncedShallowRepair(t, false)
+}
+func TestLongOldForkedFastSyncedShallowRepairWithSnapshots(t *testing.T) {
+	testLongOldForkedFastSyncedShallowRepair(t, true)
+}
+
+func testLongOldForkedFastSyncedShallowRepair(t *testing.T, snapshots bool) {
 	// Chain:
 	//   G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD)
 	//   └->S1->S2->S3
@@ -886,7 +989,7 @@ func TestLongOldForkedFastSyncedShallowRepair(t *testing.T) {
 		expHeadHeader:      18,
 		expHeadFastBlock:   18,
 		expHeadBlock:       4,
-	})
+	}, snapshots)
 }
 
 // Tests a recovery for a long canonical chain with frozen blocks and a shorter
 // side chain, where the fast sync pivot point - older than the ancient limit -
 // was already committed to disk and then the process crashed. In this test scenario
 // the side chain is below the committed block. In this case we expect the canonical
 // chain to be rolled back to the committed block, with everything afterwards deleted;
 // the side chain completely nuked by the freezer.
 func TestLongOldForkedFastSyncedDeepRepair(t *testing.T) {
+	testLongOldForkedFastSyncedDeepRepair(t, false)
+}
+func TestLongOldForkedFastSyncedDeepRepairWithSnapshots(t *testing.T) {
+	testLongOldForkedFastSyncedDeepRepair(t, true)
+}
+
+func testLongOldForkedFastSyncedDeepRepair(t *testing.T, snapshots bool) {
 	// Chain:
 	//   G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD)
 	//   └->S1->S2->S3
@@ -930,7 +1040,7 @@ func TestLongOldForkedFastSyncedDeepRepair(t *testing.T) {
 		expHeadHeader:      4,
 		expHeadFastBlock:   4,
 		expHeadBlock:       4,
-	})
+	}, snapshots)
 }
 
 // Tests a recovery for a long canonical chain with frozen blocks and a shorter
 // side chain, where the fast sync pivot point - newer than the ancient limit -
 // was not yet committed, but the process crashed. In this test scenario the side
 // chain is below the committed block. In this case we expect the chain to detect
 // that it was fast syncing and not delete anything. The side chain is completely
 // nuked by the freezer.
 func TestLongOldForkedFastSyncingShallowRepair(t *testing.T) {
+	testLongOldForkedFastSyncingShallowRepair(t, false)
+}
+func TestLongOldForkedFastSyncingShallowRepairWithSnapshots(t *testing.T) {
+	testLongOldForkedFastSyncingShallowRepair(t, true)
+}
+
+func testLongOldForkedFastSyncingShallowRepair(t *testing.T, snapshots bool) {
 	// Chain:
 	//   G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD)
 	//   └->S1->S2->S3
@@ -975,7 +1092,7 @@ func TestLongOldForkedFastSyncingShallowRepair(t *testing.T) {
 		expHeadHeader:      18,
 		expHeadFastBlock:   18,
 		expHeadBlock:       0,
-	})
+	}, snapshots)
}
 
 // Tests a recovery for a long canonical chain with frozen blocks and a shorter
 // side chain, where the fast sync pivot point - older than the ancient limit -
 // was not yet committed, but the process crashed. In this test scenario the side
 // chain is below the committed block. In this case we expect the chain to detect
 // that it was fast syncing and not delete anything. The side chain is completely
 // nuked by the freezer.
 func TestLongOldForkedFastSyncingDeepRepair(t *testing.T) {
+	testLongOldForkedFastSyncingDeepRepair(t, false)
+}
+func TestLongOldForkedFastSyncingDeepRepairWithSnapshots(t *testing.T) {
+	testLongOldForkedFastSyncingDeepRepair(t, true)
+}
+
+func testLongOldForkedFastSyncingDeepRepair(t *testing.T, snapshots bool) {
 	// Chain:
 	//   G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD)
 	//   └->S1->S2->S3
@@ -1020,7 +1144,7 @@ func TestLongOldForkedFastSyncingDeepRepair(t *testing.T) {
 		expHeadHeader:      24,
 		expHeadFastBlock:   24,
 		expHeadBlock:       0,
-	})
+	}, snapshots)
 }
 
 // Tests a recovery for a long canonical chain with frozen blocks and a shorter
 // side chain, where a recent block - newer than the ancient limit - was already
 // committed to disk and then the process crashed. In this test scenario the side
 // chain is above the committed block. In this case we expect the chain to be
 // rolled back to the committed block, with everything afterwards kept as fast
 // sync data; the side chain completely nuked by the freezer.
 func TestLongNewerForkedShallowRepair(t *testing.T) {
+	testLongNewerForkedShallowRepair(t, false)
+}
+func TestLongNewerForkedShallowRepairWithSnapshots(t *testing.T) {
+	testLongNewerForkedShallowRepair(t, true)
+}
+
+func testLongNewerForkedShallowRepair(t *testing.T, snapshots bool) {
 	// Chain:
 	//   G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD)
 	//   └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12
@@ -1065,7 +1196,7 @@ func TestLongNewerForkedShallowRepair(t *testing.T) {
 		expHeadHeader:      18,
 		expHeadFastBlock:   18,
 		expHeadBlock:       4,
-	})
+	}, snapshots)
 }
 
 // Tests a recovery for a long canonical chain with frozen blocks and a shorter
 // side chain, where a recent block - older than the ancient limit - was already
 // committed to disk and then the process crashed. In this test scenario the side
 // chain is above the committed block. In this case we expect the canonical chain
 // to be rolled back to the committed block, with everything afterwards deleted;
 // the side chain completely nuked by the freezer.
-func TestLongNewerForkedDeepRepair(t *testing.T) {
+func TestLongNewerForkedDeepRepair(t *testing.T)              { testLongNewerForkedDeepRepair(t, false) }
+func TestLongNewerForkedDeepRepairWithSnapshots(t *testing.T) { testLongNewerForkedDeepRepair(t, true) }
+
+func testLongNewerForkedDeepRepair(t *testing.T, snapshots bool) {
 	// Chain:
 	//   G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD)
 	//   └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12
@@ -1109,7 +1243,7 @@ func TestLongNewerForkedDeepRepair(t *testing.T) {
 		expHeadHeader:      4,
 		expHeadFastBlock:   4,
 		expHeadBlock:       4,
-	})
+	}, snapshots)
 }
 
 // Tests a recovery for a long canonical chain with frozen blocks and a shorter
 // side chain, where the fast sync pivot point - newer than the ancient limit -
 // was already committed to disk and then the process crashed. In this test scenario
 // the side chain is above the committed block. In this case we expect the chain
 // to be rolled back to the committed block, with everything afterwards kept as fast
 // sync data; the side chain completely nuked by the freezer.
 func TestLongNewerForkedFastSyncedShallowRepair(t *testing.T) {
+	testLongNewerForkedFastSyncedShallowRepair(t, false)
+}
+func TestLongNewerForkedFastSyncedShallowRepairWithSnapshots(t *testing.T) {
+	testLongNewerForkedFastSyncedShallowRepair(t, true)
+}
+
+func testLongNewerForkedFastSyncedShallowRepair(t *testing.T, snapshots bool) {
 	// Chain:
 	//   G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD)
 	//   └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12
@@ -1154,7 +1295,7 @@ func TestLongNewerForkedFastSyncedShallowRepair(t *testing.T) {
 		expHeadHeader:      18,
 		expHeadFastBlock:   18,
 		expHeadBlock:       4,
-	})
+	}, snapshots)
 }
 
 // Tests a recovery for a long canonical chain with frozen blocks and a shorter
 // side chain, where the fast sync pivot point - older than the ancient limit -
 // was already committed to disk and then the process crashed. In this test scenario
 // the side chain is above the committed block. In this case we expect the canonical
 // chain to be rolled back to the committed block, with everything afterwards deleted;
 // the side chain completely nuked by the freezer.
 func TestLongNewerForkedFastSyncedDeepRepair(t *testing.T) {
+	testLongNewerForkedFastSyncedDeepRepair(t, false)
+}
+func TestLongNewerForkedFastSyncedDeepRepairWithSnapshots(t *testing.T) {
+	testLongNewerForkedFastSyncedDeepRepair(t, true)
+}
+
+func testLongNewerForkedFastSyncedDeepRepair(t *testing.T, snapshots bool) {
 	// Chain:
 	//   G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD)
 	//   └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12
@@ -1198,7 +1346,7 @@ func TestLongNewerForkedFastSyncedDeepRepair(t *testing.T) {
 		expHeadHeader:      4,
 		expHeadFastBlock:   4,
 		expHeadBlock:       4,
-	})
+	}, snapshots)
 }
 
 // Tests a recovery for a long canonical chain with frozen blocks and a shorter
 // side chain, where the fast sync pivot point - newer than the ancient limit -
 // was not yet committed, but the process crashed. In this test scenario the side
 // chain is above the committed block. In this case we expect the chain to detect
 // that it was fast syncing and not delete anything. The side chain is completely
 // nuked by the freezer.
 func TestLongNewerForkedFastSyncingShallowRepair(t *testing.T) {
+	testLongNewerForkedFastSyncingShallowRepair(t, false)
+}
+func TestLongNewerForkedFastSyncingShallowRepairWithSnapshots(t *testing.T) {
+	testLongNewerForkedFastSyncingShallowRepair(t, true)
+}
+
+func testLongNewerForkedFastSyncingShallowRepair(t *testing.T, snapshots bool) {
 	// Chain:
 	//   G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD)
 	//   └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12
@@ -1243,7 +1398,7 @@ func TestLongNewerForkedFastSyncingShallowRepair(t *testing.T) {
 		expHeadHeader:      18,
 		expHeadFastBlock:   18,
 		expHeadBlock:       0,
-	})
+	}, snapshots)
 }
 
 // Tests a recovery for a long canonical chain with frozen blocks and a shorter
 // side chain, where the fast sync pivot point - older than the ancient limit -
 // was not yet committed, but the process crashed. In this test scenario the side
 // chain is above the committed block. In this case we expect the chain to detect
 // that it was fast syncing and not delete anything. The side chain is completely
 // nuked by the freezer.
 func TestLongNewerForkedFastSyncingDeepRepair(t *testing.T) {
+	testLongNewerForkedFastSyncingDeepRepair(t, false)
+}
+func TestLongNewerForkedFastSyncingDeepRepairWithSnapshots(t *testing.T) {
+	testLongNewerForkedFastSyncingDeepRepair(t, true)
+}
+
+func testLongNewerForkedFastSyncingDeepRepair(t *testing.T, snapshots bool) {
 	// Chain:
 	//   G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD)
 	//   └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12
@@ -1288,7 +1450,7 @@ func TestLongNewerForkedFastSyncingDeepRepair(t *testing.T) {
 		expHeadHeader:      24,
 		expHeadFastBlock:   24,
 		expHeadBlock:       0,
-	})
+	}, snapshots)
 }
 
 // Tests a recovery for a long canonical chain with frozen blocks and a longer side
 // chain, where a recent block - newer than the ancient limit - was already committed
 // to disk and then the process crashed. In this case we expect the chain to be
 // rolled back to the committed block, with everything afterwards kept as fast sync
 // data. The side chain completely nuked by the freezer.
-func TestLongReorgedShallowRepair(t *testing.T) {
+func TestLongReorgedShallowRepair(t *testing.T)              { testLongReorgedShallowRepair(t, false) }
+func TestLongReorgedShallowRepairWithSnapshots(t *testing.T) { testLongReorgedShallowRepair(t, true) }
+
+func testLongReorgedShallowRepair(t *testing.T, snapshots bool) {
 	// Chain:
 	//   G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD)
 	//   └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12->S13->S14->S15->S16->S17->S18->S19->S20->S21->S22->S23->S24->S25->S26
@@ -1332,7 +1497,7 @@ func TestLongReorgedShallowRepair(t *testing.T) {
 		expHeadHeader:      18,
 		expHeadFastBlock:   18,
 		expHeadBlock:       4,
-	})
+	}, snapshots)
 }
 
 // Tests a recovery for a long canonical chain with frozen blocks and a longer side
 // chain, where a recent block - older than the ancient limit - was already committed
 // to disk and then the process crashed. In this case we expect the canonical chains
 // to be rolled back to the committed block, with everything afterwards deleted. The
 // side chain completely nuked by the freezer.
-func TestLongReorgedDeepRepair(t *testing.T) {
+func TestLongReorgedDeepRepair(t *testing.T)              { testLongReorgedDeepRepair(t, false) }
+func TestLongReorgedDeepRepairWithSnapshots(t *testing.T) { testLongReorgedDeepRepair(t, true) }
+
+func testLongReorgedDeepRepair(t *testing.T, snapshots bool) {
 	// Chain:
 	//   G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD)
 	//   └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12->S13->S14->S15->S16->S17->S18->S19->S20->S21->S22->S23->S24->S25->S26
@@ -1375,7 +1543,7 @@ func TestLongReorgedDeepRepair(t *testing.T) {
 		expHeadHeader:      4,
 		expHeadFastBlock:   4,
 		expHeadBlock:       4,
-	})
+	}, snapshots)
 }
 
 // Tests a recovery for a long canonical chain with frozen blocks and a longer
 // side chain, where the fast sync pivot point - newer than the ancient limit - was
 // already committed to disk and then the process crashed. In this case we expect
 // the chain to be rolled back to the committed block, with everything
 // afterwards kept as fast sync data. The side chain completely nuked by the
 // freezer.
 func TestLongReorgedFastSyncedShallowRepair(t *testing.T) {
+	testLongReorgedFastSyncedShallowRepair(t, false)
+}
+func TestLongReorgedFastSyncedShallowRepairWithSnapshots(t *testing.T) {
+	testLongReorgedFastSyncedShallowRepair(t, true)
+}
+
+func testLongReorgedFastSyncedShallowRepair(t *testing.T, snapshots bool) {
 	// Chain:
 	//   G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD)
 	//   └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12->S13->S14->S15->S16->S17->S18->S19->S20->S21->S22->S23->S24->S25->S26
@@ -1420,7 +1595,7 @@ func TestLongReorgedFastSyncedShallowRepair(t *testing.T) {
 		expHeadHeader:      18,
 		expHeadFastBlock:   18,
 		expHeadBlock:       4,
-	})
+	}, snapshots)
 }
 
 // Tests a recovery for a long canonical chain with frozen blocks and a longer
 // side chain, where the fast sync pivot point - older than the ancient limit -
 // was already committed to disk and then the process crashed. In this case we
 // expect the canonical chains to be rolled back to the committed block, with
 // everything afterwards deleted. The side chain completely nuked by the freezer.
 func TestLongReorgedFastSyncedDeepRepair(t *testing.T) {
+	testLongReorgedFastSyncedDeepRepair(t, false)
+}
+func TestLongReorgedFastSyncedDeepRepairWithSnapshots(t *testing.T) {
+	testLongReorgedFastSyncedDeepRepair(t, true)
+}
+
+func testLongReorgedFastSyncedDeepRepair(t *testing.T, snapshots bool) {
 	// Chain:
 	//   G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD)
 	//   └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12->S13->S14->S15->S16->S17->S18->S19->S20->S21->S22->S23->S24->S25->S26
@@ -1463,7 +1645,7 @@ func TestLongReorgedFastSyncedDeepRepair(t *testing.T) {
 		expHeadHeader:      4,
 		expHeadFastBlock:   4,
 		expHeadBlock:       4,
-	})
+	}, snapshots)
 }
 
 // Tests a recovery for a long canonical chain with frozen blocks and a longer
 // side chain, where the fast sync pivot point - newer than the ancient limit -
 // was not yet committed, but the process crashed. In this case we expect the
 // chain to detect that it was fast syncing and not delete anything, since we
 // can just pick up directly where we left off.
 func TestLongReorgedFastSyncingShallowRepair(t *testing.T) {
+	testLongReorgedFastSyncingShallowRepair(t, false)
+}
+func TestLongReorgedFastSyncingShallowRepairWithSnapshots(t *testing.T) {
+	testLongReorgedFastSyncingShallowRepair(t, true)
+}
+
+func testLongReorgedFastSyncingShallowRepair(t *testing.T, snapshots bool) {
 	// Chain:
 	//   G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD)
 	//   └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12->S13->S14->S15->S16->S17->S18->S19->S20->S21->S22->S23->S24->S25->S26
@@ -1507,7 +1696,7 @@ func TestLongReorgedFastSyncingShallowRepair(t *testing.T) {
 		expHeadHeader:      18,
 		expHeadFastBlock:   18,
 		expHeadBlock:       0,
-	})
+	}, snapshots)
 }
 
 // Tests a recovery for a long canonical chain with frozen blocks and a longer
 // side chain, where the fast sync pivot point - older than the ancient limit -
 // was not yet committed, but the process crashed. In this case we expect the
 // chain to detect that it was fast syncing and not delete anything, since we
 // can just pick up directly where we left off.
func TestLongReorgedFastSyncingDeepRepair(t *testing.T) { + testLongReorgedFastSyncingDeepRepair(t, false) +} +func TestLongReorgedFastSyncingDeepRepairWithSnapshots(t *testing.T) { + testLongReorgedFastSyncingDeepRepair(t, true) +} + +func testLongReorgedFastSyncingDeepRepair(t *testing.T, snapshots bool) { // Chain: // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD) // └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12->S13->S14->S15->S16->S17->S18->S19->S20->S21->S22->S23->S24->S25->S26 @@ -1551,12 +1747,12 @@ func TestLongReorgedFastSyncingDeepRepair(t *testing.T) { expHeadHeader: 24, expHeadFastBlock: 24, expHeadBlock: 0, - }) + }, snapshots) } -func testRepair(t *testing.T, tt *rewindTest) { +func testRepair(t *testing.T, tt *rewindTest, snapshots bool) { // It's hard to follow the test case, visualize the input - // log.Root().SetHandler(log.LvlFilterHandler(log.LvlTrace, log.StreamHandler(os.Stderr, log.TerminalFormat(true)))) + //log.Root().SetHandler(log.LvlFilterHandler(log.LvlTrace, log.StreamHandler(os.Stderr, log.TerminalFormat(true)))) // fmt.Println(tt.dump(true)) // Create a temporary persistent database @@ -1574,16 +1770,20 @@ func testRepair(t *testing.T, tt *rewindTest) { // Initialize a fresh chain var ( - genesis = new(Genesis).MustCommit(db) - engine = ethash.NewFullFaker() - cacheConfig = &CacheConfig{ + genesis = new(Genesis).MustCommit(db) + engine = ethash.NewFullFaker() + config = &CacheConfig{ TrieCleanLimit: 256, TrieDirtyLimit: 256, TrieTimeLimit: 5 * time.Minute, - SnapshotLimit: 0, // Disable snapshot + SnapshotLimit: 0, // Disable snapshot by default } ) - chain, err := NewBlockChain(db, cacheConfig, params.AllEthashProtocolChanges, engine, vm.Config{}, nil, nil) + if snapshots { + config.SnapshotLimit = 256 + config.SnapshotWait = true + } + chain, err := NewBlockChain(db, config, params.AllEthashProtocolChanges, engine, vm.Config{}, nil, nil) if err != nil { t.Fatalf("Failed to create chain: %v", err) } @@ -1606,6 +1806,11 @@ func testRepair(t *testing.T, tt *rewindTest) { } if tt.commitBlock > 0 { chain.stateCache.TrieDB().Commit(canonblocks[tt.commitBlock-1].Root(), true, nil) + if snapshots { + if err := chain.snaps.Cap(canonblocks[tt.commitBlock-1].Root(), 0); err != nil { + t.Fatalf("Failed to flatten snapshots: %v", err) + } + } } if _, err := chain.InsertChain(canonblocks[tt.commitBlock:]); err != nil { t.Fatalf("Failed to import canonical chain tail: %v", err) diff --git a/core/blockchain_sethead_test.go b/core/blockchain_sethead_test.go index e4f81c9c9daf..45c4073eb4ce 100644 --- a/core/blockchain_sethead_test.go +++ b/core/blockchain_sethead_test.go @@ -151,7 +151,10 @@ func (tt *rewindTest) dump(crash bool) string { // chain to be rolled back to the committed block. Everything above the sethead // point should be deleted. In between the committed block and the requested head // the data can remain as "fast sync" data to avoid redownloading it. 
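The scaffolding pattern above repeats for every scenario in this file and in the sethead tests that follow: each exported test fans out into a snapshot-less and a snapshot-enabled run of a single parametrised helper. Distilled to a skeleton (TestScenario/testScenario are placeholder names; the scenario setup and assertions are elided), the shape is:

	func TestScenario(t *testing.T)              { testScenario(t, false) }
	func TestScenarioWithSnapshots(t *testing.T) { testScenario(t, true) }

	func testScenario(t *testing.T, snapshots bool) {
		config := &CacheConfig{
			TrieCleanLimit: 256,
			TrieDirtyLimit: 256,
			TrieTimeLimit:  5 * time.Minute,
			SnapshotLimit:  0, // snapshots disabled by default
		}
		if snapshots {
			config.SnapshotLimit = 256 // enable the snapshot tree
			config.SnapshotWait = true // block until generation completes
		}
		// ... build the chain with config, commit, crash or rewind, then
		// assert the expected head header / fast block / block ...
	}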
-func TestShortSetHead(t *testing.T) { +func TestShortSetHead(t *testing.T) { testShortSetHead(t, false) } +func TestShortSetHeadWithSnapshots(t *testing.T) { testShortSetHead(t, true) } + +func testShortSetHead(t *testing.T, snapshots bool) { // Chain: // G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD) // @@ -182,7 +185,7 @@ func TestShortSetHead(t *testing.T) { expHeadHeader: 7, expHeadFastBlock: 7, expHeadBlock: 4, - }) + }, snapshots) } // Tests a sethead for a short canonical chain where the fast sync pivot point was @@ -191,7 +194,10 @@ func TestShortSetHead(t *testing.T) { // Everything above the sethead point should be deleted. In between the committed // block and the requested head the data can remain as "fast sync" data to avoid // redownloading it. -func TestShortFastSyncedSetHead(t *testing.T) { +func TestShortFastSyncedSetHead(t *testing.T) { testShortFastSyncedSetHead(t, false) } +func TestShortFastSyncedSetHeadWithSnapshots(t *testing.T) { testShortFastSyncedSetHead(t, true) } + +func testShortFastSyncedSetHead(t *testing.T, snapshots bool) { // Chain: // G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD) // @@ -222,7 +228,7 @@ func TestShortFastSyncedSetHead(t *testing.T) { expHeadHeader: 7, expHeadFastBlock: 7, expHeadBlock: 4, - }) + }, snapshots) } // Tests a sethead for a short canonical chain where the fast sync pivot point was @@ -230,7 +236,10 @@ func TestShortFastSyncedSetHead(t *testing.T) { // detect that it was fast syncing and delete everything from the new head, since // we can just pick up fast syncing from there. The head full block should be set // to the genesis. -func TestShortFastSyncingSetHead(t *testing.T) { +func TestShortFastSyncingSetHead(t *testing.T) { testShortFastSyncingSetHead(t, false) } +func TestShortFastSyncingSetHeadWithSnapshots(t *testing.T) { testShortFastSyncingSetHead(t, true) } + +func testShortFastSyncingSetHead(t *testing.T, snapshots bool) { // Chain: // G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD) // @@ -261,7 +270,7 @@ func TestShortFastSyncingSetHead(t *testing.T) { expHeadHeader: 7, expHeadFastBlock: 7, expHeadBlock: 0, - }) + }, snapshots) } // Tests a sethead for a short canonical chain and a shorter side chain, where a @@ -271,7 +280,10 @@ func TestShortFastSyncingSetHead(t *testing.T) { // above the sethead point should be deleted. In between the committed block and // the requested head the data can remain as "fast sync" data to avoid redownloading // it. The side chain should be left alone as it was shorter. -func TestShortOldForkedSetHead(t *testing.T) { +func TestShortOldForkedSetHead(t *testing.T) { testShortOldForkedSetHead(t, false) } +func TestShortOldForkedSetHeadWithSnapshots(t *testing.T) { testShortOldForkedSetHead(t, true) } + +func testShortOldForkedSetHead(t *testing.T, snapshots bool) { // Chain: // G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD) // └->S1->S2->S3 @@ -304,7 +316,7 @@ func TestShortOldForkedSetHead(t *testing.T) { expHeadHeader: 7, expHeadFastBlock: 7, expHeadBlock: 4, - }) + }, snapshots) } // Tests a sethead for a short canonical chain and a shorter side chain, where @@ -315,6 +327,13 @@ func TestShortOldForkedSetHead(t *testing.T) { // committed block and the requested head the data can remain as "fast sync" data // to avoid redownloading it. The side chain should be left alone as it was shorter. 
func TestShortOldForkedFastSyncedSetHead(t *testing.T) { + testShortOldForkedFastSyncedSetHead(t, false) +} +func TestShortOldForkedFastSyncedSetHeadWithSnapshots(t *testing.T) { + testShortOldForkedFastSyncedSetHead(t, true) +} + +func testShortOldForkedFastSyncedSetHead(t *testing.T, snapshots bool) { // Chain: // G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD) // └->S1->S2->S3 @@ -347,7 +366,7 @@ func TestShortOldForkedFastSyncedSetHead(t *testing.T) { expHeadHeader: 7, expHeadFastBlock: 7, expHeadBlock: 4, - }) + }, snapshots) } // Tests a sethead for a short canonical chain and a shorter side chain, where @@ -357,6 +376,13 @@ func TestShortOldForkedFastSyncedSetHead(t *testing.T) { // head, since we can just pick up fast syncing from there. The head full block // should be set to the genesis. func TestShortOldForkedFastSyncingSetHead(t *testing.T) { + testShortOldForkedFastSyncingSetHead(t, false) +} +func TestShortOldForkedFastSyncingSetHeadWithSnapshots(t *testing.T) { + testShortOldForkedFastSyncingSetHead(t, true) +} + +func testShortOldForkedFastSyncingSetHead(t *testing.T, snapshots bool) { // Chain: // G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD) // └->S1->S2->S3 @@ -389,7 +415,7 @@ func TestShortOldForkedFastSyncingSetHead(t *testing.T) { expHeadHeader: 7, expHeadFastBlock: 7, expHeadBlock: 0, - }) + }, snapshots) } // Tests a sethead for a short canonical chain and a shorter side chain, where a @@ -403,7 +429,10 @@ func TestShortOldForkedFastSyncingSetHead(t *testing.T) { // The side chain could be left to be if the fork point was before the new head // we are deleting to, but it would be exceedingly hard to detect that case and // properly handle it, so we'll trade extra work in exchange for simpler code. -func TestShortNewlyForkedSetHead(t *testing.T) { +func TestShortNewlyForkedSetHead(t *testing.T) { testShortNewlyForkedSetHead(t, false) } +func TestShortNewlyForkedSetHeadWithSnapshots(t *testing.T) { testShortNewlyForkedSetHead(t, true) } + +func testShortNewlyForkedSetHead(t *testing.T, snapshots bool) { // Chain: // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10 (HEAD) // └->S1->S2->S3->S4->S5->S6->S7->S8 @@ -436,7 +465,7 @@ func TestShortNewlyForkedSetHead(t *testing.T) { expHeadHeader: 7, expHeadFastBlock: 7, expHeadBlock: 4, - }) + }, snapshots) } // Tests a sethead for a short canonical chain and a shorter side chain, where @@ -450,6 +479,13 @@ func TestShortNewlyForkedSetHead(t *testing.T) { // we are deleting to, but it would be exceedingly hard to detect that case and // properly handle it, so we'll trade extra work in exchange for simpler code. func TestShortNewlyForkedFastSyncedSetHead(t *testing.T) { + testShortNewlyForkedFastSyncedSetHead(t, false) +} +func TestShortNewlyForkedFastSyncedSetHeadWithSnapshots(t *testing.T) { + testShortNewlyForkedFastSyncedSetHead(t, true) +} + +func testShortNewlyForkedFastSyncedSetHead(t *testing.T, snapshots bool) { // Chain: // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10 (HEAD) // └->S1->S2->S3->S4->S5->S6->S7->S8 @@ -482,7 +518,7 @@ func TestShortNewlyForkedFastSyncedSetHead(t *testing.T) { expHeadHeader: 7, expHeadFastBlock: 7, expHeadBlock: 4, - }) + }, snapshots) } // Tests a sethead for a short canonical chain and a shorter side chain, where @@ -496,6 +532,13 @@ func TestShortNewlyForkedFastSyncedSetHead(t *testing.T) { // we are deleting to, but it would be exceedingly hard to detect that case and // properly handle it, so we'll trade extra work in exchange for simpler code. 
func TestShortNewlyForkedFastSyncingSetHead(t *testing.T) { + testShortNewlyForkedFastSyncingSetHead(t, false) +} +func TestShortNewlyForkedFastSyncingSetHeadWithSnapshots(t *testing.T) { + testShortNewlyForkedFastSyncingSetHead(t, true) +} + +func testShortNewlyForkedFastSyncingSetHead(t *testing.T, snapshots bool) { // Chain: // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10 (HEAD) // └->S1->S2->S3->S4->S5->S6->S7->S8 @@ -528,7 +571,7 @@ func TestShortNewlyForkedFastSyncingSetHead(t *testing.T) { expHeadHeader: 7, expHeadFastBlock: 7, expHeadBlock: 0, - }) + }, snapshots) } // Tests a sethead for a short canonical chain and a longer side chain, where a @@ -541,7 +584,10 @@ func TestShortNewlyForkedFastSyncingSetHead(t *testing.T) { // The side chain could be left to be if the fork point was before the new head // we are deleting to, but it would be exceedingly hard to detect that case and // properly handle it, so we'll trade extra work in exchange for simpler code. -func TestShortReorgedSetHead(t *testing.T) { +func TestShortReorgedSetHead(t *testing.T) { testShortReorgedSetHead(t, false) } +func TestShortReorgedSetHeadWithSnapshots(t *testing.T) { testShortReorgedSetHead(t, true) } + +func testShortReorgedSetHead(t *testing.T, snapshots bool) { // Chain: // G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD) // └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10 @@ -574,7 +620,7 @@ func TestShortReorgedSetHead(t *testing.T) { expHeadHeader: 7, expHeadFastBlock: 7, expHeadBlock: 4, - }) + }, snapshots) } // Tests a sethead for a short canonical chain and a longer side chain, where @@ -589,6 +635,13 @@ func TestShortReorgedSetHead(t *testing.T) { // we are deleting to, but it would be exceedingly hard to detect that case and // properly handle it, so we'll trade extra work in exchange for simpler code. func TestShortReorgedFastSyncedSetHead(t *testing.T) { + testShortReorgedFastSyncedSetHead(t, false) +} +func TestShortReorgedFastSyncedSetHeadWithSnapshots(t *testing.T) { + testShortReorgedFastSyncedSetHead(t, true) +} + +func testShortReorgedFastSyncedSetHead(t *testing.T, snapshots bool) { // Chain: // G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD) // └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10 @@ -621,7 +674,7 @@ func TestShortReorgedFastSyncedSetHead(t *testing.T) { expHeadHeader: 7, expHeadFastBlock: 7, expHeadBlock: 4, - }) + }, snapshots) } // Tests a sethead for a short canonical chain and a longer side chain, where @@ -634,6 +687,13 @@ func TestShortReorgedFastSyncedSetHead(t *testing.T) { // we are deleting to, but it would be exceedingly hard to detect that case and // properly handle it, so we'll trade extra work in exchange for simpler code. func TestShortReorgedFastSyncingSetHead(t *testing.T) { + testShortReorgedFastSyncingSetHead(t, false) +} +func TestShortReorgedFastSyncingSetHeadWithSnapshots(t *testing.T) { + testShortReorgedFastSyncingSetHead(t, true) +} + +func testShortReorgedFastSyncingSetHead(t *testing.T, snapshots bool) { // Chain: // G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD) // └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10 @@ -666,7 +726,7 @@ func TestShortReorgedFastSyncingSetHead(t *testing.T) { expHeadHeader: 7, expHeadFastBlock: 7, expHeadBlock: 0, - }) + }, snapshots) } // Tests a sethead for a long canonical chain with frozen blocks where a recent @@ -675,7 +735,10 @@ func TestShortReorgedFastSyncingSetHead(t *testing.T) { // to the committed block. Everything above the sethead point should be deleted. 
// In between the committed block and the requested head the data can remain as // "fast sync" data to avoid redownloading it. -func TestLongShallowSetHead(t *testing.T) { +func TestLongShallowSetHead(t *testing.T) { testLongShallowSetHead(t, false) } +func TestLongShallowSetHeadWithSnapshots(t *testing.T) { testLongShallowSetHead(t, true) } + +func testLongShallowSetHead(t *testing.T, snapshots bool) { // Chain: // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD) // @@ -711,7 +774,7 @@ func TestLongShallowSetHead(t *testing.T) { expHeadHeader: 6, expHeadFastBlock: 6, expHeadBlock: 4, - }) + }, snapshots) } // Tests a sethead for a long canonical chain with frozen blocks where a recent @@ -719,7 +782,10 @@ func TestLongShallowSetHead(t *testing.T) { // sethead was called. In this case we expect the full chain to be rolled back // to the committed block. Since the ancient limit was underflown, everything // needs to be deleted onwards to avoid creating a gap. -func TestLongDeepSetHead(t *testing.T) { +func TestLongDeepSetHead(t *testing.T) { testLongDeepSetHead(t, false) } +func TestLongDeepSetHeadWithSnapshots(t *testing.T) { testLongDeepSetHead(t, true) } + +func testLongDeepSetHead(t *testing.T, snapshots bool) { // Chain: // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD) // @@ -754,7 +820,7 @@ func TestLongDeepSetHead(t *testing.T) { expHeadHeader: 4, expHeadFastBlock: 4, expHeadBlock: 4, - }) + }, snapshots) } // Tests a sethead for a long canonical chain with frozen blocks where the fast @@ -764,6 +830,13 @@ func TestLongDeepSetHead(t *testing.T) { // deleted. In between the committed block and the requested head the data can // remain as "fast sync" data to avoid redownloading it. func TestLongFastSyncedShallowSetHead(t *testing.T) { + testLongFastSyncedShallowSetHead(t, false) +} +func TestLongFastSyncedShallowSetHeadWithSnapshots(t *testing.T) { + testLongFastSyncedShallowSetHead(t, true) +} + +func testLongFastSyncedShallowSetHead(t *testing.T, snapshots bool) { // Chain: // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD) // @@ -799,7 +872,7 @@ func TestLongFastSyncedShallowSetHead(t *testing.T) { expHeadHeader: 6, expHeadFastBlock: 6, expHeadBlock: 4, - }) + }, snapshots) } // Tests a sethead for a long canonical chain with frozen blocks where the fast @@ -807,7 +880,10 @@ func TestLongFastSyncedShallowSetHead(t *testing.T) { // which sethead was called. In this case we expect the full chain to be rolled // back to the committed block. Since the ancient limit was underflown, everything // needs to be deleted onwards to avoid creating a gap. 
-func TestLongFastSyncedDeepSetHead(t *testing.T) { +func TestLongFastSyncedDeepSetHead(t *testing.T) { testLongFastSyncedDeepSetHead(t, false) } +func TestLongFastSyncedDeepSetHeadWithSnapshots(t *testing.T) { testLongFastSyncedDeepSetHead(t, true) } + +func testLongFastSyncedDeepSetHead(t *testing.T, snapshots bool) { // Chain: // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD) // @@ -842,7 +918,7 @@ func TestLongFastSyncedDeepSetHead(t *testing.T) { expHeadHeader: 4, expHeadFastBlock: 4, expHeadBlock: 4, - }) + }, snapshots) } // Tests a sethead for a long canonical chain with frozen blocks where the fast @@ -851,6 +927,13 @@ func TestLongFastSyncedDeepSetHead(t *testing.T) { // syncing and delete everything from the new head, since we can just pick up fast // syncing from there. func TestLongFastSyncingShallowSetHead(t *testing.T) { + testLongFastSyncingShallowSetHead(t, false) +} +func TestLongFastSyncingShallowSetHeadWithSnapshots(t *testing.T) { + testLongFastSyncingShallowSetHead(t, true) +} + +func testLongFastSyncingShallowSetHead(t *testing.T, snapshots bool) { // Chain: // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD) // @@ -886,7 +969,7 @@ func TestLongFastSyncingShallowSetHead(t *testing.T) { expHeadHeader: 6, expHeadFastBlock: 6, expHeadBlock: 0, - }) + }, snapshots) } // Tests a sethead for a long canonical chain with frozen blocks where the fast @@ -895,6 +978,13 @@ func TestLongFastSyncingShallowSetHead(t *testing.T) { // syncing and delete everything from the new head, since we can just pick up fast // syncing from there. func TestLongFastSyncingDeepSetHead(t *testing.T) { + testLongFastSyncingDeepSetHead(t, false) +} +func TestLongFastSyncingDeepSetHeadWithSnapshots(t *testing.T) { + testLongFastSyncingDeepSetHead(t, true) +} + +func testLongFastSyncingDeepSetHead(t *testing.T, snapshots bool) { // Chain: // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD) // @@ -929,7 +1019,7 @@ func TestLongFastSyncingDeepSetHead(t *testing.T) { expHeadHeader: 6, expHeadFastBlock: 6, expHeadBlock: 0, - }) + }, snapshots) } // Tests a sethead for a long canonical chain with frozen blocks and a shorter side @@ -940,6 +1030,13 @@ func TestLongFastSyncingDeepSetHead(t *testing.T) { // can remain as "fast sync" data to avoid redownloading it. The side chain is nuked // by the freezer. func TestLongOldForkedShallowSetHead(t *testing.T) { + testLongOldForkedShallowSetHead(t, false) +} +func TestLongOldForkedShallowSetHeadWithSnapshots(t *testing.T) { + testLongOldForkedShallowSetHead(t, true) +} + +func testLongOldForkedShallowSetHead(t *testing.T, snapshots bool) { // Chain: // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD) // └->S1->S2->S3 @@ -976,7 +1073,7 @@ func TestLongOldForkedShallowSetHead(t *testing.T) { expHeadHeader: 6, expHeadFastBlock: 6, expHeadBlock: 4, - }) + }, snapshots) } // Tests a sethead for a long canonical chain with frozen blocks and a shorter side @@ -985,7 +1082,10 @@ func TestLongOldForkedShallowSetHead(t *testing.T) { // chain to be rolled back to the committed block. Since the ancient limit was // underflown, everything needs to be deleted onwards to avoid creating a gap. The // side chain is nuked by the freezer. 
-func TestLongOldForkedDeepSetHead(t *testing.T) { +func TestLongOldForkedDeepSetHead(t *testing.T) { testLongOldForkedDeepSetHead(t, false) } +func TestLongOldForkedDeepSetHeadWithSnapshots(t *testing.T) { testLongOldForkedDeepSetHead(t, true) } + +func testLongOldForkedDeepSetHead(t *testing.T, snapshots bool) { // Chain: // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD) // └->S1->S2->S3 @@ -1021,7 +1121,7 @@ func TestLongOldForkedDeepSetHead(t *testing.T) { expHeadHeader: 4, expHeadFastBlock: 4, expHeadBlock: 4, - }) + }, snapshots) } // Tests a sethead for a long canonical chain with frozen blocks and a shorter @@ -1033,6 +1133,13 @@ func TestLongOldForkedDeepSetHead(t *testing.T) { // requested head the data can remain as "fast sync" data to avoid redownloading // it. The side chain is nuked by the freezer. func TestLongOldForkedFastSyncedShallowSetHead(t *testing.T) { + testLongOldForkedFastSyncedShallowSetHead(t, false) +} +func TestLongOldForkedFastSyncedShallowSetHeadWithSnapshots(t *testing.T) { + testLongOldForkedFastSyncedShallowSetHead(t, true) +} + +func testLongOldForkedFastSyncedShallowSetHead(t *testing.T, snapshots bool) { // Chain: // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD) // └->S1->S2->S3 @@ -1069,7 +1176,7 @@ func TestLongOldForkedFastSyncedShallowSetHead(t *testing.T) { expHeadHeader: 6, expHeadFastBlock: 6, expHeadBlock: 4, - }) + }, snapshots) } // Tests a sethead for a long canonical chain with frozen blocks and a shorter @@ -1080,6 +1187,13 @@ func TestLongOldForkedFastSyncedShallowSetHead(t *testing.T) { // underflown, everything needs to be deleted onwards to avoid creating a gap. The // side chain is nuked by the freezer. func TestLongOldForkedFastSyncedDeepSetHead(t *testing.T) { + testLongOldForkedFastSyncedDeepSetHead(t, false) +} +func TestLongOldForkedFastSyncedDeepSetHeadWithSnapshots(t *testing.T) { + testLongOldForkedFastSyncedDeepSetHead(t, true) +} + +func testLongOldForkedFastSyncedDeepSetHead(t *testing.T, snapshots bool) { // Chain: // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD) // └->S1->S2->S3 @@ -1115,7 +1229,7 @@ func TestLongOldForkedFastSyncedDeepSetHead(t *testing.T) { expHeadHeader: 4, expHeadFastBlock: 4, expHeadBlock: 4, - }) + }, snapshots) } // Tests a sethead for a long canonical chain with frozen blocks and a shorter @@ -1126,6 +1240,13 @@ func TestLongOldForkedFastSyncedDeepSetHead(t *testing.T) { // just pick up fast syncing from there. The side chain is completely nuked by the // freezer. func TestLongOldForkedFastSyncingShallowSetHead(t *testing.T) { + testLongOldForkedFastSyncingShallowSetHead(t, false) +} +func TestLongOldForkedFastSyncingShallowSetHeadWithSnapshots(t *testing.T) { + testLongOldForkedFastSyncingShallowSetHead(t, true) +} + +func testLongOldForkedFastSyncingShallowSetHead(t *testing.T, snapshots bool) { // Chain: // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD) // └->S1->S2->S3 @@ -1162,7 +1283,7 @@ func TestLongOldForkedFastSyncingShallowSetHead(t *testing.T) { expHeadHeader: 6, expHeadFastBlock: 6, expHeadBlock: 0, - }) + }, snapshots) } // Tests a sethead for a long canonical chain with frozen blocks and a shorter @@ -1173,6 +1294,13 @@ func TestLongOldForkedFastSyncingShallowSetHead(t *testing.T) { // just pick up fast syncing from there. 
The side chain is completely nuked by the // freezer. func TestLongOldForkedFastSyncingDeepSetHead(t *testing.T) { + testLongOldForkedFastSyncingDeepSetHead(t, false) +} +func TestLongOldForkedFastSyncingDeepSetHeadWithSnapshots(t *testing.T) { + testLongOldForkedFastSyncingDeepSetHead(t, true) +} + +func testLongOldForkedFastSyncingDeepSetHead(t *testing.T, snapshots bool) { // Chain: // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD) // └->S1->S2->S3 @@ -1208,7 +1336,7 @@ func TestLongOldForkedFastSyncingDeepSetHead(t *testing.T) { expHeadHeader: 6, expHeadFastBlock: 6, expHeadBlock: 0, - }) + }, snapshots) } // Tests a sethead for a long canonical chain with frozen blocks and a shorter @@ -1217,6 +1345,13 @@ func TestLongOldForkedFastSyncingDeepSetHead(t *testing.T) { // chain is above the committed block. In this case the freezer will delete the // sidechain since it's dangling, reverting to TestLongShallowSetHead. func TestLongNewerForkedShallowSetHead(t *testing.T) { + testLongNewerForkedShallowSetHead(t, false) +} +func TestLongNewerForkedShallowSetHeadWithSnapshots(t *testing.T) { + testLongNewerForkedShallowSetHead(t, true) +} + +func testLongNewerForkedShallowSetHead(t *testing.T, snapshots bool) { // Chain: // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD) // └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12 @@ -1253,7 +1388,7 @@ func TestLongNewerForkedShallowSetHead(t *testing.T) { expHeadHeader: 6, expHeadFastBlock: 6, expHeadBlock: 4, - }) + }, snapshots) } // Tests a sethead for a long canonical chain with frozen blocks and a shorter @@ -1262,6 +1397,13 @@ func TestLongNewerForkedShallowSetHead(t *testing.T) { // chain is above the committed block. In this case the freezer will delete the // sidechain since it's dangling, reverting to TestLongDeepSetHead. func TestLongNewerForkedDeepSetHead(t *testing.T) { + testLongNewerForkedDeepSetHead(t, false) +} +func TestLongNewerForkedDeepSetHeadWithSnapshots(t *testing.T) { + testLongNewerForkedDeepSetHead(t, true) +} + +func testLongNewerForkedDeepSetHead(t *testing.T, snapshots bool) { // Chain: // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD) // └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12 @@ -1297,7 +1439,7 @@ func TestLongNewerForkedDeepSetHead(t *testing.T) { expHeadHeader: 4, expHeadFastBlock: 4, expHeadBlock: 4, - }) + }, snapshots) } // Tests a sethead for a long canonical chain with frozen blocks and a shorter @@ -1306,6 +1448,13 @@ func TestLongNewerForkedDeepSetHead(t *testing.T) { // the side chain is above the committed block. In this case the freezer will delete // the sidechain since it's dangling, reverting to TestLongFastSyncedShallowSetHead. 
 func TestLongNewerForkedFastSyncedShallowSetHead(t *testing.T) {
+	testLongNewerForkedFastSyncedShallowSetHead(t, false)
+}
+func TestLongNewerForkedFastSyncedShallowSetHeadWithSnapshots(t *testing.T) {
+	testLongNewerForkedFastSyncedShallowSetHead(t, true)
+}
+
+func testLongNewerForkedFastSyncedShallowSetHead(t *testing.T, snapshots bool) {
 	// Chain:
 	//   G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD)
 	//   └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12
@@ -1342,7 +1491,7 @@ func TestLongNewerForkedFastSyncedShallowSetHead(t *testing.T) {
 		expHeadHeader:      6,
 		expHeadFastBlock:   6,
 		expHeadBlock:       4,
-	})
+	}, snapshots)
 }
 
 // Tests a sethead for a long canonical chain with frozen blocks and a shorter
 // side chain, where the fast sync pivot point - older than the ancient limit -
 // was already committed to disk and then sethead was called. In this test scenario
 // the side chain is above the committed block. In this case the freezer will delete
 // the sidechain since it's dangling, reverting to TestLongFastSyncedDeepSetHead.
 func TestLongNewerForkedFastSyncedDeepSetHead(t *testing.T) {
+	testLongNewerForkedFastSyncedDeepSetHead(t, false)
+}
+func TestLongNewerForkedFastSyncedDeepSetHeadWithSnapshots(t *testing.T) {
+	testLongNewerForkedFastSyncedDeepSetHead(t, true)
+}
+
+func testLongNewerForkedFastSyncedDeepSetHead(t *testing.T, snapshots bool) {
 	// Chain:
 	//   G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD)
 	//   └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12
@@ -1386,7 +1542,7 @@ func TestLongNewerForkedFastSyncedDeepSetHead(t *testing.T) {
 		expHeadHeader:      4,
 		expHeadFastBlock:   4,
 		expHeadBlock:       4,
-	})
+	}, snapshots)
 }
 
 // Tests a sethead for a long canonical chain with frozen blocks and a shorter
 // side chain, where the fast sync pivot point - newer than the ancient limit -
 // was not yet committed, but sethead was called. In this test scenario the side
 // chain is above the committed block. In this case the freezer will delete the
 // sidechain since it's dangling, reverting to TestLongFastSyncingShallowSetHead.
 func TestLongNewerForkedFastSyncingShallowSetHead(t *testing.T) {
+	testLongNewerForkedFastSyncingShallowSetHead(t, false)
+}
+func TestLongNewerForkedFastSyncingShallowSetHeadWithSnapshots(t *testing.T) {
+	testLongNewerForkedFastSyncingShallowSetHead(t, true)
+}
+
+func testLongNewerForkedFastSyncingShallowSetHead(t *testing.T, snapshots bool) {
 	// Chain:
 	//   G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD)
 	//   └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12
@@ -1431,7 +1594,7 @@ func TestLongNewerForkedFastSyncingShallowSetHead(t *testing.T) {
 		expHeadHeader:      6,
 		expHeadFastBlock:   6,
 		expHeadBlock:       0,
-	})
+	}, snapshots)
 }
 
 // Tests a sethead for a long canonical chain with frozen blocks and a shorter
 // side chain, where the fast sync pivot point - older than the ancient limit -
 // was not yet committed, but sethead was called. In this test scenario the side
 // chain is above the committed block. In this case the freezer will delete the
 // sidechain since it's dangling, reverting to TestLongFastSyncingDeepSetHead.
func TestLongNewerForkedFastSyncingDeepSetHead(t *testing.T) { + testLongNewerForkedFastSyncingDeepSetHead(t, false) +} +func TestLongNewerForkedFastSyncingDeepSetHeadWithSnapshots(t *testing.T) { + testLongNewerForkedFastSyncingDeepSetHead(t, true) +} + +func testLongNewerForkedFastSyncingDeepSetHead(t *testing.T, snapshots bool) { // Chain: // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD) // └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12 @@ -1475,14 +1645,17 @@ func TestLongNewerForkedFastSyncingDeepSetHead(t *testing.T) { expHeadHeader: 6, expHeadFastBlock: 6, expHeadBlock: 0, - }) + }, snapshots) } // Tests a sethead for a long canonical chain with frozen blocks and a longer side // chain, where a recent block - newer than the ancient limit - was already committed // to disk and then sethead was called. In this case the freezer will delete the // sidechain since it's dangling, reverting to TestLongShallowSetHead. -func TestLongReorgedShallowSetHead(t *testing.T) { +func TestLongReorgedShallowSetHead(t *testing.T) { testLongReorgedShallowSetHead(t, false) } +func TestLongReorgedShallowSetHeadWithSnapshots(t *testing.T) { testLongReorgedShallowSetHead(t, true) } + +func testLongReorgedShallowSetHead(t *testing.T, snapshots bool) { // Chain: // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD) // └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12->S13->S14->S15->S16->S17->S18->S19->S20->S21->S22->S23->S24->S25->S26 @@ -1519,14 +1692,17 @@ func TestLongReorgedShallowSetHead(t *testing.T) { expHeadHeader: 6, expHeadFastBlock: 6, expHeadBlock: 4, - }) + }, snapshots) } // Tests a sethead for a long canonical chain with frozen blocks and a longer side // chain, where a recent block - older than the ancient limit - was already committed // to disk and then sethead was called. In this case the freezer will delete the // sidechain since it's dangling, reverting to TestLongDeepSetHead. -func TestLongReorgedDeepSetHead(t *testing.T) { +func TestLongReorgedDeepSetHead(t *testing.T) { testLongReorgedDeepSetHead(t, false) } +func TestLongReorgedDeepSetHeadWithSnapshots(t *testing.T) { testLongReorgedDeepSetHead(t, true) } + +func testLongReorgedDeepSetHead(t *testing.T, snapshots bool) { // Chain: // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD) // └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12->S13->S14->S15->S16->S17->S18->S19->S20->S21->S22->S23->S24->S25->S26 @@ -1562,7 +1738,7 @@ func TestLongReorgedDeepSetHead(t *testing.T) { expHeadHeader: 4, expHeadFastBlock: 4, expHeadBlock: 4, - }) + }, snapshots) } // Tests a sethead for a long canonical chain with frozen blocks and a longer @@ -1571,6 +1747,13 @@ func TestLongReorgedDeepSetHead(t *testing.T) { // freezer will delete the sidechain since it's dangling, reverting to // TestLongFastSyncedShallowSetHead. 
func TestLongReorgedFastSyncedShallowSetHead(t *testing.T) { + testLongReorgedFastSyncedShallowSetHead(t, false) +} +func TestLongReorgedFastSyncedShallowSetHeadWithSnapshots(t *testing.T) { + testLongReorgedFastSyncedShallowSetHead(t, true) +} + +func testLongReorgedFastSyncedShallowSetHead(t *testing.T, snapshots bool) { // Chain: // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD) // └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12->S13->S14->S15->S16->S17->S18->S19->S20->S21->S22->S23->S24->S25->S26 @@ -1607,7 +1790,7 @@ func TestLongReorgedFastSyncedShallowSetHead(t *testing.T) { expHeadHeader: 6, expHeadFastBlock: 6, expHeadBlock: 4, - }) + }, snapshots) } // Tests a sethead for a long canonical chain with frozen blocks and a longer @@ -1616,6 +1799,13 @@ func TestLongReorgedFastSyncedShallowSetHead(t *testing.T) { // freezer will delete the sidechain since it's dangling, reverting to // TestLongFastSyncedDeepSetHead. func TestLongReorgedFastSyncedDeepSetHead(t *testing.T) { + testLongReorgedFastSyncedDeepSetHead(t, false) +} +func TestLongReorgedFastSyncedDeepSetHeadWithSnapshots(t *testing.T) { + testLongReorgedFastSyncedDeepSetHead(t, true) +} + +func testLongReorgedFastSyncedDeepSetHead(t *testing.T, snapshots bool) { // Chain: // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD) // └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12->S13->S14->S15->S16->S17->S18->S19->S20->S21->S22->S23->S24->S25->S26 @@ -1651,7 +1841,7 @@ func TestLongReorgedFastSyncedDeepSetHead(t *testing.T) { expHeadHeader: 4, expHeadFastBlock: 4, expHeadBlock: 4, - }) + }, snapshots) } // Tests a sethead for a long canonical chain with frozen blocks and a longer @@ -1661,6 +1851,13 @@ func TestLongReorgedFastSyncedDeepSetHead(t *testing.T) { // head, since we can just pick up fast syncing from there. The side chain is // completely nuked by the freezer. func TestLongReorgedFastSyncingShallowSetHead(t *testing.T) { + testLongReorgedFastSyncingShallowSetHead(t, false) +} +func TestLongReorgedFastSyncingShallowSetHeadWithSnapshots(t *testing.T) { + testLongReorgedFastSyncingShallowSetHead(t, true) +} + +func testLongReorgedFastSyncingShallowSetHead(t *testing.T, snapshots bool) { // Chain: // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD) // └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12->S13->S14->S15->S16->S17->S18->S19->S20->S21->S22->S23->S24->S25->S26 @@ -1697,7 +1894,7 @@ func TestLongReorgedFastSyncingShallowSetHead(t *testing.T) { expHeadHeader: 6, expHeadFastBlock: 6, expHeadBlock: 0, - }) + }, snapshots) } // Tests a sethead for a long canonical chain with frozen blocks and a longer @@ -1707,6 +1904,13 @@ func TestLongReorgedFastSyncingShallowSetHead(t *testing.T) { // head, since we can just pick up fast syncing from there. The side chain is // completely nuked by the freezer. 
func TestLongReorgedFastSyncingDeepSetHead(t *testing.T) { + testLongReorgedFastSyncingDeepSetHead(t, false) +} +func TestLongReorgedFastSyncingDeepSetHeadWithSnapshots(t *testing.T) { + testLongReorgedFastSyncingDeepSetHead(t, true) +} + +func testLongReorgedFastSyncingDeepSetHead(t *testing.T, snapshots bool) { // Chain: // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD) // └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12->S13->S14->S15->S16->S17->S18->S19->S20->S21->S22->S23->S24->S25->S26 @@ -1742,10 +1946,10 @@ func TestLongReorgedFastSyncingDeepSetHead(t *testing.T) { expHeadHeader: 6, expHeadFastBlock: 6, expHeadBlock: 0, - }) + }, snapshots) } -func testSetHead(t *testing.T, tt *rewindTest) { +func testSetHead(t *testing.T, tt *rewindTest, snapshots bool) { // It's hard to follow the test case, visualize the input // log.Root().SetHandler(log.LvlFilterHandler(log.LvlTrace, log.StreamHandler(os.Stderr, log.TerminalFormat(true)))) // fmt.Println(tt.dump(false)) @@ -1765,16 +1969,20 @@ func testSetHead(t *testing.T, tt *rewindTest) { // Initialize a fresh chain var ( - genesis = new(Genesis).MustCommit(db) - engine = ethash.NewFullFaker() - cacheConfig = &CacheConfig{ + genesis = new(Genesis).MustCommit(db) + engine = ethash.NewFullFaker() + config = &CacheConfig{ TrieCleanLimit: 256, TrieDirtyLimit: 256, TrieTimeLimit: 5 * time.Minute, SnapshotLimit: 0, // Disable snapshot } ) - chain, err := NewBlockChain(db, cacheConfig, params.AllEthashProtocolChanges, engine, vm.Config{}, nil, nil) + if snapshots { + config.SnapshotLimit = 256 + config.SnapshotWait = true + } + chain, err := NewBlockChain(db, config, params.AllEthashProtocolChanges, engine, vm.Config{}, nil, nil) if err != nil { t.Fatalf("Failed to create chain: %v", err) } @@ -1797,6 +2005,11 @@ func testSetHead(t *testing.T, tt *rewindTest) { } if tt.commitBlock > 0 { chain.stateCache.TrieDB().Commit(canonblocks[tt.commitBlock-1].Root(), true, nil) + if snapshots { + if err := chain.snaps.Cap(canonblocks[tt.commitBlock-1].Root(), 0); err != nil { + t.Fatalf("Failed to flatten snapshots: %v", err) + } + } } if _, err := chain.InsertChain(canonblocks[tt.commitBlock:]); err != nil { t.Fatalf("Failed to import canonical chain tail: %v", err) From 3d01d79c87408a75689a2d6c75aafb44e22077bb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?P=C3=A9ter=20Szil=C3=A1gyi?= Date: Thu, 29 Oct 2020 17:15:29 +0200 Subject: [PATCH 19/19] core: small docs for the test cases --- core/blockchain.go | 2 +- core/blockchain_snapshot_test.go | 300 ++++++++++++++++++++++++++++--- core/state/snapshot/journal.go | 4 +- 3 files changed, 279 insertions(+), 27 deletions(-) diff --git a/core/blockchain.go b/core/blockchain.go index 1a058a4ff75d..1c8a7fe60a9f 100644 --- a/core/blockchain.go +++ b/core/blockchain.go @@ -534,7 +534,7 @@ func (bc *BlockChain) SetHeadBeyondRoot(head uint64, root common.Hash) (uint64, log.Debug("Rewound to block with state", "number", newHeadBlock.NumberU64(), "hash", newHeadBlock.Hash()) break } - log.Debug("Skipping block with thershold state", "number", newHeadBlock.NumberU64(), "hash", newHeadBlock.Hash(), "root", newHeadBlock.Root()) + log.Debug("Skipping block with threshold state", "number", newHeadBlock.NumberU64(), "hash", newHeadBlock.Hash(), "root", newHeadBlock.Root()) newHeadBlock = bc.GetBlock(newHeadBlock.ParentHash(), newHeadBlock.NumberU64()-1) // Keep rewinding } } diff --git a/core/blockchain_snapshot_test.go b/core/blockchain_snapshot_test.go 
index 69a2f8109325..cb499593e38a 100644
--- a/core/blockchain_snapshot_test.go
+++ b/core/blockchain_snapshot_test.go
@@ -21,8 +21,10 @@ package core
 
 import (
 	"bytes"
+	"fmt"
 	"io/ioutil"
 	"os"
+	"strings"
 	"testing"
 	"time"
 
@@ -57,10 +59,75 @@ type snapshotTest struct {
 	expSnapshotBottom uint64 // The block height corresponding to the snapshot disk layer
 }
 
+func (tt *snapshotTest) dump() string {
+	buffer := new(strings.Builder)
+
+	fmt.Fprint(buffer, "Chain:\n  G")
+	for i := 0; i < tt.chainBlocks; i++ {
+		fmt.Fprintf(buffer, "->C%d", i+1)
+	}
+	fmt.Fprint(buffer, " (HEAD)\n\n")
+
+	fmt.Fprintf(buffer, "Commit:   G")
+	if tt.commitBlock > 0 {
+		fmt.Fprintf(buffer, ", C%d", tt.commitBlock)
+	}
+	fmt.Fprint(buffer, "\n")
+
+	fmt.Fprintf(buffer, "Snapshot: G")
+	if tt.snapshotBlock > 0 {
+		fmt.Fprintf(buffer, ", C%d", tt.snapshotBlock)
+	}
+	fmt.Fprint(buffer, "\n")
+
+	if tt.crash {
+		fmt.Fprintf(buffer, "\nCRASH\n\n")
+	} else {
+		fmt.Fprintf(buffer, "\nSetHead(%d)\n\n", tt.setHead)
+	}
+	fmt.Fprintf(buffer, "------------------------------\n\n")
+
+	fmt.Fprint(buffer, "Expected in leveldb:\n  G")
+	for i := 0; i < tt.expCanonicalBlocks; i++ {
+		fmt.Fprintf(buffer, "->C%d", i+1)
+	}
+	fmt.Fprintf(buffer, "\n\n")
+	fmt.Fprintf(buffer, "Expected head header    : C%d\n", tt.expHeadHeader)
+	fmt.Fprintf(buffer, "Expected head fast block: C%d\n", tt.expHeadFastBlock)
+	if tt.expHeadBlock == 0 {
+		fmt.Fprintf(buffer, "Expected head block     : G\n")
+	} else {
+		fmt.Fprintf(buffer, "Expected head block     : C%d\n", tt.expHeadBlock)
+	}
+	if tt.expSnapshotBottom == 0 {
+		fmt.Fprintf(buffer, "Expected snapshot disk  : G\n")
+	} else {
+		fmt.Fprintf(buffer, "Expected snapshot disk  : C%d\n", tt.expSnapshotBottom)
+	}
+	return buffer.String()
+}
+
 // Tests a Geth restart with a valid snapshot. Before the shutdown, the snapshot
 // journal will be persisted correctly. In this case no snapshot recovery is
 // required.
 func TestRestartWithNewSnapshot(t *testing.T) {
+	// Chain:
+	//   G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
+	//
+	// Commit:   G
+	// Snapshot: G
+	//
+	// SetHead(0)
+	//
+	// ------------------------------
+	//
+	// Expected in leveldb:
+	//   G->C1->C2->C3->C4->C5->C6->C7->C8
+	//
+	// Expected head header    : C8
+	// Expected head fast block: C8
+	// Expected head block     : C8
+	// Expected snapshot disk  : G
 	testSnapshot(t, &snapshotTest{
 		legacy:             false,
 		crash:              false,
@@ -81,6 +148,23 @@ func TestRestartWithNewSnapshot(t *testing.T) {
 // the snapshot journal will be persisted correctly. In this case no snapshot
 // recovery is required.
 func TestRestartWithLegacySnapshot(t *testing.T) {
+	// Chain:
+	//   G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
+	//
+	// Commit:   G
+	// Snapshot: G
+	//
+	// SetHead(0)
+	//
+	// ------------------------------
+	//
+	// Expected in leveldb:
+	//   G->C1->C2->C3->C4->C5->C6->C7->C8
+	//
+	// Expected head header    : C8
+	// Expected head fast block: C8
+	// Expected head block     : C8
+	// Expected snapshot disk  : G
 	testSnapshot(t, &snapshotTest{
 		legacy:             true,
 		crash:              false,
@@ -97,13 +181,29 @@ func TestRestartWithLegacySnapshot(t *testing.T) {
 	})
 }
 
-// Tests a Geth was crashed and restarts with a broken snapshot.
-// In this case the chain head should be rewound to the point
-// with available state. And also the new head should must be lower
-// than disk layer. But there is no committed point so the chain
-// should be rewound to genesis and the disk layer should be left
+// Tests a Geth crash and restart with a broken snapshot. In this case the
+// chain head should be rewound to the point with available state, and the
+// new head must be lower than the disk layer. But there is no committed point,
+// so the chain should be rewound to genesis and the disk layer should be left
 // for recovery.
 func TestNoCommitCrashWithNewSnapshot(t *testing.T) {
+	// Chain:
+	//   G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
+	//
+	// Commit:   G
+	// Snapshot: G, C4
+	//
+	// CRASH
+	//
+	// ------------------------------
+	//
+	// Expected in leveldb:
+	//   G->C1->C2->C3->C4->C5->C6->C7->C8
+	//
+	// Expected head header    : C8
+	// Expected head fast block: C8
+	// Expected head block     : G
+	// Expected snapshot disk  : C4
 	testSnapshot(t, &snapshotTest{
 		legacy:             false,
 		crash:              true,
@@ -120,13 +220,29 @@ func TestNoCommitCrashWithNewSnapshot(t *testing.T) {
 	})
 }
 
-// Tests a Geth was crashed and restarts with a broken snapshot.
-// In this case the chain head should be rewound to the point
-// with available state. And also the new head should must be lower
-// than disk layer. But there is only a low committed point so the
-// chain should be rewound to committed point and the disk layer
+// Tests a Geth crash and restart with a broken snapshot. In this case the
+// chain head should be rewound to the point with available state, and the
+// new head must be lower than the disk layer. But there is only a low committed
+// point, so the chain should be rewound to the committed point and the disk layer
 // should be left for recovery.
 func TestLowCommitCrashWithNewSnapshot(t *testing.T) {
+	// Chain:
+	//   G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
+	//
+	// Commit:   G, C2
+	// Snapshot: G, C4
+	//
+	// CRASH
+	//
+	// ------------------------------
+	//
+	// Expected in leveldb:
+	//   G->C1->C2->C3->C4->C5->C6->C7->C8
+	//
+	// Expected head header    : C8
+	// Expected head fast block: C8
+	// Expected head block     : C2
+	// Expected snapshot disk  : C4
 	testSnapshot(t, &snapshotTest{
 		legacy:             false,
 		crash:              true,
@@ -143,13 +259,29 @@ func TestLowCommitCrashWithNewSnapshot(t *testing.T) {
 	})
 }
 
-// Tests a Geth was crashed and restarts with a broken snapshot.
-// In this case the chain head should be rewound to the point
-// with available state. And also the new head should must be lower
-// than disk layer. But there is only a high committed point so the
-// chain should be rewound to genesis and the disk layer should be
-// left for recovery.
+// Tests a Geth crash and restart with a broken snapshot. In this case
+// the chain head should be rewound to the point with available state, and
+// the new head must be lower than the disk layer. But there is only a high
+// committed point, so the chain should be rewound to genesis and the disk layer
+// should be left for recovery.
 func TestHighCommitCrashWithNewSnapshot(t *testing.T) {
+	// Chain:
+	//   G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
+	//
+	// Commit:   G, C6
+	// Snapshot: G, C4
+	//
+	// CRASH
+	//
+	// ------------------------------
+	//
+	// Expected in leveldb:
+	//   G->C1->C2->C3->C4->C5->C6->C7->C8
+	//
+	// Expected head header    : C8
+	// Expected head fast block: C8
+	// Expected head block     : G
+	// Expected snapshot disk  : C4
 	testSnapshot(t, &snapshotTest{
 		legacy:             false,
 		crash:              true,
@@ -171,6 +303,23 @@ func TestHighCommitCrashWithNewSnapshot(t *testing.T) {
 // and rebuilt from the new chain head. The new head here refers to the
 // genesis because there is no committed point.
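
// To make the crash cases above concrete: they all encode one rewind rule,
// sketched below with hypothetical helpers (hasState, parentOf — assumed names
// for illustration; the real logic lives in SetHeadBeyondRoot). Walk back from
// the head until a block whose state is still available is found, bottoming
// out at the genesis when nothing newer was ever committed:
//
//	func rewindToAvailableState(head *types.Block, hasState func(common.Hash) bool,
//		parentOf func(*types.Block) *types.Block) *types.Block {
//		for head.NumberU64() > 0 && !hasState(head.Root()) {
//			head = parentOf(head) // skip blocks whose state was never persisted
//		}
//		return head
//	}
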
func TestNoCommitCrashWithLegacySnapshot(t *testing.T) {
+	// Chain:
+	//   G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
+	//
+	// Commit:   G
+	// Snapshot: G, C4
+	//
+	// CRASH
+	//
+	// ------------------------------
+	//
+	// Expected in leveldb:
+	//   G->C1->C2->C3->C4->C5->C6->C7->C8
+	//
+	// Expected head header    : C8
+	// Expected head fast block: C8
+	// Expected head block     : G
+	// Expected snapshot disk  : G
 	testSnapshot(t, &snapshotTest{
 		legacy:             true,
 		crash:              true,
@@ -192,6 +341,23 @@ func TestNoCommitCrashWithLegacySnapshot(t *testing.T) {
 // and rebuilt from the new chain head. The new head here refers to the
 // block-2 because it's committed into the disk.
 func TestLowCommitCrashWithLegacySnapshot(t *testing.T) {
+	// Chain:
+	//   G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
+	//
+	// Commit:   G, C2
+	// Snapshot: G, C4
+	//
+	// CRASH
+	//
+	// ------------------------------
+	//
+	// Expected in leveldb:
+	//   G->C1->C2->C3->C4->C5->C6->C7->C8
+	//
+	// Expected head header    : C8
+	// Expected head fast block: C8
+	// Expected head block     : C2
+	// Expected snapshot disk  : C2
 	testSnapshot(t, &snapshotTest{
 		legacy:             true,
 		crash:              true,
@@ -211,12 +377,30 @@ func TestLowCommitCrashWithLegacySnapshot(t *testing.T) {
 
 // Tests a Geth crash and restart with a broken and "legacy format"
 // snapshot. In this case the entire legacy snapshot should be discarded
 // and rebuilt from the new chain head.
+//
 // The new head here refers to the genesis, the reason is:
-//   - the state of block-6 is committed into the disk
-//   - the legacy disk layer of block-4 is committed into the disk
-//   - the head is rewound the genesis in order to find an available
-//   state lower than disk layer
+//   - the state of block-6 is committed into the disk
+//   - the legacy disk layer of block-4 is committed into the disk
+//   - the head is rewound to the genesis in order to find an available
+//     state lower than the disk layer
 func TestHighCommitCrashWithLegacySnapshot(t *testing.T) {
+	// Chain:
+	//   G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
+	//
+	// Commit:   G, C6
+	// Snapshot: G, C4
+	//
+	// CRASH
+	//
+	// ------------------------------
+	//
+	// Expected in leveldb:
+	//   G->C1->C2->C3->C4->C5->C6->C7->C8
+	//
+	// Expected head header    : C8
+	// Expected head fast block: C8
+	// Expected head block     : G
+	// Expected snapshot disk  : G
 	testSnapshot(t, &snapshotTest{
 		legacy:             true,
 		crash:              true,
@@ -237,6 +421,23 @@ func TestHighCommitCrashWithLegacySnapshot(t *testing.T) {
 // enabling snapshot and after that re-enable the snapshot again. In this
 // case the snapshot should be rebuilt with the latest chain head.
 func TestGappedNewSnapshot(t *testing.T) {
+	// Chain:
+	//   G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
+	//
+	// Commit:   G
+	// Snapshot: G
+	//
+	// SetHead(0)
+	//
+	// ------------------------------
+	//
+	// Expected in leveldb:
+	//   G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10
+	//
+	// Expected head header    : C10
+	// Expected head fast block: C10
+	// Expected head block     : C10
+	// Expected snapshot disk  : C10
 	testSnapshot(t, &snapshotTest{
 		legacy:             false,
 		crash:              false,
@@ -257,6 +458,23 @@ func TestGappedNewSnapshot(t *testing.T) {
 // without enabling snapshot and after that re-enable the snapshot again.
 // In this case the snapshot should be rebuilt with the latest chain head.
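
// A hedged sketch of the decision the two gapped cases exercise (illustrative
// only; reuseSnapshot is an assumed name, not this PR's API): a journalled
// snapshot stays usable only if its persisted disk root still connects to the
// current head; a gap of snapshot-less blocks forces a rebuild from the head:
//
//	func reuseSnapshot(diskRoot, headRoot common.Hash, journalConnects bool) bool {
//		// With no journal bridging diskRoot to headRoot, the layers cannot
//		// be linked up again, so the snapshot must be regenerated.
//		return diskRoot == headRoot || journalConnects
//	}
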
func TestGappedLegacySnapshot(t *testing.T) {
+	// Chain:
+	//   G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
+	//
+	// Commit:   G
+	// Snapshot: G
+	//
+	// SetHead(0)
+	//
+	// ------------------------------
+	//
+	// Expected in leveldb:
+	//   G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10
+	//
+	// Expected head header    : C10
+	// Expected head fast block: C10
+	// Expected head block     : C10
+	// Expected snapshot disk  : C10
 	testSnapshot(t, &snapshotTest{
 		legacy:             true,
 		crash:              false,
@@ -277,6 +495,23 @@ func TestGappedLegacySnapshot(t *testing.T) {
 // In this case the head is rewound to the target (with state available). After
 // that the chain is restarted and the original disk layer is kept.
 func TestSetHeadWithNewSnapshot(t *testing.T) {
+	// Chain:
+	//   G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
+	//
+	// Commit:   G
+	// Snapshot: G
+	//
+	// SetHead(4)
+	//
+	// ------------------------------
+	//
+	// Expected in leveldb:
+	//   G->C1->C2->C3->C4
+	//
+	// Expected head header    : C4
+	// Expected head fast block: C4
+	// Expected head block     : C4
+	// Expected snapshot disk  : G
 	testSnapshot(t, &snapshotTest{
 		legacy:             false,
 		crash:              false,
@@ -289,7 +524,7 @@ func TestSetHeadWithNewSnapshot(t *testing.T) {
 		expHeadHeader:      4,
 		expHeadFastBlock:   4,
 		expHeadBlock:       4,
-		expSnapshotBottom: 0, // The inital disk layer is built from the genesis
+		expSnapshotBottom: 0, // The initial disk layer is built from the genesis
 	})
 }
 
@@ -297,6 +532,23 @@ func TestSetHeadWithNewSnapshot(t *testing.T) {
 // is applied. In this case the head is rewound to the target (with state available).
 // After that the chain is restarted and the original disk layer is kept.
 func TestSetHeadWithLegacySnapshot(t *testing.T) {
+	// Chain:
+	//   G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
+	//
+	// Commit:   G
+	// Snapshot: G
+	//
+	// SetHead(4)
+	//
+	// ------------------------------
+	//
+	// Expected in leveldb:
+	//   G->C1->C2->C3->C4
+	//
+	// Expected head header    : C4
+	// Expected head fast block: C4
+	// Expected head block     : C4
+	// Expected snapshot disk  : G
 	testSnapshot(t, &snapshotTest{
 		legacy:             true,
 		crash:              false,
@@ -309,14 +561,14 @@ func TestSetHeadWithLegacySnapshot(t *testing.T) {
 		expHeadHeader:      4,
 		expHeadFastBlock:   4,
 		expHeadBlock:       4,
-		expSnapshotBottom: 0, // The inital disk layer is built from the genesis
+		expSnapshotBottom: 0, // The initial disk layer is built from the genesis
 	})
 }
 
 func testSnapshot(t *testing.T, tt *snapshotTest) {
 	// It's hard to follow the test case, visualize the input
-	// log.Root().SetHandler(log.LvlFilterHandler(log.LvlTrace, log.StreamHandler(os.Stderr, log.TerminalFormat(true))))
-	// fmt.Println(tt.dump(true))
+	//log.Root().SetHandler(log.LvlFilterHandler(log.LvlTrace, log.StreamHandler(os.Stderr, log.TerminalFormat(true))))
+	//fmt.Println(tt.dump())
 
 	// Create a temporary persistent database
 	datadir, err := ioutil.TempDir("", "")
diff --git a/core/state/snapshot/journal.go b/core/state/snapshot/journal.go
index 94c194a9f5d9..2821248a726e 100644
--- a/core/state/snapshot/journal.go
+++ b/core/state/snapshot/journal.go
@@ -107,7 +107,7 @@ func loadAndParseJournal(db ethdb.KeyValueStore, base *diskLayer) (snapshot, jou
 	// layer, we just discard all diffs and try to recover them later.
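 	// (Recovering here means keeping the already generated disk layer and
 	// re-creating the missing diff layers on top of it later, rather than
 	// wiping the snapshot and regenerating it from scratch.)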
journal := rawdb.ReadSnapshotJournal(db) if len(journal) == 0 { - log.Debug("Loaded snapshot journal", "diskroot", base.root, "diff", "missing") + log.Warn("Loaded snapshot journal", "diskroot", base.root, "diffs", "missing") return base, generator, nil } r := rlp.NewStream(bytes.NewReader(journal), 0) @@ -130,7 +130,7 @@ func loadAndParseJournal(db ethdb.KeyValueStore, base *diskLayer) (snapshot, jou // It can happen that Geth crashes without persisting the latest // diff journal. if !bytes.Equal(root.Bytes(), base.root.Bytes()) { - log.Debug("Loaded snapshot journal", "diskroot", base.root, "diff", "unmatched") + log.Warn("Loaded snapshot journal", "diskroot", base.root, "diffs", "unmatched") return base, generator, nil } // Load all the snapshot diffs from the journal
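
// For reference, a hedged sketch of how a loader of this journal format might
// consume the leading metadata ahead of the per-layer entries (readJournalHeader
// is an assumed name for illustration; journal.go above does this inline):
//
//	func readJournalHeader(journal []byte) (uint64, common.Hash, error) {
//		r := rlp.NewStream(bytes.NewReader(journal), 0)
//		var version uint64
//		if err := r.Decode(&version); err != nil { // journal version comes first
//			return 0, common.Hash{}, err
//		}
//		var root common.Hash
//		if err := r.Decode(&root); err != nil { // then the disk layer root
//			return 0, common.Hash{}, err
//		}
//		return version, root, nil
//	}
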