feat: cherry-pick pbss patch commits from eth repo in v1.13.2 #1916

Merged
merged 10 commits on Oct 16, 2023
3 changes: 0 additions & 3 deletions .golangci.yml
@@ -51,9 +51,6 @@ issues:
- path: core/state/metrics.go
linters:
- unused
- path: core/state/statedb_fuzz_test.go
linters:
- unused
- path: core/txpool/legacypool/list.go
linters:
- staticcheck
7 changes: 3 additions & 4 deletions core/blockchain.go
@@ -1180,8 +1180,7 @@ func (bc *BlockChain) Stop() {
// - HEAD-127: So we have a hard limit on the number of blocks reexecuted
if !bc.cacheConfig.TrieDirtyDisabled {
triedb := bc.triedb
var once sync.Once

var once sync.Once
for _, offset := range []uint64{0, 1, TriesInMemory - 1} {
if number := bc.CurrentBlock().Number.Uint64(); number > offset {
recent := bc.GetBlockByNumber(number - offset)
@@ -1191,9 +1190,9 @@ func (bc *BlockChain) Stop() {
} else {
rawdb.WriteSafePointBlockNumber(bc.db, recent.NumberU64())
once.Do(func() {
rawdb.WriteHeadBlockHash(bc.db, recent.Hash())
rawdb.WriteHeadBlockHash(bc.db, recent.Hash())
})
}
}
}
}
if snapBase != (common.Hash{}) {
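
Note on the blockchain.go hunk above: the relocated var once sync.Once declaration plus the once.Do call make sure the head block hash is persisted only for the first offset that reaches that branch, i.e. the most recent block, even though the loop visits several offsets. A minimal, self-contained sketch of that pattern; the offsets mirror the loop, with 127 standing in for TriesInMemory - 1 under the assumption that TriesInMemory is 128:

package main

import (
	"fmt"
	"sync"
)

func main() {
	var once sync.Once
	for _, offset := range []uint64{0, 1, 127} {
		offset := offset
		once.Do(func() {
			// Executes only on the first iteration; later offsets are skipped.
			fmt.Println("head block hash written at offset", offset)
		})
	}
}
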
20 changes: 20 additions & 0 deletions core/rawdb/accessors_trie.go
@@ -89,6 +89,16 @@ func HasAccountTrieNode(db ethdb.KeyValueReader, path []byte, hash common.Hash)
return h.hash(data) == hash
}

// ExistsAccountTrieNode checks the presence of the account trie node with the
// specified node path, regardless of the node hash.
func ExistsAccountTrieNode(db ethdb.KeyValueReader, path []byte) bool {
has, err := db.Has(accountTrieNodeKey(path))
if err != nil {
return false
}
return has
}

// WriteAccountTrieNode writes the provided account trie node into database.
func WriteAccountTrieNode(db ethdb.KeyValueWriter, path []byte, node []byte) {
if err := db.Put(accountTrieNodeKey(path), node); err != nil {
@@ -127,6 +137,16 @@ func HasStorageTrieNode(db ethdb.KeyValueReader, accountHash common.Hash, path [
return h.hash(data) == hash
}

// ExistsStorageTrieNode checks the presence of the storage trie node with the
// specified account hash and node path, regardless of the node hash.
func ExistsStorageTrieNode(db ethdb.KeyValueReader, accountHash common.Hash, path []byte) bool {
has, err := db.Has(storageTrieNodeKey(accountHash, path))
if err != nil {
return false
}
return has
}

// WriteStorageTrieNode writes the provided storage trie node into database.
func WriteStorageTrieNode(db ethdb.KeyValueWriter, accountHash common.Hash, path []byte, node []byte) {
if err := db.Put(storageTrieNodeKey(accountHash, path), node); err != nil {
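
Note on the accessors added above: ExistsAccountTrieNode and ExistsStorageTrieNode test only whether a node is stored under the given key, while the existing HasAccountTrieNode and HasStorageTrieNode also verify that the stored blob hashes to the expected value. A hedged sketch of how the two checks can be combined; the helper below is illustrative and not part of the PR:

import (
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/rawdb"
	"github.com/ethereum/go-ethereum/ethdb"
)

// accountNodeState classifies an account trie node as missing, stale or
// matching the expected hash; illustrative helper only.
func accountNodeState(db ethdb.KeyValueReader, path []byte, expected common.Hash) string {
	switch {
	case !rawdb.ExistsAccountTrieNode(db, path):
		return "missing" // nothing stored under this path at all
	case !rawdb.HasAccountTrieNode(db, path, expected):
		return "stale" // a node exists, but its content hash differs
	default:
		return "match"
	}
}
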
6 changes: 6 additions & 0 deletions core/rawdb/freezer_table.go
@@ -220,6 +220,9 @@ func (t *freezerTable) repair() error {
}
// Ensure the index is a multiple of indexEntrySize bytes
if overflow := stat.Size() % indexEntrySize; overflow != 0 {
if t.readonly {
return fmt.Errorf("index file(path: %s, name: %s) size is not a multiple of %d", t.path, t.name, indexEntrySize)
}
truncateFreezerFile(t.index, stat.Size()-overflow) // New file can't trigger this path
}
// Retrieve the file sizes and prepare for truncation
@@ -278,6 +281,9 @@ func (t *freezerTable) repair() error {
// Keep truncating both files until they come in sync
contentExp = int64(lastIndex.offset)
for contentExp != contentSize {
if t.readonly {
return fmt.Errorf("freezer table(path: %s, name: %s, num: %d) is corrupted", t.path, t.name, lastIndex.filenum)
}
verbose = true
// Truncate the head file to the last offset pointer
if contentExp < contentSize {
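
Note on the freezer_table.go guards above: the index file must be a whole number of fixed-width entries, so any remainder indicates a torn write. A writable table silently truncates it away; with this change a read-only table reports an error instead of modifying the file. A small arithmetic sketch of the size check, assuming the upstream entry width of 6 bytes (2-byte file number plus 4-byte offset); the 20-byte file size is made up:

package main

import "fmt"

// indexEntrySize mirrors the freezer's fixed index entry width; assumed value.
const indexEntrySize = 6

func main() {
	size := int64(20)                 // hypothetical stat.Size() of a damaged index file
	overflow := size % indexEntrySize // 20 % 6 == 2 stray trailing bytes
	// A writable table truncates the stray bytes; a read-only table now
	// returns an error instead of touching the file.
	fmt.Println("overflow bytes:", overflow)
}
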
12 changes: 10 additions & 2 deletions core/rawdb/schema.go
@@ -215,7 +215,11 @@ func accountSnapshotKey(hash common.Hash) []byte {

// storageSnapshotKey = SnapshotStoragePrefix + account hash + storage hash
func storageSnapshotKey(accountHash, storageHash common.Hash) []byte {
return append(append(SnapshotStoragePrefix, accountHash.Bytes()...), storageHash.Bytes()...)
buf := make([]byte, len(SnapshotStoragePrefix)+common.HashLength+common.HashLength)
n := copy(buf, SnapshotStoragePrefix)
n += copy(buf[n:], accountHash.Bytes())
copy(buf[n:], storageHash.Bytes())
return buf
}

// storageSnapshotsKey = SnapshotStoragePrefix + account hash + storage hash
@@ -274,7 +278,11 @@ func accountTrieNodeKey(path []byte) []byte {

// storageTrieNodeKey = trieNodeStoragePrefix + accountHash + nodePath.
func storageTrieNodeKey(accountHash common.Hash, path []byte) []byte {
return append(append(trieNodeStoragePrefix, accountHash.Bytes()...), path...)
buf := make([]byte, len(trieNodeStoragePrefix)+common.HashLength+len(path))
n := copy(buf, trieNodeStoragePrefix)
n += copy(buf[n:], accountHash.Bytes())
copy(buf[n:], path)
return buf
}

// IsLegacyTrieNode reports whether a provided database entry is a legacy trie
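
Note on the schema.go key builders above: a plausible motivation for replacing the chained append calls with an explicitly sized buffer is that appending to a prefix slice with spare capacity reuses its backing array, so two keys derived from the same prefix can silently overwrite each other (and the chained form may also allocate more than once). A standalone illustration of that aliasing hazard, with made-up values:

package main

import "fmt"

func main() {
	prefix := make([]byte, 1, 16) // shared prefix with excess capacity
	a := append(prefix, 0x01)     // appends in place, reusing prefix's backing array
	b := append(prefix, 0x02)     // writes into the same backing array
	fmt.Println(a[1], b[1])       // prints "2 2": the first key was clobbered
}
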
134 changes: 105 additions & 29 deletions core/state/statedb.go
@@ -42,7 +42,12 @@ import (
"github.com/ethereum/go-ethereum/trie/triestate"
)

const defaultNumOfSlots = 100
const (
defaultNumOfSlots = 100
// storageDeleteLimit denotes the highest permissible memory allocation
// employed for contract storage deletion.
storageDeleteLimit = 512 * 1024 * 1024
)

type revision struct {
id int
Expand Down Expand Up @@ -1342,63 +1347,134 @@ func (s *StateDB) clearJournalAndRefund() {
s.validRevisions = s.validRevisions[:0] // Snapshots can be created without journal entries
}

// deleteStorage iterates the storage trie belongs to the account and mark all
// slots inside as deleted.
func (s *StateDB) deleteStorage(addr common.Address, addrHash common.Hash, root common.Hash) (bool, map[common.Hash][]byte, *trienode.NodeSet, error) {
start := time.Now()
// fastDeleteStorage is the function that efficiently deletes the storage trie
// of a specific account. It leverages the associated state snapshot for fast
// storage iteration and constructs trie node deletion markers by creating
// stack trie with iterated slots.
func (s *StateDB) fastDeleteStorage(addrHash common.Hash, root common.Hash) (bool, common.StorageSize, map[common.Hash][]byte, *trienode.NodeSet, error) {
iter, err := s.snaps.StorageIterator(s.originalRoot, addrHash, common.Hash{})
if err != nil {
return false, 0, nil, nil, err
}
defer iter.Release()

var (
size common.StorageSize
nodes = trienode.NewNodeSet(addrHash)
slots = make(map[common.Hash][]byte)
)
stack := trie.NewStackTrie(func(owner common.Hash, path []byte, hash common.Hash, blob []byte) {
nodes.AddNode(path, trienode.NewDeleted())
size += common.StorageSize(len(path))
})
for iter.Next() {
if size > storageDeleteLimit {
return true, size, nil, nil, nil
}
slot := common.CopyBytes(iter.Slot())
if err := iter.Error(); err != nil { // error might occur after Slot function
return false, 0, nil, nil, err
}
size += common.StorageSize(common.HashLength + len(slot))
slots[iter.Hash()] = slot

if err := stack.Update(iter.Hash().Bytes(), slot); err != nil {
return false, 0, nil, nil, err
}
}
if err := iter.Error(); err != nil { // error might occur during iteration
return false, 0, nil, nil, err
}
if stack.Hash() != root {
return false, 0, nil, nil, fmt.Errorf("snapshot is not matched, exp %x, got %x", root, stack.Hash())
}
return false, size, slots, nodes, nil
}

// slowDeleteStorage serves as a less-efficient alternative to "fastDeleteStorage,"
// employed when the associated state snapshot is not available. It iterates the
// storage slots along with all internal trie nodes via trie directly.
func (s *StateDB) slowDeleteStorage(addr common.Address, addrHash common.Hash, root common.Hash) (bool, common.StorageSize, map[common.Hash][]byte, *trienode.NodeSet, error) {
tr, err := s.db.OpenStorageTrie(s.originalRoot, addr, root)
if err != nil {
return false, nil, nil, fmt.Errorf("failed to open storage trie, err: %w", err)
return false, 0, nil, nil, fmt.Errorf("failed to open storage trie, err: %w", err)
}
// skip deleting storages for EmptyTrie
if _, ok := tr.(*trie.EmptyTrie); ok {
return false, nil, nil, nil
return false, 0, nil, nil, nil
}
it, err := tr.NodeIterator(nil)
if err != nil {
return false, nil, nil, fmt.Errorf("failed to open storage iterator, err: %w", err)
return false, 0, nil, nil, fmt.Errorf("failed to open storage iterator, err: %w", err)
}
var (
set = trienode.NewNodeSet(addrHash)
slots = make(map[common.Hash][]byte)
stateSize common.StorageSize
nodeSize common.StorageSize
size common.StorageSize
nodes = trienode.NewNodeSet(addrHash)
slots = make(map[common.Hash][]byte)
)
for it.Next(true) {
// arbitrary stateSize limit, make it configurable
if stateSize+nodeSize > 512*1024*1024 {
log.Info("Skip large storage deletion", "address", addr.Hex(), "states", stateSize, "nodes", nodeSize)
if metrics.EnabledExpensive {
slotDeletionSkip.Inc(1)
}
return true, nil, nil, nil
if size > storageDeleteLimit {
return true, size, nil, nil, nil
}
if it.Leaf() {
slots[common.BytesToHash(it.LeafKey())] = common.CopyBytes(it.LeafBlob())
stateSize += common.StorageSize(common.HashLength + len(it.LeafBlob()))
size += common.StorageSize(common.HashLength + len(it.LeafBlob()))
continue
}
if it.Hash() == (common.Hash{}) {
continue
}
nodeSize += common.StorageSize(len(it.Path()))
set.AddNode(it.Path(), trienode.NewDeleted())
size += common.StorageSize(len(it.Path()))
nodes.AddNode(it.Path(), trienode.NewDeleted())
}
if err := it.Error(); err != nil {
return false, 0, nil, nil, err
}
return false, size, slots, nodes, nil
}

// deleteStorage is designed to delete the storage trie of a designated account.
// It could potentially be terminated if the storage size is excessively large,
// potentially leading to an out-of-memory panic. The function will make an attempt
// to utilize an efficient strategy if the associated state snapshot is reachable;
// otherwise, it will resort to a less-efficient approach.
func (s *StateDB) deleteStorage(addr common.Address, addrHash common.Hash, root common.Hash) (bool, map[common.Hash][]byte, *trienode.NodeSet, error) {
var (
start = time.Now()
err error
aborted bool
size common.StorageSize
slots map[common.Hash][]byte
nodes *trienode.NodeSet
)
// The fast approach can be failed if the snapshot is not fully
// generated, or it's internally corrupted. Fallback to the slow
// one just in case.
if s.snap != nil {
aborted, size, slots, nodes, err = s.fastDeleteStorage(addrHash, root)
}
if s.snap == nil || err != nil {
aborted, size, slots, nodes, err = s.slowDeleteStorage(addr, addrHash, root)
}
if err != nil {
return false, nil, nil, err
}
if metrics.EnabledExpensive {
if int64(len(slots)) > slotDeletionMaxCount.Value() {
slotDeletionMaxCount.Update(int64(len(slots)))
if aborted {
slotDeletionSkip.Inc(1)
}
n := int64(len(slots))
if n > slotDeletionMaxCount.Value() {
slotDeletionMaxCount.Update(n)
}
if int64(stateSize+nodeSize) > slotDeletionMaxSize.Value() {
slotDeletionMaxSize.Update(int64(stateSize + nodeSize))
if int64(size) > slotDeletionMaxSize.Value() {
slotDeletionMaxSize.Update(int64(size))
}
slotDeletionTimer.UpdateSince(start)
slotDeletionCount.Mark(int64(len(slots)))
slotDeletionSize.Mark(int64(stateSize + nodeSize))
slotDeletionCount.Mark(n)
slotDeletionSize.Mark(int64(size))
}
return false, slots, set, nil
return aborted, slots, nodes, nil
}

// handleDestruction processes all destruction markers and deletes the account
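
Note on the statedb.go rewrite above: deleteStorage keeps its four return values but now obtains them from either the snapshot-backed fastDeleteStorage or the trie-walking slowDeleteStorage, and the first value reports whether deletion was aborted once the accumulated size of slots and node paths crossed storageDeleteLimit (512 MiB). A hedged sketch of how a caller in the same package might consume the result; the wrapper below is illustrative and not part of the PR:

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/trie/trienode"
)

// purgeStorage wraps the updated deleteStorage and reacts to the aborted flag;
// illustrative helper only.
func purgeStorage(s *StateDB, addr common.Address, addrHash, root common.Hash) (map[common.Hash][]byte, *trienode.NodeSet, error) {
	aborted, slots, nodes, err := s.deleteStorage(addr, addrHash, root)
	if err != nil {
		return nil, nil, fmt.Errorf("failed to delete storage, err: %w", err)
	}
	if aborted {
		// Storage crossed storageDeleteLimit; the account must be handled
		// without deleting its slots inline, to avoid an out-of-memory panic.
		return nil, nil, nil
	}
	return slots, nodes, nil // slot hash -> original value, plus trie deletion markers
}
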
21 changes: 17 additions & 4 deletions core/state/statedb_fuzz_test.go
@@ -31,10 +31,12 @@ import (

"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/core/state/snapshot"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/trie"
"github.com/ethereum/go-ethereum/trie/triedb/pathdb"
"github.com/ethereum/go-ethereum/trie/triestate"
)

@@ -179,16 +181,28 @@ func (test *stateTest) run() bool {
storageList = append(storageList, copy2DSet(states.Storages))
}
disk = rawdb.NewMemoryDatabase()
tdb = trie.NewDatabase(disk, &trie.Config{OnCommit: onCommit})
tdb = trie.NewDatabase(disk, &trie.Config{OnCommit: onCommit, PathDB: pathdb.Defaults})
sdb = NewDatabaseWithNodeDB(disk, tdb)
byzantium = rand.Intn(2) == 0
)
defer disk.Close()
defer tdb.Close()

var snaps *snapshot.Tree
if rand.Intn(3) == 0 {
snaps, _ = snapshot.New(snapshot.Config{
CacheSize: 1,
Recovery: false,
NoBuild: false,
AsyncBuild: false,
}, disk, tdb, types.EmptyRootHash, 128, false)
}
for i, actions := range test.actions {
root := types.EmptyRootHash
if i != 0 {
root = roots[len(roots)-1]
}
state, err := New(root, sdb, nil)
state, err := New(root, sdb, snaps)
if err != nil {
panic(err)
}
Expand Down Expand Up @@ -365,8 +379,7 @@ func (test *stateTest) verify(root common.Hash, next common.Hash, db *trie.Datab
return nil
}

// TODO(Nathan): enable this case after enabling pbss
func testStateChanges(t *testing.T) {
func TestStateChanges(t *testing.T) {
config := &quick.Config{MaxCount: 1000}
err := quick.Check((*stateTest).run, config)
if cerr, ok := err.(*quick.CheckError); ok {