diff --git a/aggregator/aggregator.go b/aggregator/aggregator.go index c64b08fed7..0e53ea768c 100644 --- a/aggregator/aggregator.go +++ b/aggregator/aggregator.go @@ -1019,7 +1019,7 @@ func (a *Aggregator) buildInputProver(ctx context.Context, batchToVerify *state. if err != nil { return nil, err } - leaves, err := a.State.GetLeafsByL1InfoRoot(ctx, *l1InfoRoot, nil) + leaves, err := a.State.GetLeavesByL1InfoRoot(ctx, *l1InfoRoot, nil) if err != nil { return nil, err } diff --git a/aggregator/aggregator_test.go b/aggregator/aggregator_test.go index e51b6d770e..9dcb0a0586 100644 --- a/aggregator/aggregator_test.go +++ b/aggregator/aggregator_test.go @@ -801,7 +801,7 @@ func TestTryGenerateBatchProof(t *testing.T) { } m.etherman.On("GetLatestBlockHeader", mock.Anything).Return(&types.Header{Number: new(big.Int).SetUint64(1)}, nil).Once() m.stateMock.On("GetVirtualBatch", mock.Anything, lastVerifiedBatchNum+1, nil).Return(&vb, nil).Twice() - m.stateMock.On("GetLeafsByL1InfoRoot", mock.Anything, *vb.L1InfoRoot, nil).Return([]state.L1InfoTreeExitRootStorageEntry{}, nil).Twice() + m.stateMock.On("GetLeavesByL1InfoRoot", mock.Anything, *vb.L1InfoRoot, nil).Return([]state.L1InfoTreeExitRootStorageEntry{}, nil).Twice() expectedInputProver, err := a.buildInputProver(context.Background(), &batchToProve) require.NoError(err) m.proverMock.On("BatchProof", expectedInputProver).Return(nil, errBanana).Once() @@ -844,7 +844,7 @@ func TestTryGenerateBatchProof(t *testing.T) { } m.etherman.On("GetLatestBlockHeader", mock.Anything).Return(&types.Header{Number: new(big.Int).SetUint64(1)}, nil).Once() m.stateMock.On("GetVirtualBatch", mock.Anything, lastVerifiedBatchNum+1, nil).Return(&vb, nil).Twice() - m.stateMock.On("GetLeafsByL1InfoRoot", mock.Anything, *vb.L1InfoRoot, nil).Return([]state.L1InfoTreeExitRootStorageEntry{}, nil).Twice() + m.stateMock.On("GetLeavesByL1InfoRoot", mock.Anything, *vb.L1InfoRoot, nil).Return([]state.L1InfoTreeExitRootStorageEntry{}, nil).Twice() 
expectedInputProver, err := a.buildInputProver(context.Background(), &batchToProve) require.NoError(err) m.proverMock.On("BatchProof", expectedInputProver).Return(&proofID, nil).Once() @@ -888,7 +888,7 @@ func TestTryGenerateBatchProof(t *testing.T) { } m.etherman.On("GetLatestBlockHeader", mock.Anything).Return(&types.Header{Number: new(big.Int).SetUint64(1)}, nil).Once() m.stateMock.On("GetVirtualBatch", mock.Anything, lastVerifiedBatchNum+1, nil).Return(&vb, nil).Twice() - m.stateMock.On("GetLeafsByL1InfoRoot", mock.Anything, *vb.L1InfoRoot, nil).Return([]state.L1InfoTreeExitRootStorageEntry{}, nil).Twice() + m.stateMock.On("GetLeavesByL1InfoRoot", mock.Anything, *vb.L1InfoRoot, nil).Return([]state.L1InfoTreeExitRootStorageEntry{}, nil).Twice() expectedInputProver, err := a.buildInputProver(context.Background(), &batchToProve) require.NoError(err) m.proverMock.On("BatchProof", expectedInputProver).Return(&proofID, nil).Once() @@ -932,7 +932,7 @@ func TestTryGenerateBatchProof(t *testing.T) { } m.etherman.On("GetLatestBlockHeader", mock.Anything).Return(&types.Header{Number: new(big.Int).SetUint64(1)}, nil).Once() m.stateMock.On("GetVirtualBatch", mock.Anything, lastVerifiedBatchNum+1, nil).Return(&vb, nil).Twice() - m.stateMock.On("GetLeafsByL1InfoRoot", mock.Anything, *vb.L1InfoRoot, nil).Return([]state.L1InfoTreeExitRootStorageEntry{}, nil).Twice() + m.stateMock.On("GetLeavesByL1InfoRoot", mock.Anything, *vb.L1InfoRoot, nil).Return([]state.L1InfoTreeExitRootStorageEntry{}, nil).Twice() expectedInputProver, err := a.buildInputProver(context.Background(), &batchToProve) require.NoError(err) m.proverMock.On("BatchProof", expectedInputProver).Return(&proofID, nil).Once() @@ -989,7 +989,7 @@ func TestTryGenerateBatchProof(t *testing.T) { TimestampBatchEtrog: &t, } m.stateMock.On("GetVirtualBatch", mock.Anything, lastVerifiedBatchNum+1, nil).Return(&vb, nil).Twice() - m.stateMock.On("GetLeafsByL1InfoRoot", mock.Anything, *vb.L1InfoRoot, 
nil).Return([]state.L1InfoTreeExitRootStorageEntry{}, nil).Twice() + m.stateMock.On("GetLeavesByL1InfoRoot", mock.Anything, *vb.L1InfoRoot, nil).Return([]state.L1InfoTreeExitRootStorageEntry{}, nil).Twice() expectedInputProver, err := a.buildInputProver(context.Background(), &batchToProve) require.NoError(err) m.proverMock.On("BatchProof", expectedInputProver).Return(&proofID, nil).Once() diff --git a/aggregator/interfaces.go b/aggregator/interfaces.go index 0bdcc27d37..46c5de64c2 100644 --- a/aggregator/interfaces.go +++ b/aggregator/interfaces.go @@ -66,7 +66,7 @@ type stateInterface interface { CleanupGeneratedProofs(ctx context.Context, batchNumber uint64, dbTx pgx.Tx) error CleanupLockedProofs(ctx context.Context, duration string, dbTx pgx.Tx) (int64, error) GetL1InfoRootLeafByIndex(ctx context.Context, l1InfoTreeIndex uint32, dbTx pgx.Tx) (state.L1InfoTreeExitRootStorageEntry, error) - GetLeafsByL1InfoRoot(ctx context.Context, l1InfoRoot common.Hash, dbTx pgx.Tx) ([]state.L1InfoTreeExitRootStorageEntry, error) + GetLeavesByL1InfoRoot(ctx context.Context, l1InfoRoot common.Hash, dbTx pgx.Tx) ([]state.L1InfoTreeExitRootStorageEntry, error) GetVirtualBatchParentHash(ctx context.Context, batchNumber uint64, dbTx pgx.Tx) (common.Hash, error) GetForcedBatchParentHash(ctx context.Context, forcedBatchNumber uint64, dbTx pgx.Tx) (common.Hash, error) GetVirtualBatch(ctx context.Context, batchNumber uint64, dbTx pgx.Tx) (*state.VirtualBatch, error) diff --git a/aggregator/mocks/mock_state.go b/aggregator/mocks/mock_state.go index cfc5b66e7d..24d5768523 100644 --- a/aggregator/mocks/mock_state.go +++ b/aggregator/mocks/mock_state.go @@ -295,12 +295,12 @@ func (_m *StateMock) GetLastVerifiedBatch(ctx context.Context, dbTx pgx.Tx) (*st return r0, r1 } -// GetLeafsByL1InfoRoot provides a mock function with given fields: ctx, l1InfoRoot, dbTx -func (_m *StateMock) GetLeafsByL1InfoRoot(ctx context.Context, l1InfoRoot common.Hash, dbTx pgx.Tx) 
([]state.L1InfoTreeExitRootStorageEntry, error) { +// GetLeavesByL1InfoRoot provides a mock function with given fields: ctx, l1InfoRoot, dbTx +func (_m *StateMock) GetLeavesByL1InfoRoot(ctx context.Context, l1InfoRoot common.Hash, dbTx pgx.Tx) ([]state.L1InfoTreeExitRootStorageEntry, error) { ret := _m.Called(ctx, l1InfoRoot, dbTx) if len(ret) == 0 { - panic("no return value specified for GetLeafsByL1InfoRoot") + panic("no return value specified for GetLeavesByL1InfoRoot") } var r0 []state.L1InfoTreeExitRootStorageEntry diff --git a/cmd/run.go b/cmd/run.go index eab27ec17c..5a874ccf37 100644 --- a/cmd/run.go +++ b/cmd/run.go @@ -39,6 +39,7 @@ import ( "github.com/0xPolygonHermez/zkevm-node/state/runtime/executor" "github.com/0xPolygonHermez/zkevm-node/synchronizer" "github.com/0xPolygonHermez/zkevm-node/synchronizer/common/syncinterfaces" + "github.com/ethereum/go-ethereum/ethclient" "github.com/jackc/pgx/v4/pgxpool" "github.com/prometheus/client_golang/prometheus/promhttp" "github.com/urfave/cli/v2" @@ -287,6 +288,15 @@ func newEtherman(c config.Config) (*etherman.Client, error) { return etherman.NewClient(c.Etherman, c.NetworkConfig.L1Config) } +func newL2EthClient(url string) (*ethclient.Client, error) { + ethClient, err := ethclient.Dial(url) + if err != nil { + log.Errorf("error connecting L2 to %s: %+v", url, err) + return nil, err + } + return ethClient, nil +} + func runSynchronizer(cfg config.Config, etherman *etherman.Client, ethTxManagerStorage *ethtxmanager.PostgresStorage, st *state.State, pool *pool.Pool, eventLog *event.EventLog) { var trustedSequencerURL string var err error @@ -302,6 +312,17 @@ func runSynchronizer(cfg config.Config, etherman *etherman.Client, ethTxManagerS } log.Info("trustedSequencerURL ", trustedSequencerURL) } + var ethClientForL2 *ethclient.Client + if trustedSequencerURL != "" { + log.Infof("Creating L2 ethereum client %s", trustedSequencerURL) + ethClientForL2, err = newL2EthClient(trustedSequencerURL) + if err != nil { + 
log.Fatalf("Can't create L2 ethereum client. Err:%v", err) + } + } else { + ethClientForL2 = nil + log.Infof("skipping creating L2 ethereum client because URL is empty") + } zkEVMClient := client.NewClient(trustedSequencerURL) etherManForL1 := []syncinterfaces.EthermanFullInterface{} // If synchronizer are using sequential mode, we only need one etherman client @@ -325,7 +346,7 @@ func runSynchronizer(cfg config.Config, etherman *etherman.Client, ethTxManagerS etm := ethtxmanager.New(cfg.EthTxManager, etherman, ethTxManagerStorage, st) sy, err := synchronizer.NewSynchronizer( cfg.IsTrustedSequencer, etherman, etherManForL1, st, pool, etm, - zkEVMClient, eventLog, cfg.NetworkConfig.Genesis, cfg.Synchronizer, cfg.Log.Environment == "development", + zkEVMClient, ethClientForL2, eventLog, cfg.NetworkConfig.Genesis, cfg.Synchronizer, cfg.Log.Environment == "development", ) if err != nil { log.Fatal(err) diff --git a/config/default.go b/config/default.go index ace12e4e7c..9d333cff8b 100644 --- a/config/default.go +++ b/config/default.go @@ -139,6 +139,7 @@ DisableAPIs = [] SyncInterval = "1s" SyncChunkSize = 100 TrustedSequencerURL = "" # If it is empty or not specified, then the value is read from the smc +SyncBlockProtection = "safe" # latest, finalized, safe L1SynchronizationMode = "sequential" L1SyncCheckL2BlockHash = true L1SyncCheckL2BlockNumberhModulus = 30 diff --git a/config/environments/local/local.node.config.toml b/config/environments/local/local.node.config.toml index af358f1a26..aee5684a51 100644 --- a/config/environments/local/local.node.config.toml +++ b/config/environments/local/local.node.config.toml @@ -84,6 +84,7 @@ EnableL2SuggestedGasPricePolling = true SyncInterval = "1s" SyncChunkSize = 100 TrustedSequencerURL = "" # If it is empty or not specified, then the value is read from the smc +SyncBlockProtection = "latest" # latest, finalized, safe [Sequencer] DeletePoolTxsL1BlockConfirmations = 100 diff --git a/db/migrations/state/0018.sql 
b/db/migrations/state/0018.sql new file mode 100644 index 0000000000..3d9db107c1 --- /dev/null +++ b/db/migrations/state/0018.sql @@ -0,0 +1,11 @@ +-- +migrate Up +ALTER TABLE state.block + ADD COLUMN IF NOT EXISTS checked BOOL NOT NULL DEFAULT FALSE; + +-- set block.checked to true for all blocks below max - 100 +UPDATE state.block SET checked = true WHERE block_num <= (SELECT MAX(block_num) - 1000 FROM state.block); + +-- +migrate Down +ALTER TABLE state.block + DROP COLUMN IF EXISTS checked; + diff --git a/db/migrations/state/0018_test.go b/db/migrations/state/0018_test.go new file mode 100644 index 0000000000..b8a51dbb49 --- /dev/null +++ b/db/migrations/state/0018_test.go @@ -0,0 +1,69 @@ +package migrations_test + +import ( + "database/sql" + "testing" + "time" + + "github.com/stretchr/testify/assert" +) + +type migrationTest0018 struct{} + +func (m migrationTest0018) InsertData(db *sql.DB) error { + const addBlock = "INSERT INTO state.block (block_num, received_at, block_hash) VALUES ($1, $2, $3)" + if _, err := db.Exec(addBlock, 1, time.Now(), "0x29e885edaf8e4b51e1d2e05f9da28161d2fb4f6b1d53827d9b80a23cf2d7d9f1"); err != nil { + return err + } + if _, err := db.Exec(addBlock, 50, time.Now(), "0x29e885edaf8e4b51e1d2e05f9da28161d2fb4f6b1d53827d9b80a23cf2d7d9f1"); err != nil { + return err + } + if _, err := db.Exec(addBlock, 1050, time.Now(), "0x29e885edaf8e4b51e1d2e05f9da28161d2fb4f6b1d53827d9b80a23cf2d7d9f1"); err != nil { + return err + } + return nil +} + +func (m migrationTest0018) RunAssertsAfterMigrationUp(t *testing.T, db *sql.DB) { + var checked bool + row := db.QueryRow("SELECT checked FROM state.block WHERE block_num = $1", 1) + assert.NoError(t, row.Scan(&checked)) + assert.Equal(t, true, checked) + row = db.QueryRow("SELECT checked FROM state.block WHERE block_num = $1", 50) + assert.NoError(t, row.Scan(&checked)) + assert.Equal(t, true, checked) + row = db.QueryRow("SELECT checked FROM state.block WHERE block_num = $1", 1050) + assert.NoError(t, 
row.Scan(&checked)) + assert.Equal(t, false, checked) + + const addBlock = "INSERT INTO state.block (block_num, received_at, block_hash, checked) VALUES ($1, $2, $3, $4)" + _, err := db.Exec(addBlock, 2, time.Now(), "0x29e885edaf8e4b51e1d2e05f9da28161d2fb4f6b1d53827d9b80a23cf2d7d9f1", true) + assert.NoError(t, err) + _, err = db.Exec(addBlock, 3, time.Now(), "0x29e885edaf8e4b51e1d2e05f9da28161d2fb4f6b1d53827d9b80a23cf2d7d9f1", false) + assert.NoError(t, err) + const sql = `SELECT count(*) FROM state.block WHERE checked = true` + row = db.QueryRow(sql) + var result int + assert.NoError(t, row.Scan(&result)) + assert.Equal(t, 3, result, "must be 1,50 per migration and 2 by insert") + + const sqlCheckedFalse = `SELECT count(*) FROM state.block WHERE checked = false` + row = db.QueryRow(sqlCheckedFalse) + + assert.NoError(t, row.Scan(&result)) + assert.Equal(t, 2, result, "must be 150 by migration, and 3 by insert") +} + +func (m migrationTest0018) RunAssertsAfterMigrationDown(t *testing.T, db *sql.DB) { + var result int + + // Check column wip doesn't exists in state.batch table + const sql = `SELECT count(*) FROM state.block` + row := db.QueryRow(sql) + assert.NoError(t, row.Scan(&result)) + assert.Equal(t, 5, result) +} + +func TestMigration0018(t *testing.T) { + runMigrationTest(t, 18, migrationTest0018{}) +} diff --git a/docs/config-file/node-config-doc.html b/docs/config-file/node-config-doc.html index af5035774c..323fbfe5b9 100644 --- a/docs/config-file/node-config-doc.html +++ b/docs/config-file/node-config-doc.html @@ -22,7 +22,7 @@
"300ms"
 

Default: falseType: boolean

EnableInnerTxCacheDB enables the inner tx cache db


Type: array of integer

BridgeAddress is the address of the bridge contract

Must contain a minimum of 20 items

Must contain a maximum of 20 items

Each item of this array must be:

Type: integer

Configuration of service `Syncrhonizer`. For this service is also really important the value of `IsTrustedSequencer` because depending of this values is going to ask to a trusted node for trusted transactions or not
Default: "1s"Type: string

SyncInterval is the delay interval between reading new rollup information


Examples:

"1m"
 
"300ms"
-

Default: 100Type: integer

SyncChunkSize is the number of blocks to sync on each chunk


Default: ""Type: string

TrustedSequencerURL is the rpc url to connect and sync the trusted state


Default: trueType: boolean

L1SyncCheckL2BlockHash if is true when a batch is closed is force to check L2Block hash against trustedNode (only apply for permissionless)


Default: 30Type: integer

L1SyncCheckL2BlockNumberhModulus is the modulus used to choose the l2block to check
a modules 5, for instance, means check all l2block multiples of 5 (10,15,20,...)


Default: "sequential"Type: enum (of string)

L1SynchronizationMode define how to synchronize with L1:
- parallel: Request data to L1 in parallel, and process sequentially. The advantage is that executor is not blocked waiting for L1 data
- sequential: Request data to L1 and execute

Must be one of:

  • "sequential"
  • "parallel"

L1ParallelSynchronization Configuration for parallel mode (if L1SynchronizationMode equal to 'parallel')
Default: 10Type: integer

MaxClients Number of clients used to synchronize with L1


Default: 25Type: integer

MaxPendingNoProcessedBlocks Size of the buffer used to store rollup information from L1, must be >= to NumberOfEthereumClientsToSync
sugested twice of NumberOfParallelOfEthereumClients


Default: "5s"Type: string

RequestLastBlockPeriod is the time to wait to request the
last block to L1 to known if we need to retrieve more data.
This value only apply when the system is synchronized


Examples:

"1m"
+

Default: 100Type: integer

SyncChunkSize is the number of blocks to sync on each chunk


Default: ""Type: string

TrustedSequencerURL is the rpc url to connect and sync the trusted state


Default: "safe"Type: string

SyncBlockProtection specify the state to sync (latest, finalized or safe)


Default: trueType: boolean

L1SyncCheckL2BlockHash if is true when a batch is closed is force to check L2Block hash against trustedNode (only apply for permissionless)


Default: 30Type: integer

L1SyncCheckL2BlockNumberhModulus is the modulus used to choose the l2block to check
a modules 5, for instance, means check all l2block multiples of 5 (10,15,20,...)


Default: "sequential"Type: enum (of string)

L1SynchronizationMode define how to synchronize with L1:
- parallel: Request data to L1 in parallel, and process sequentially. The advantage is that executor is not blocked waiting for L1 data
- sequential: Request data to L1 and execute

Must be one of:

  • "sequential"
  • "parallel"

L1ParallelSynchronization Configuration for parallel mode (if L1SynchronizationMode equal to 'parallel')
Default: 10Type: integer

MaxClients Number of clients used to synchronize with L1


Default: 25Type: integer

MaxPendingNoProcessedBlocks Size of the buffer used to store rollup information from L1, must be >= to NumberOfEthereumClientsToSync
sugested twice of NumberOfParallelOfEthereumClients


Default: "5s"Type: string

RequestLastBlockPeriod is the time to wait to request the
last block to L1 to known if we need to retrieve more data.
This value only apply when the system is synchronized


Examples:

"1m"
 
"300ms"
 

Consumer Configuration for the consumer of rollup information from L1
Default: "5s"Type: string

AceptableInacctivityTime is the expected maximum time that the consumer
could wait until new data is produced. If the time is greater it emmit a log to warn about
that. The idea is keep working the consumer as much as possible, so if the producer is not
fast enought then you could increse the number of parallel clients to sync with L1


Examples:

"1m"
 
"300ms"
diff --git a/docs/config-file/node-config-doc.md b/docs/config-file/node-config-doc.md
index 0d83b60cb3..2065006ec9 100644
--- a/docs/config-file/node-config-doc.md
+++ b/docs/config-file/node-config-doc.md
@@ -2174,6 +2174,7 @@ because depending of this values is going to ask to a trusted node for trusted t
 | - [SyncInterval](#Synchronizer_SyncInterval )                                         | No      | string           | No         | -          | Duration                                                                                                                                                                                                                                                |
 | - [SyncChunkSize](#Synchronizer_SyncChunkSize )                                       | No      | integer          | No         | -          | SyncChunkSize is the number of blocks to sync on each chunk                                                                                                                                                                                             |
 | - [TrustedSequencerURL](#Synchronizer_TrustedSequencerURL )                           | No      | string           | No         | -          | TrustedSequencerURL is the rpc url to connect and sync the trusted state                                                                                                                                                                                |
+| - [SyncBlockProtection](#Synchronizer_SyncBlockProtection )                           | No      | string           | No         | -          | SyncBlockProtection specify the state to sync (latest, finalized or safe)                                                                                                                                                                               |
 | - [L1SyncCheckL2BlockHash](#Synchronizer_L1SyncCheckL2BlockHash )                     | No      | boolean          | No         | -          | L1SyncCheckL2BlockHash if is true when a batch is closed is force to check  L2Block hash against trustedNode (only apply for permissionless)                                                                                                            |
 | - [L1SyncCheckL2BlockNumberhModulus](#Synchronizer_L1SyncCheckL2BlockNumberhModulus ) | No      | integer          | No         | -          | L1SyncCheckL2BlockNumberhModulus is the modulus used to choose the l2block to check
a modules 5, for instance, means check all l2block multiples of 5 (10,15,20,...) | | - [L1SynchronizationMode](#Synchronizer_L1SynchronizationMode ) | No | enum (of string) | No | - | L1SynchronizationMode define how to synchronize with L1:
- parallel: Request data to L1 in parallel, and process sequentially. The advantage is that executor is not blocked waiting for L1 data
- sequential: Request data to L1 and execute | @@ -2234,7 +2235,21 @@ SyncChunkSize=100 TrustedSequencerURL="" ``` -### 9.4. `Synchronizer.L1SyncCheckL2BlockHash` +### 9.4. `Synchronizer.SyncBlockProtection` + +**Type:** : `string` + +**Default:** `"safe"` + +**Description:** SyncBlockProtection specify the state to sync (lastest, finalized or safe) + +**Example setting the default value** ("safe"): +``` +[Synchronizer] +SyncBlockProtection="safe" +``` + +### 9.5. `Synchronizer.L1SyncCheckL2BlockHash` **Type:** : `boolean` @@ -2248,7 +2263,7 @@ TrustedSequencerURL="" L1SyncCheckL2BlockHash=true ``` -### 9.5. `Synchronizer.L1SyncCheckL2BlockNumberhModulus` +### 9.6. `Synchronizer.L1SyncCheckL2BlockNumberhModulus` **Type:** : `integer` @@ -2263,7 +2278,7 @@ a modules 5, for instance, means check all l2block multiples of 5 (10,15,20,...) L1SyncCheckL2BlockNumberhModulus=30 ``` -### 9.6. `Synchronizer.L1SynchronizationMode` +### 9.7. `Synchronizer.L1SynchronizationMode` **Type:** : `enum (of string)` @@ -2283,7 +2298,7 @@ Must be one of: * "sequential" * "parallel" -### 9.7. `[Synchronizer.L1ParallelSynchronization]` +### 9.8. `[Synchronizer.L1ParallelSynchronization]` **Type:** : `object` **Description:** L1ParallelSynchronization Configuration for parallel mode (if L1SynchronizationMode equal to 'parallel') @@ -2301,7 +2316,7 @@ Must be one of: | - [RollupInfoRetriesSpacing](#Synchronizer_L1ParallelSynchronization_RollupInfoRetriesSpacing ) | No | string | No | - | Duration | | - [FallbackToSequentialModeOnSynchronized](#Synchronizer_L1ParallelSynchronization_FallbackToSequentialModeOnSynchronized ) | No | boolean | No | - | FallbackToSequentialModeOnSynchronized if true switch to sequential mode if the system is synchronized | -#### 9.7.1. `Synchronizer.L1ParallelSynchronization.MaxClients` +#### 9.8.1. `Synchronizer.L1ParallelSynchronization.MaxClients` **Type:** : `integer` @@ -2315,7 +2330,7 @@ Must be one of: MaxClients=10 ``` -#### 9.7.2. 
`Synchronizer.L1ParallelSynchronization.MaxPendingNoProcessedBlocks` +#### 9.8.2. `Synchronizer.L1ParallelSynchronization.MaxPendingNoProcessedBlocks` **Type:** : `integer` @@ -2330,7 +2345,7 @@ sugested twice of NumberOfParallelOfEthereumClients MaxPendingNoProcessedBlocks=25 ``` -#### 9.7.3. `Synchronizer.L1ParallelSynchronization.RequestLastBlockPeriod` +#### 9.8.3. `Synchronizer.L1ParallelSynchronization.RequestLastBlockPeriod` **Title:** Duration @@ -2358,7 +2373,7 @@ This value only apply when the system is synchronized RequestLastBlockPeriod="5s" ``` -#### 9.7.4. `[Synchronizer.L1ParallelSynchronization.PerformanceWarning]` +#### 9.8.4. `[Synchronizer.L1ParallelSynchronization.PerformanceWarning]` **Type:** : `object` **Description:** Consumer Configuration for the consumer of rollup information from L1 @@ -2368,7 +2383,7 @@ RequestLastBlockPeriod="5s" | - [AceptableInacctivityTime](#Synchronizer_L1ParallelSynchronization_PerformanceWarning_AceptableInacctivityTime ) | No | string | No | - | Duration | | - [ApplyAfterNumRollupReceived](#Synchronizer_L1ParallelSynchronization_PerformanceWarning_ApplyAfterNumRollupReceived ) | No | integer | No | - | ApplyAfterNumRollupReceived is the number of iterations to
start checking the time waiting for new rollup info data | -##### 9.7.4.1. `Synchronizer.L1ParallelSynchronization.PerformanceWarning.AceptableInacctivityTime` +##### 9.8.4.1. `Synchronizer.L1ParallelSynchronization.PerformanceWarning.AceptableInacctivityTime` **Title:** Duration @@ -2397,7 +2412,7 @@ fast enought then you could increse the number of parallel clients to sync with AceptableInacctivityTime="5s" ``` -##### 9.7.4.2. `Synchronizer.L1ParallelSynchronization.PerformanceWarning.ApplyAfterNumRollupReceived` +##### 9.8.4.2. `Synchronizer.L1ParallelSynchronization.PerformanceWarning.ApplyAfterNumRollupReceived` **Type:** : `integer` @@ -2412,7 +2427,7 @@ start checking the time waiting for new rollup info data ApplyAfterNumRollupReceived=10 ``` -#### 9.7.5. `Synchronizer.L1ParallelSynchronization.RequestLastBlockTimeout` +#### 9.8.5. `Synchronizer.L1ParallelSynchronization.RequestLastBlockTimeout` **Title:** Duration @@ -2438,7 +2453,7 @@ ApplyAfterNumRollupReceived=10 RequestLastBlockTimeout="5s" ``` -#### 9.7.6. `Synchronizer.L1ParallelSynchronization.RequestLastBlockMaxRetries` +#### 9.8.6. `Synchronizer.L1ParallelSynchronization.RequestLastBlockMaxRetries` **Type:** : `integer` @@ -2452,7 +2467,7 @@ RequestLastBlockTimeout="5s" RequestLastBlockMaxRetries=3 ``` -#### 9.7.7. `Synchronizer.L1ParallelSynchronization.StatisticsPeriod` +#### 9.8.7. `Synchronizer.L1ParallelSynchronization.StatisticsPeriod` **Title:** Duration @@ -2478,7 +2493,7 @@ RequestLastBlockMaxRetries=3 StatisticsPeriod="5m0s" ``` -#### 9.7.8. `Synchronizer.L1ParallelSynchronization.TimeOutMainLoop` +#### 9.8.8. `Synchronizer.L1ParallelSynchronization.TimeOutMainLoop` **Title:** Duration @@ -2504,7 +2519,7 @@ StatisticsPeriod="5m0s" TimeOutMainLoop="5m0s" ``` -#### 9.7.9. `Synchronizer.L1ParallelSynchronization.RollupInfoRetriesSpacing` +#### 9.8.9. 
`Synchronizer.L1ParallelSynchronization.RollupInfoRetriesSpacing` **Title:** Duration @@ -2530,7 +2545,7 @@ TimeOutMainLoop="5m0s" RollupInfoRetriesSpacing="5s" ``` -#### 9.7.10. `Synchronizer.L1ParallelSynchronization.FallbackToSequentialModeOnSynchronized` +#### 9.8.10. `Synchronizer.L1ParallelSynchronization.FallbackToSequentialModeOnSynchronized` **Type:** : `boolean` @@ -2544,7 +2559,7 @@ RollupInfoRetriesSpacing="5s" FallbackToSequentialModeOnSynchronized=false ``` -### 9.8. `[Synchronizer.L2Synchronization]` +### 9.9. `[Synchronizer.L2Synchronization]` **Type:** : `object` **Description:** L2Synchronization Configuration for L2 synchronization @@ -2555,7 +2570,7 @@ FallbackToSequentialModeOnSynchronized=false | - [ReprocessFullBatchOnClose](#Synchronizer_L2Synchronization_ReprocessFullBatchOnClose ) | No | boolean | No | - | ReprocessFullBatchOnClose if is true when a batch is closed is force to reprocess again | | - [CheckLastL2BlockHashOnCloseBatch](#Synchronizer_L2Synchronization_CheckLastL2BlockHashOnCloseBatch ) | No | boolean | No | - | CheckLastL2BlockHashOnCloseBatch if is true when a batch is closed is force to check the last L2Block hash | -#### 9.8.1. `Synchronizer.L2Synchronization.AcceptEmptyClosedBatches` +#### 9.9.1. `Synchronizer.L2Synchronization.AcceptEmptyClosedBatches` **Type:** : `boolean` @@ -2570,7 +2585,7 @@ if true, the synchronizer will accept empty batches and process them. AcceptEmptyClosedBatches=false ``` -#### 9.8.2. `Synchronizer.L2Synchronization.ReprocessFullBatchOnClose` +#### 9.9.2. `Synchronizer.L2Synchronization.ReprocessFullBatchOnClose` **Type:** : `boolean` @@ -2584,7 +2599,7 @@ AcceptEmptyClosedBatches=false ReprocessFullBatchOnClose=false ``` -#### 9.8.3. `Synchronizer.L2Synchronization.CheckLastL2BlockHashOnCloseBatch` +#### 9.9.3. 
`Synchronizer.L2Synchronization.CheckLastL2BlockHashOnCloseBatch` **Type:** : `boolean` diff --git a/docs/config-file/node-config-schema.json b/docs/config-file/node-config-schema.json index 21df216b4b..eabbf0eb7a 100644 --- a/docs/config-file/node-config-schema.json +++ b/docs/config-file/node-config-schema.json @@ -853,6 +853,11 @@ "description": "TrustedSequencerURL is the rpc url to connect and sync the trusted state", "default": "" }, + "SyncBlockProtection": { + "type": "string", + "description": "SyncBlockProtection specify the state to sync (lastest, finalized or safe)", + "default": "safe" + }, "L1SyncCheckL2BlockHash": { "type": "boolean", "description": "L1SyncCheckL2BlockHash if is true when a batch is closed is force to check L2Block hash against trustedNode (only apply for permissionless)", diff --git a/etherman/etherman.go b/etherman/etherman.go index 9dd8e70cb7..05b9ae5136 100644 --- a/etherman/etherman.go +++ b/etherman/etherman.go @@ -1188,7 +1188,6 @@ func (etherMan *Client) forcedBatchEvent(ctx context.Context, vLog types.Log, bl func (etherMan *Client) sequencedBatchesEvent(ctx context.Context, vLog types.Log, blocks *[]Block, blocksOrder *map[common.Hash][]Order) error { log.Debugf("SequenceBatches event detected: txHash: %s", common.Bytes2Hex(vLog.TxHash[:])) - //tx,isPending, err:=etherMan.EthClient.TransactionByHash(ctx, vLog.TxHash) sb, err := etherMan.ZkEVM.ParseSequenceBatches(vLog) if err != nil { diff --git a/jsonrpc/endpoints_zkevm.go b/jsonrpc/endpoints_zkevm.go index 7c3e17a555..ffeeed4535 100644 --- a/jsonrpc/endpoints_zkevm.go +++ b/jsonrpc/endpoints_zkevm.go @@ -552,7 +552,7 @@ func (z *ZKEVMEndpoints) internalEstimateGasPriceAndFee(ctx context.Context, arg if txEGP.Cmp(txGasPrice) == -1 { // txEGP < txGasPrice // We need to "round" the final effectiveGasPrice to a 256 fraction of the txGasPrice - txEGPPct, err = z.pool.CalculateEffectiveGasPricePercentage(txGasPrice, txEGP) + txEGPPct, err = 
state.CalculateEffectiveGasPricePercentage(txGasPrice, txEGP) if err != nil { return nil, nil, types.NewRPCError(types.DefaultErrorCode, "failed to calculate effective gas price percentage", err, false) } diff --git a/jsonrpc/mocks/mock_pool.go b/jsonrpc/mocks/mock_pool.go index 7f07d1dc28..7f4e7c2452 100644 --- a/jsonrpc/mocks/mock_pool.go +++ b/jsonrpc/mocks/mock_pool.go @@ -70,34 +70,6 @@ func (_m *PoolMock) CalculateEffectiveGasPrice(rawTx []byte, txGasPrice *big.Int return r0, r1 } -// CalculateEffectiveGasPricePercentage provides a mock function with given fields: gasPrice, effectiveGasPrice -func (_m *PoolMock) CalculateEffectiveGasPricePercentage(gasPrice *big.Int, effectiveGasPrice *big.Int) (uint8, error) { - ret := _m.Called(gasPrice, effectiveGasPrice) - - if len(ret) == 0 { - panic("no return value specified for CalculateEffectiveGasPricePercentage") - } - - var r0 uint8 - var r1 error - if rf, ok := ret.Get(0).(func(*big.Int, *big.Int) (uint8, error)); ok { - return rf(gasPrice, effectiveGasPrice) - } - if rf, ok := ret.Get(0).(func(*big.Int, *big.Int) uint8); ok { - r0 = rf(gasPrice, effectiveGasPrice) - } else { - r0 = ret.Get(0).(uint8) - } - - if rf, ok := ret.Get(1).(func(*big.Int, *big.Int) error); ok { - r1 = rf(gasPrice, effectiveGasPrice) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - // CountPendingTransactions provides a mock function with given fields: ctx func (_m *PoolMock) CountPendingTransactions(ctx context.Context) (uint64, error) { ret := _m.Called(ctx) diff --git a/jsonrpc/types/interfaces.go b/jsonrpc/types/interfaces.go index 936880987a..2a261b753f 100644 --- a/jsonrpc/types/interfaces.go +++ b/jsonrpc/types/interfaces.go @@ -24,7 +24,6 @@ type PoolInterface interface { GetTransactionByHash(ctx context.Context, hash common.Hash) (*pool.Transaction, error) GetTransactionByL2Hash(ctx context.Context, hash common.Hash) (*pool.Transaction, error) CalculateEffectiveGasPrice(rawTx []byte, txGasPrice *big.Int, txGasUsed 
uint64, l1GasPrice uint64, l2GasPrice uint64) (*big.Int, error) - CalculateEffectiveGasPricePercentage(gasPrice *big.Int, effectiveGasPrice *big.Int) (uint8, error) EffectiveGasPriceEnabled() bool AddInnerTx(ctx context.Context, txHash common.Hash, innerTx []byte) error GetInnerTx(ctx context.Context, txHash common.Hash) (string, error) diff --git a/l1infotree/tree.go b/l1infotree/tree.go index e0c19da6bf..d3fe48ed2f 100644 --- a/l1infotree/tree.go +++ b/l1infotree/tree.go @@ -26,7 +26,7 @@ func NewL1InfoTree(height uint8, initialLeaves [][32]byte) (*L1InfoTree, error) var err error mt.siblings, mt.currentRoot, err = mt.initSiblings(initialLeaves) if err != nil { - log.Error("error initializing si siblings. Error: ", err) + log.Error("error initializing siblings. Error: ", err) return nil, err } log.Debug("Initial count: ", mt.count) @@ -34,6 +34,25 @@ func NewL1InfoTree(height uint8, initialLeaves [][32]byte) (*L1InfoTree, error) return mt, nil } +// ResetL1InfoTree resets the L1InfoTree. +func (mt *L1InfoTree) ResetL1InfoTree(initialLeaves [][32]byte) (*L1InfoTree, error) { + log.Info("Resetting L1InfoTree...") + newMT := &L1InfoTree{ + zeroHashes: generateZeroHashes(32), // nolint:gomnd + height: 32, // nolint:gomnd + count: uint32(len(initialLeaves)), + } + var err error + newMT.siblings, newMT.currentRoot, err = newMT.initSiblings(initialLeaves) + if err != nil { + log.Error("error initializing siblings. 
Error: ", err) + return nil, err + } + log.Debug("Reset initial count: ", newMT.count) + log.Debug("Reset initial root: ", newMT.currentRoot) + return newMT, nil +} + func buildIntermediate(leaves [][32]byte) ([][][]byte, [][32]byte) { var ( nodes [][][]byte diff --git a/pool/effectivegasprice.go b/pool/effectivegasprice.go index 98d488885d..c9ad433774 100644 --- a/pool/effectivegasprice.go +++ b/pool/effectivegasprice.go @@ -2,21 +2,12 @@ package pool import ( "bytes" - "errors" "math/big" "github.com/0xPolygonHermez/zkevm-node/log" "github.com/0xPolygonHermez/zkevm-node/state" ) -var ( - // ErrEffectiveGasPriceEmpty happens when the effectiveGasPrice or gasPrice is nil or zero - ErrEffectiveGasPriceEmpty = errors.New("effectiveGasPrice or gasPrice cannot be nil or zero") - - // ErrEffectiveGasPriceIsZero happens when the calculated EffectiveGasPrice is zero - ErrEffectiveGasPriceIsZero = errors.New("effectiveGasPrice cannot be zero") -) - // EffectiveGasPrice implements the effective gas prices calculations and checks type EffectiveGasPrice struct { cfg EffectiveGasPriceCfg @@ -122,33 +113,8 @@ func (e *EffectiveGasPrice) CalculateEffectiveGasPrice(rawTx []byte, txGasPrice bfEffectiveGasPrice.Int(effectiveGasPrice) if effectiveGasPrice.Cmp(new(big.Int).SetUint64(0)) == 0 { - return nil, ErrEffectiveGasPriceIsZero + return nil, state.ErrEffectiveGasPriceIsZero } return effectiveGasPrice, nil } - -// CalculateEffectiveGasPricePercentage calculates the gas price's effective percentage -func (e *EffectiveGasPrice) CalculateEffectiveGasPricePercentage(gasPrice *big.Int, effectiveGasPrice *big.Int) (uint8, error) { - const bits = 256 - var bitsBigInt = big.NewInt(bits) - - if effectiveGasPrice == nil || gasPrice == nil || - gasPrice.Cmp(big.NewInt(0)) == 0 || effectiveGasPrice.Cmp(big.NewInt(0)) == 0 { - return 0, ErrEffectiveGasPriceEmpty - } - - if gasPrice.Cmp(effectiveGasPrice) <= 0 { - return state.MaxEffectivePercentage, nil - } - - // Simulate Ceil with integer 
division - b := new(big.Int).Mul(effectiveGasPrice, bitsBigInt) - b = b.Add(b, gasPrice) - b = b.Sub(b, big.NewInt(1)) //nolint:gomnd - b = b.Div(b, gasPrice) - // At this point we have a percentage between 1-256, we need to sub 1 to have it between 0-255 (byte) - b = b.Sub(b, big.NewInt(1)) //nolint:gomnd - - return uint8(b.Uint64()), nil -} diff --git a/pool/effectivegasprice_test.go b/pool/effectivegasprice_test.go index 96f5a17b9d..c353efdafb 100644 --- a/pool/effectivegasprice_test.go +++ b/pool/effectivegasprice_test.go @@ -4,6 +4,7 @@ import ( "math/big" "testing" + "github.com/0xPolygonHermez/zkevm-node/state" "github.com/stretchr/testify/assert" ) @@ -23,8 +24,6 @@ var ( ) func TestCalculateEffectiveGasPricePercentage(t *testing.T) { - egp := NewEffectiveGasPrice(egpCfg) - testCases := []struct { name string breakEven *big.Int @@ -37,14 +36,14 @@ func TestCalculateEffectiveGasPricePercentage(t *testing.T) { name: "Nil breakEven or gasPrice", gasPrice: big.NewInt(1), expectedValue: uint8(0), - err: ErrEffectiveGasPriceEmpty, + err: state.ErrEffectiveGasPriceEmpty, }, { name: "Zero breakEven or gasPrice", breakEven: big.NewInt(1), gasPrice: big.NewInt(0), expectedValue: uint8(0), - err: ErrEffectiveGasPriceEmpty, + err: state.ErrEffectiveGasPriceEmpty, }, { name: "Both positive, gasPrice less than breakEven", @@ -104,7 +103,7 @@ func TestCalculateEffectiveGasPricePercentage(t *testing.T) { for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { - actual, err := egp.CalculateEffectiveGasPricePercentage(tc.gasPrice, tc.breakEven) + actual, err := state.CalculateEffectiveGasPricePercentage(tc.gasPrice, tc.breakEven) assert.Equal(t, tc.err, err) if actual != 0 { assert.Equal(t, tc.expectedValue, actual) diff --git a/pool/pool.go b/pool/pool.go index 5df2b917d7..8ac72acce6 100644 --- a/pool/pool.go +++ b/pool/pool.go @@ -715,7 +715,7 @@ func (p *Pool) CalculateEffectiveGasPrice(rawTx []byte, txGasPrice *big.Int, txG // 
CalculateEffectiveGasPricePercentage calculates the gas price's effective percentage func (p *Pool) CalculateEffectiveGasPricePercentage(gasPrice *big.Int, effectiveGasPrice *big.Int) (uint8, error) { - return p.effectiveGasPrice.CalculateEffectiveGasPricePercentage(gasPrice, effectiveGasPrice) + return state.CalculateEffectiveGasPricePercentage(gasPrice, effectiveGasPrice) } // EffectiveGasPriceEnabled returns if effective gas price calculation is enabled or not diff --git a/sequencer/finalizer.go b/sequencer/finalizer.go index 2f33961048..813ba3e5e6 100644 --- a/sequencer/finalizer.go +++ b/sequencer/finalizer.go @@ -479,7 +479,7 @@ func (f *finalizer) processTransaction(ctx context.Context, tx *TxTracker, first } } - egpPercentage, err := f.effectiveGasPrice.CalculateEffectiveGasPricePercentage(txGasPrice, tx.EffectiveGasPrice) + egpPercentage, err := state.CalculateEffectiveGasPricePercentage(txGasPrice, tx.EffectiveGasPrice) if err != nil { if f.effectiveGasPrice.IsEnabled() { return nil, err @@ -600,7 +600,7 @@ func (f *finalizer) handleProcessTransactionResponse(ctx context.Context, tx *Tx // If EffectiveGasPrice is disabled we will calculate the percentage and save it for later logging if !egpEnabled { - effectivePercentage, err := f.effectiveGasPrice.CalculateEffectiveGasPricePercentage(txGasPrice, tx.EffectiveGasPrice) + effectivePercentage, err := state.CalculateEffectiveGasPricePercentage(txGasPrice, tx.EffectiveGasPrice) if err != nil { log.Warnf("effectiveGasPrice is disabled, but failed to calculate effective gas price percentage (#2), error: %v", err) tx.EGPLog.Error = fmt.Sprintf("%s, CalculateEffectiveGasPricePercentage#2: %s", tx.EGPLog.Error, err) diff --git a/state/block.go b/state/block.go index c5c9fbb1a2..7883770249 100644 --- a/state/block.go +++ b/state/block.go @@ -12,6 +12,7 @@ type Block struct { BlockHash common.Hash ParentHash common.Hash ReceivedAt time.Time + Checked bool } // NewBlock creates a block with the given data. 
diff --git a/state/effectivegasprice.go b/state/effectivegasprice.go new file mode 100644 index 0000000000..69477a147f --- /dev/null +++ b/state/effectivegasprice.go @@ -0,0 +1,44 @@ +package state + +import ( + "errors" + "math/big" +) + +const ( + // MaxEffectivePercentage is the maximum value that can be used as effective percentage + MaxEffectivePercentage = uint8(255) +) + +var ( + // ErrEffectiveGasPriceEmpty happens when the effectiveGasPrice or gasPrice is nil or zero + ErrEffectiveGasPriceEmpty = errors.New("effectiveGasPrice or gasPrice cannot be nil or zero") + + // ErrEffectiveGasPriceIsZero happens when the calculated EffectiveGasPrice is zero + ErrEffectiveGasPriceIsZero = errors.New("effectiveGasPrice cannot be zero") +) + +// CalculateEffectiveGasPricePercentage calculates the gas price's effective percentage +func CalculateEffectiveGasPricePercentage(gasPrice *big.Int, effectiveGasPrice *big.Int) (uint8, error) { + const bits = 256 + var bitsBigInt = big.NewInt(bits) + + if effectiveGasPrice == nil || gasPrice == nil || + gasPrice.Cmp(big.NewInt(0)) == 0 || effectiveGasPrice.Cmp(big.NewInt(0)) == 0 { + return 0, ErrEffectiveGasPriceEmpty + } + + if gasPrice.Cmp(effectiveGasPrice) <= 0 { + return MaxEffectivePercentage, nil + } + + // Simulate Ceil with integer division + b := new(big.Int).Mul(effectiveGasPrice, bitsBigInt) + b = b.Add(b, gasPrice) + b = b.Sub(b, big.NewInt(1)) //nolint:gomnd + b = b.Div(b, gasPrice) + // At this point we have a percentage between 1-256, we need to sub 1 to have it between 0-255 (byte) + b = b.Sub(b, big.NewInt(1)) //nolint:gomnd + + return uint8(b.Uint64()), nil +} diff --git a/state/helper.go b/state/helper.go index 300ffcdc99..3b37d121ee 100644 --- a/state/helper.go +++ b/state/helper.go @@ -18,8 +18,6 @@ const ( double = 2 ether155V = 27 etherPre155V = 35 - // MaxEffectivePercentage is the maximum value that can be used as effective percentage - MaxEffectivePercentage = uint8(255) // Decoding constants 
headerByteLength uint64 = 1 sLength uint64 = 32 diff --git a/state/interfaces.go b/state/interfaces.go index a1ad71f3ff..d813a17760 100644 --- a/state/interfaces.go +++ b/state/interfaces.go @@ -24,6 +24,8 @@ type storage interface { GetTxsOlderThanNL1BlocksUntilTxHash(ctx context.Context, nL1Blocks uint64, earliestTxHash common.Hash, dbTx pgx.Tx) ([]common.Hash, error) GetLastBlock(ctx context.Context, dbTx pgx.Tx) (*Block, error) GetPreviousBlock(ctx context.Context, offset uint64, dbTx pgx.Tx) (*Block, error) + GetFirstUncheckedBlock(ctx context.Context, fromBlockNumber uint64, dbTx pgx.Tx) (*Block, error) + UpdateCheckedBlockByNumber(ctx context.Context, blockNumber uint64, newCheckedStatus bool, dbTx pgx.Tx) error AddGlobalExitRoot(ctx context.Context, exitRoot *GlobalExitRoot, dbTx pgx.Tx) error GetLatestGlobalExitRoot(ctx context.Context, maxBlockNumber uint64, dbTx pgx.Tx) (GlobalExitRoot, time.Time, error) GetNumberOfBlocksSinceLastGERUpdate(ctx context.Context, dbTx pgx.Tx) (uint64, error) @@ -146,7 +148,7 @@ type storage interface { GetRawBatchTimestamps(ctx context.Context, batchNumber uint64, dbTx pgx.Tx) (*time.Time, *time.Time, error) GetL1InfoRootLeafByL1InfoRoot(ctx context.Context, l1InfoRoot common.Hash, dbTx pgx.Tx) (L1InfoTreeExitRootStorageEntry, error) GetL1InfoRootLeafByIndex(ctx context.Context, l1InfoTreeIndex uint32, dbTx pgx.Tx) (L1InfoTreeExitRootStorageEntry, error) - GetLeafsByL1InfoRoot(ctx context.Context, l1InfoRoot common.Hash, dbTx pgx.Tx) ([]L1InfoTreeExitRootStorageEntry, error) + GetLeavesByL1InfoRoot(ctx context.Context, l1InfoRoot common.Hash, dbTx pgx.Tx) ([]L1InfoTreeExitRootStorageEntry, error) GetBlockByNumber(ctx context.Context, blockNumber uint64, dbTx pgx.Tx) (*Block, error) GetVirtualBatchParentHash(ctx context.Context, batchNumber uint64, dbTx pgx.Tx) (common.Hash, error) GetForcedBatchParentHash(ctx context.Context, forcedBatchNumber uint64, dbTx pgx.Tx) (common.Hash, error) diff --git a/state/l1infotree.go 
b/state/l1infotree.go index ea89d0e206..8cac9ea5d7 100644 --- a/state/l1infotree.go +++ b/state/l1infotree.go @@ -3,7 +3,6 @@ package state import ( "context" "errors" - "fmt" "github.com/0xPolygonHermez/zkevm-node/l1infotree" "github.com/0xPolygonHermez/zkevm-node/log" @@ -34,20 +33,20 @@ func (s *State) buildL1InfoTreeCacheIfNeed(ctx context.Context, dbTx pgx.Tx) err if s.l1InfoTree != nil { return nil } - log.Debugf("Building L1InfoTree cache") - allLeaves, err := s.storage.GetAllL1InfoRootEntries(ctx, dbTx) + // Reset L1InfoTree siblings and leaves + allLeaves, err := s.GetAllL1InfoRootEntries(ctx, dbTx) if err != nil { - log.Error("error getting all leaves. Error: ", err) - return fmt.Errorf("error getting all leaves. Error: %w", err) + log.Error("error getting all leaves to reset l1InfoTree. Error: ", err) + return err } var leaves [][32]byte for _, leaf := range allLeaves { leaves = append(leaves, leaf.Hash()) } - mt, err := l1infotree.NewL1InfoTree(uint8(32), leaves) //nolint:gomnd + mt, err := s.l1InfoTree.ResetL1InfoTree(leaves) if err != nil { - log.Error("error creating L1InfoTree. Error: ", err) - return fmt.Errorf("error creating L1InfoTree. Error: %w", err) + log.Error("error resetting l1InfoTree. 
Error: ", err) + return err } s.l1InfoTree = mt return nil diff --git a/state/mocks/mock_storage.go b/state/mocks/mock_storage.go index 2574697028..2b03479dee 100644 --- a/state/mocks/mock_storage.go +++ b/state/mocks/mock_storage.go @@ -2380,6 +2380,66 @@ func (_c *StorageMock_GetFirstL2BlockNumberForBatchNumber_Call) RunAndReturn(run return _c } +// GetFirstUncheckedBlock provides a mock function with given fields: ctx, fromBlockNumber, dbTx +func (_m *StorageMock) GetFirstUncheckedBlock(ctx context.Context, fromBlockNumber uint64, dbTx pgx.Tx) (*state.Block, error) { + ret := _m.Called(ctx, fromBlockNumber, dbTx) + + if len(ret) == 0 { + panic("no return value specified for GetFirstUncheckedBlock") + } + + var r0 *state.Block + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, uint64, pgx.Tx) (*state.Block, error)); ok { + return rf(ctx, fromBlockNumber, dbTx) + } + if rf, ok := ret.Get(0).(func(context.Context, uint64, pgx.Tx) *state.Block); ok { + r0 = rf(ctx, fromBlockNumber, dbTx) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*state.Block) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, uint64, pgx.Tx) error); ok { + r1 = rf(ctx, fromBlockNumber, dbTx) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// StorageMock_GetFirstUncheckedBlock_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetFirstUncheckedBlock' +type StorageMock_GetFirstUncheckedBlock_Call struct { + *mock.Call +} + +// GetFirstUncheckedBlock is a helper method to define mock.On call +// - ctx context.Context +// - fromBlockNumber uint64 +// - dbTx pgx.Tx +func (_e *StorageMock_Expecter) GetFirstUncheckedBlock(ctx interface{}, fromBlockNumber interface{}, dbTx interface{}) *StorageMock_GetFirstUncheckedBlock_Call { + return &StorageMock_GetFirstUncheckedBlock_Call{Call: _e.mock.On("GetFirstUncheckedBlock", ctx, fromBlockNumber, dbTx)} +} + +func (_c *StorageMock_GetFirstUncheckedBlock_Call) Run(run func(ctx 
context.Context, fromBlockNumber uint64, dbTx pgx.Tx)) *StorageMock_GetFirstUncheckedBlock_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(uint64), args[2].(pgx.Tx)) + }) + return _c +} + +func (_c *StorageMock_GetFirstUncheckedBlock_Call) Return(_a0 *state.Block, _a1 error) *StorageMock_GetFirstUncheckedBlock_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *StorageMock_GetFirstUncheckedBlock_Call) RunAndReturn(run func(context.Context, uint64, pgx.Tx) (*state.Block, error)) *StorageMock_GetFirstUncheckedBlock_Call { + _c.Call.Return(run) + return _c +} + // GetForcedBatch provides a mock function with given fields: ctx, forcedBatchNumber, dbTx func (_m *StorageMock) GetForcedBatch(ctx context.Context, forcedBatchNumber uint64, dbTx pgx.Tx) (*state.ForcedBatch, error) { ret := _m.Called(ctx, forcedBatchNumber, dbTx) @@ -4947,12 +5007,12 @@ func (_c *StorageMock_GetLatestVirtualBatchTimestamp_Call) RunAndReturn(run func return _c } -// GetLeafsByL1InfoRoot provides a mock function with given fields: ctx, l1InfoRoot, dbTx -func (_m *StorageMock) GetLeafsByL1InfoRoot(ctx context.Context, l1InfoRoot common.Hash, dbTx pgx.Tx) ([]state.L1InfoTreeExitRootStorageEntry, error) { +// GetLeavesByL1InfoRoot provides a mock function with given fields: ctx, l1InfoRoot, dbTx +func (_m *StorageMock) GetLeavesByL1InfoRoot(ctx context.Context, l1InfoRoot common.Hash, dbTx pgx.Tx) ([]state.L1InfoTreeExitRootStorageEntry, error) { ret := _m.Called(ctx, l1InfoRoot, dbTx) if len(ret) == 0 { - panic("no return value specified for GetLeafsByL1InfoRoot") + panic("no return value specified for GetLeavesByL1InfoRoot") } var r0 []state.L1InfoTreeExitRootStorageEntry @@ -4977,32 +5037,32 @@ func (_m *StorageMock) GetLeafsByL1InfoRoot(ctx context.Context, l1InfoRoot comm return r0, r1 } -// StorageMock_GetLeafsByL1InfoRoot_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetLeafsByL1InfoRoot' 
-type StorageMock_GetLeafsByL1InfoRoot_Call struct { +// StorageMock_GetLeavesByL1InfoRoot_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetLeavesByL1InfoRoot' +type StorageMock_GetLeavesByL1InfoRoot_Call struct { *mock.Call } -// GetLeafsByL1InfoRoot is a helper method to define mock.On call +// GetLeavesByL1InfoRoot is a helper method to define mock.On call // - ctx context.Context // - l1InfoRoot common.Hash // - dbTx pgx.Tx -func (_e *StorageMock_Expecter) GetLeafsByL1InfoRoot(ctx interface{}, l1InfoRoot interface{}, dbTx interface{}) *StorageMock_GetLeafsByL1InfoRoot_Call { - return &StorageMock_GetLeafsByL1InfoRoot_Call{Call: _e.mock.On("GetLeafsByL1InfoRoot", ctx, l1InfoRoot, dbTx)} +func (_e *StorageMock_Expecter) GetLeavesByL1InfoRoot(ctx interface{}, l1InfoRoot interface{}, dbTx interface{}) *StorageMock_GetLeavesByL1InfoRoot_Call { + return &StorageMock_GetLeavesByL1InfoRoot_Call{Call: _e.mock.On("GetLeavesByL1InfoRoot", ctx, l1InfoRoot, dbTx)} } -func (_c *StorageMock_GetLeafsByL1InfoRoot_Call) Run(run func(ctx context.Context, l1InfoRoot common.Hash, dbTx pgx.Tx)) *StorageMock_GetLeafsByL1InfoRoot_Call { +func (_c *StorageMock_GetLeavesByL1InfoRoot_Call) Run(run func(ctx context.Context, l1InfoRoot common.Hash, dbTx pgx.Tx)) *StorageMock_GetLeavesByL1InfoRoot_Call { _c.Call.Run(func(args mock.Arguments) { run(args[0].(context.Context), args[1].(common.Hash), args[2].(pgx.Tx)) }) return _c } -func (_c *StorageMock_GetLeafsByL1InfoRoot_Call) Return(_a0 []state.L1InfoTreeExitRootStorageEntry, _a1 error) *StorageMock_GetLeafsByL1InfoRoot_Call { +func (_c *StorageMock_GetLeavesByL1InfoRoot_Call) Return(_a0 []state.L1InfoTreeExitRootStorageEntry, _a1 error) *StorageMock_GetLeavesByL1InfoRoot_Call { _c.Call.Return(_a0, _a1) return _c } -func (_c *StorageMock_GetLeafsByL1InfoRoot_Call) RunAndReturn(run func(context.Context, common.Hash, pgx.Tx) ([]state.L1InfoTreeExitRootStorageEntry, error)) 
*StorageMock_GetLeafsByL1InfoRoot_Call { +func (_c *StorageMock_GetLeavesByL1InfoRoot_Call) RunAndReturn(run func(context.Context, common.Hash, pgx.Tx) ([]state.L1InfoTreeExitRootStorageEntry, error)) *StorageMock_GetLeavesByL1InfoRoot_Call { _c.Call.Return(run) return _c } @@ -8152,6 +8212,55 @@ func (_c *StorageMock_UpdateBatchL2Data_Call) RunAndReturn(run func(context.Cont return _c } +// UpdateCheckedBlockByNumber provides a mock function with given fields: ctx, blockNumber, newCheckedStatus, dbTx +func (_m *StorageMock) UpdateCheckedBlockByNumber(ctx context.Context, blockNumber uint64, newCheckedStatus bool, dbTx pgx.Tx) error { + ret := _m.Called(ctx, blockNumber, newCheckedStatus, dbTx) + + if len(ret) == 0 { + panic("no return value specified for UpdateCheckedBlockByNumber") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, uint64, bool, pgx.Tx) error); ok { + r0 = rf(ctx, blockNumber, newCheckedStatus, dbTx) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// StorageMock_UpdateCheckedBlockByNumber_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'UpdateCheckedBlockByNumber' +type StorageMock_UpdateCheckedBlockByNumber_Call struct { + *mock.Call +} + +// UpdateCheckedBlockByNumber is a helper method to define mock.On call +// - ctx context.Context +// - blockNumber uint64 +// - newCheckedStatus bool +// - dbTx pgx.Tx +func (_e *StorageMock_Expecter) UpdateCheckedBlockByNumber(ctx interface{}, blockNumber interface{}, newCheckedStatus interface{}, dbTx interface{}) *StorageMock_UpdateCheckedBlockByNumber_Call { + return &StorageMock_UpdateCheckedBlockByNumber_Call{Call: _e.mock.On("UpdateCheckedBlockByNumber", ctx, blockNumber, newCheckedStatus, dbTx)} +} + +func (_c *StorageMock_UpdateCheckedBlockByNumber_Call) Run(run func(ctx context.Context, blockNumber uint64, newCheckedStatus bool, dbTx pgx.Tx)) *StorageMock_UpdateCheckedBlockByNumber_Call { + _c.Call.Run(func(args mock.Arguments) { + 
run(args[0].(context.Context), args[1].(uint64), args[2].(bool), args[3].(pgx.Tx)) + }) + return _c +} + +func (_c *StorageMock_UpdateCheckedBlockByNumber_Call) Return(_a0 error) *StorageMock_UpdateCheckedBlockByNumber_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *StorageMock_UpdateCheckedBlockByNumber_Call) RunAndReturn(run func(context.Context, uint64, bool, pgx.Tx) error) *StorageMock_UpdateCheckedBlockByNumber_Call { + _c.Call.Return(run) + return _c +} + // UpdateForkIDBlockNumber provides a mock function with given fields: ctx, forkdID, newBlockNumber, updateMemCache, dbTx func (_m *StorageMock) UpdateForkIDBlockNumber(ctx context.Context, forkdID uint64, newBlockNumber uint64, updateMemCache bool, dbTx pgx.Tx) error { ret := _m.Called(ctx, forkdID, newBlockNumber, updateMemCache, dbTx) diff --git a/state/pgstatestorage/block.go b/state/pgstatestorage/block.go index f2ae7abd17..768b384df1 100644 --- a/state/pgstatestorage/block.go +++ b/state/pgstatestorage/block.go @@ -16,10 +16,10 @@ const ( // AddBlock adds a new block to the State Store func (p *PostgresStorage) AddBlock(ctx context.Context, block *state.Block, dbTx pgx.Tx) error { - const addBlockSQL = "INSERT INTO state.block (block_num, block_hash, parent_hash, received_at) VALUES ($1, $2, $3, $4)" + const addBlockSQL = "INSERT INTO state.block (block_num, block_hash, parent_hash, received_at, checked) VALUES ($1, $2, $3, $4, $5)" e := p.getExecQuerier(dbTx) - _, err := e.Exec(ctx, addBlockSQL, block.BlockNumber, block.BlockHash.String(), block.ParentHash.String(), block.ReceivedAt) + _, err := e.Exec(ctx, addBlockSQL, block.BlockNumber, block.BlockHash.String(), block.ParentHash.String(), block.ReceivedAt, block.Checked) return err } @@ -30,11 +30,11 @@ func (p *PostgresStorage) GetLastBlock(ctx context.Context, dbTx pgx.Tx) (*state parentHash string block state.Block ) - const getLastBlockSQL = "SELECT block_num, block_hash, parent_hash, received_at FROM state.block ORDER BY block_num DESC 
LIMIT 1" + const getLastBlockSQL = "SELECT block_num, block_hash, parent_hash, received_at, checked FROM state.block ORDER BY block_num DESC LIMIT 1" q := p.getExecQuerier(dbTx) - err := q.QueryRow(ctx, getLastBlockSQL).Scan(&block.BlockNumber, &blockHash, &parentHash, &block.ReceivedAt) + err := q.QueryRow(ctx, getLastBlockSQL).Scan(&block.BlockNumber, &blockHash, &parentHash, &block.ReceivedAt, &block.Checked) if errors.Is(err, pgx.ErrNoRows) { return nil, state.ErrStateNotSynchronized } @@ -43,6 +43,26 @@ func (p *PostgresStorage) GetLastBlock(ctx context.Context, dbTx pgx.Tx) (*state return &block, err } +// GetFirstUncheckedBlock returns the first L1 block that has not been checked from a given block number. +func (p *PostgresStorage) GetFirstUncheckedBlock(ctx context.Context, fromBlockNumber uint64, dbTx pgx.Tx) (*state.Block, error) { + var ( + blockHash string + parentHash string + block state.Block + ) + const getFirstUncheckedBlockSQL = "SELECT block_num, block_hash, parent_hash, received_at, checked FROM state.block WHERE block_num>=$1 AND checked=false ORDER BY block_num LIMIT 1" + + q := p.getExecQuerier(dbTx) + + err := q.QueryRow(ctx, getFirstUncheckedBlockSQL, fromBlockNumber).Scan(&block.BlockNumber, &blockHash, &parentHash, &block.ReceivedAt, &block.Checked) + if errors.Is(err, pgx.ErrNoRows) { + return nil, state.ErrNotFound + } + block.BlockHash = common.HexToHash(blockHash) + block.ParentHash = common.HexToHash(parentHash) + return &block, err +} + // GetPreviousBlock gets the offset previous L1 block respect to latest. 
func (p *PostgresStorage) GetPreviousBlock(ctx context.Context, offset uint64, dbTx pgx.Tx) (*state.Block, error) { var ( @@ -50,11 +70,11 @@ func (p *PostgresStorage) GetPreviousBlock(ctx context.Context, offset uint64, d parentHash string block state.Block ) - const getPreviousBlockSQL = "SELECT block_num, block_hash, parent_hash, received_at FROM state.block ORDER BY block_num DESC LIMIT 1 OFFSET $1" + const getPreviousBlockSQL = "SELECT block_num, block_hash, parent_hash, received_at,checked FROM state.block ORDER BY block_num DESC LIMIT 1 OFFSET $1" q := p.getExecQuerier(dbTx) - err := q.QueryRow(ctx, getPreviousBlockSQL, offset).Scan(&block.BlockNumber, &blockHash, &parentHash, &block.ReceivedAt) + err := q.QueryRow(ctx, getPreviousBlockSQL, offset).Scan(&block.BlockNumber, &blockHash, &parentHash, &block.ReceivedAt, &block.Checked) if errors.Is(err, pgx.ErrNoRows) { return nil, state.ErrNotFound } @@ -70,11 +90,11 @@ func (p *PostgresStorage) GetBlockByNumber(ctx context.Context, blockNumber uint parentHash string block state.Block ) - const getBlockByNumberSQL = "SELECT block_num, block_hash, parent_hash, received_at FROM state.block WHERE block_num = $1" + const getBlockByNumberSQL = "SELECT block_num, block_hash, parent_hash, received_at,checked FROM state.block WHERE block_num = $1" q := p.getExecQuerier(dbTx) - err := q.QueryRow(ctx, getBlockByNumberSQL, blockNumber).Scan(&block.BlockNumber, &blockHash, &parentHash, &block.ReceivedAt) + err := q.QueryRow(ctx, getBlockByNumberSQL, blockNumber).Scan(&block.BlockNumber, &blockHash, &parentHash, &block.ReceivedAt, &block.Checked) if errors.Is(err, pgx.ErrNoRows) { return nil, state.ErrNotFound } @@ -82,3 +102,14 @@ func (p *PostgresStorage) GetBlockByNumber(ctx context.Context, blockNumber uint block.ParentHash = common.HexToHash(parentHash) return &block, err } + +// UpdateCheckedBlockByNumber update checked flag for a block +func (p *PostgresStorage) UpdateCheckedBlockByNumber(ctx context.Context, 
blockNumber uint64, newCheckedStatus bool, dbTx pgx.Tx) error { + const query = ` + UPDATE state.block + SET checked = $1 WHERE block_num = $2` + + e := p.getExecQuerier(dbTx) + _, err := e.Exec(ctx, query, newCheckedStatus, blockNumber) + return err +} diff --git a/state/pgstatestorage/l1infotree.go b/state/pgstatestorage/l1infotree.go index 450124dde2..ed3fe2dd38 100644 --- a/state/pgstatestorage/l1infotree.go +++ b/state/pgstatestorage/l1infotree.go @@ -112,7 +112,7 @@ func (p *PostgresStorage) GetL1InfoRootLeafByIndex(ctx context.Context, l1InfoTr return entry, nil } -func (p *PostgresStorage) GetLeafsByL1InfoRoot(ctx context.Context, l1InfoRoot common.Hash, dbTx pgx.Tx) ([]state.L1InfoTreeExitRootStorageEntry, error) { +func (p *PostgresStorage) GetLeavesByL1InfoRoot(ctx context.Context, l1InfoRoot common.Hash, dbTx pgx.Tx) ([]state.L1InfoTreeExitRootStorageEntry, error) { // TODO: Optimize this query const getLeafsByL1InfoRootSQL = `SELECT block_num, timestamp, mainnet_exit_root, rollup_exit_root, global_exit_root, prev_block_hash, l1_info_root, l1_info_tree_index FROM state.exit_root diff --git a/state/pgstatestorage/pgstatestorage.go b/state/pgstatestorage/pgstatestorage.go index c5c30371f7..ab33d6c21d 100644 --- a/state/pgstatestorage/pgstatestorage.go +++ b/state/pgstatestorage/pgstatestorage.go @@ -119,7 +119,7 @@ func (p *PostgresStorage) GetStateRootByBatchNumber(ctx context.Context, batchNu return common.HexToHash(stateRootStr), nil } -// GetLogsByBlockNumber get all the logs from a specific block ordered by log index +// GetLogsByBlockNumber get all the logs from a specific block ordered by tx index and log index func (p *PostgresStorage) GetLogsByBlockNumber(ctx context.Context, blockNumber uint64, dbTx pgx.Tx) ([]*types.Log, error) { const query = ` SELECT t.l2_block_num, b.block_hash, l.tx_hash, r.tx_index, l.log_index, l.address, l.data, l.topic0, l.topic1, l.topic2, l.topic3 @@ -128,7 +128,7 @@ func (p *PostgresStorage) GetLogsByBlockNumber(ctx 
context.Context, blockNumber INNER JOIN state.l2block b ON b.block_num = t.l2_block_num INNER JOIN state.receipt r ON r.tx_hash = t.hash WHERE b.block_num = $1 - ORDER BY l.log_index ASC` + ORDER BY r.tx_index ASC, l.log_index ASC` q := p.getExecQuerier(dbTx) rows, err := q.Query(ctx, query, blockNumber) diff --git a/state/pgstatestorage/pgstatestorage_test.go b/state/pgstatestorage/pgstatestorage_test.go index 29b7f67717..416b21b47b 100644 --- a/state/pgstatestorage/pgstatestorage_test.go +++ b/state/pgstatestorage/pgstatestorage_test.go @@ -872,7 +872,7 @@ func TestGetLogs(t *testing.T) { ctx := context.Background() cfg := state.Config{ - MaxLogsCount: 8, + MaxLogsCount: 40, MaxLogsBlockRange: 10, ForkIDIntervals: stateCfg.ForkIDIntervals, } @@ -895,39 +895,69 @@ func TestGetLogs(t *testing.T) { time := time.Now() blockNumber := big.NewInt(1) - for i := 0; i < 3; i++ { - tx := types.NewTx(&types.LegacyTx{ - Nonce: uint64(i), - To: nil, - Value: new(big.Int), - Gas: 0, - GasPrice: big.NewInt(0), - }) - - logs := []*types.Log{} - for j := 0; j < 4; j++ { - logs = append(logs, &types.Log{TxHash: tx.Hash(), Index: uint(j)}) + maxBlocks := 3 + txsPerBlock := 4 + logsPerTx := 5 + + nonce := uint64(0) + + // number of blocks to be created + for b := 0; b < maxBlocks; b++ { + logIndex := uint(0) + transactions := make([]*types.Transaction, 0, txsPerBlock) + receipts := make([]*types.Receipt, 0, txsPerBlock) + stateRoots := make([]common.Hash, 0, txsPerBlock) + + // number of transactions in a block to be created + for t := 0; t < txsPerBlock; t++ { + nonce++ + txIndex := uint(t + 1) + + tx := types.NewTx(&types.LegacyTx{ + Nonce: nonce, + To: nil, + Value: new(big.Int), + Gas: 0, + GasPrice: big.NewInt(0), + }) + + logs := []*types.Log{} + + // if block is even logIndex follows a sequence related to the block + // for odd blocks logIndex follows a sequence related ot the tx + // this is needed to simulate a logIndex difference introduced on Etrog + // and we need to 
maintain to be able to synchronize these blocks + // number of logs in a transaction to be created + for l := 0; l < logsPerTx; l++ { + li := logIndex + if b%2 != 0 { // even block + li = uint(l) + } + + logs = append(logs, &types.Log{TxHash: tx.Hash(), TxIndex: txIndex, Index: li}) + logIndex++ + } + + receipt := &types.Receipt{ + Type: tx.Type(), + PostState: state.ZeroHash.Bytes(), + CumulativeGasUsed: 0, + EffectiveGasPrice: big.NewInt(0), + BlockNumber: blockNumber, + GasUsed: tx.Gas(), + TxHash: tx.Hash(), + TransactionIndex: txIndex, + Status: types.ReceiptStatusSuccessful, + Logs: logs, + } + + transactions = append(transactions, tx) + receipts = append(receipts, receipt) + stateRoots = append(stateRoots, state.ZeroHash) } - receipt := &types.Receipt{ - Type: tx.Type(), - PostState: state.ZeroHash.Bytes(), - CumulativeGasUsed: 0, - EffectiveGasPrice: big.NewInt(0), - BlockNumber: blockNumber, - GasUsed: tx.Gas(), - TxHash: tx.Hash(), - TransactionIndex: 0, - Status: types.ReceiptStatusSuccessful, - Logs: logs, - } - - transactions := []*types.Transaction{tx} - receipts := []*types.Receipt{receipt} - stateRoots := []common.Hash{state.ZeroHash} - header := state.NewL2Header(&types.Header{ - Number: big.NewInt(int64(i) + 1), + Number: big.NewInt(int64(b) + 1), ParentHash: state.ZeroHash, Coinbase: state.ZeroAddress, Root: state.ZeroHash, @@ -954,6 +984,8 @@ func TestGetLogs(t *testing.T) { require.NoError(t, err) } + require.NoError(t, dbTx.Commit(ctx)) + type testCase struct { name string from uint64 @@ -988,20 +1020,227 @@ func TestGetLogs(t *testing.T) { name: "logs returned successfully", from: 1, to: 2, - logCount: 8, + logCount: 40, expectedError: nil, }, } for _, testCase := range testCases { t.Run(testCase.name, func(t *testing.T) { - logs, err := testState.GetLogs(ctx, testCase.from, testCase.to, []common.Address{}, [][]common.Hash{}, nil, nil, dbTx) - + logs, err := testState.GetLogs(ctx, testCase.from, testCase.to, []common.Address{}, 
[][]common.Hash{}, nil, nil, nil) assert.Equal(t, testCase.logCount, len(logs)) assert.Equal(t, testCase.expectedError, err) + + // check tx index and log index order + lastBlockNumber := uint64(0) + lastTxIndex := uint(0) + lastLogIndex := uint(0) + + for i, l := range logs { + // if block has changed and it's not the first log, reset lastTxIndex + if uint(l.BlockNumber) != uint(lastBlockNumber) && i != 0 { + lastTxIndex = 0 + } + + if l.TxIndex < lastTxIndex { + t.Errorf("invalid tx index, expected greater than or equal to %v, but found %v", lastTxIndex, l.TxIndex) + } + // add tolerance for log index Etrog issue that was starting log indexes from 0 for each tx within a block + // if tx index has changed and the log index starts on zero, than resets the lastLogIndex to zero + if l.TxIndex != lastTxIndex && l.Index == 0 { + lastLogIndex = 0 + } + + if l.Index < lastLogIndex { + t.Errorf("invalid log index, expected greater than %v, but found %v", lastLogIndex, l.Index) + } + + lastBlockNumber = l.BlockNumber + lastTxIndex = l.TxIndex + lastLogIndex = l.Index + } + }) + } +} + +func TestGetLogsByBlockNumber(t *testing.T) { + initOrResetDB() + + ctx := context.Background() + + cfg := state.Config{ + MaxLogsCount: 40, + MaxLogsBlockRange: 10, + ForkIDIntervals: stateCfg.ForkIDIntervals, + } + + mt, err := l1infotree.NewL1InfoTree(32, [][32]byte{}) + if err != nil { + panic(err) + } + testState = state.NewState(stateCfg, pgstatestorage.NewPostgresStorage(cfg, stateDb), executorClient, stateTree, nil, mt) + + dbTx, err := testState.BeginStateTransaction(ctx) + require.NoError(t, err) + err = testState.AddBlock(ctx, block, dbTx) + assert.NoError(t, err) + + batchNumber := uint64(1) + _, err = testState.Exec(ctx, "INSERT INTO state.batch (batch_num, wip) VALUES ($1, FALSE)", batchNumber) + assert.NoError(t, err) + + time := time.Now() + blockNumber := big.NewInt(1) + + maxBlocks := 3 + txsPerBlock := 4 + logsPerTx := 5 + + nonce := uint64(0) + + // number of blocks to be 
created + for b := 0; b < maxBlocks; b++ { + logIndex := uint(0) + transactions := make([]*types.Transaction, 0, txsPerBlock) + receipts := make([]*types.Receipt, 0, txsPerBlock) + stateRoots := make([]common.Hash, 0, txsPerBlock) + + // number of transactions in a block to be created + for t := 0; t < txsPerBlock; t++ { + nonce++ + txIndex := uint(t + 1) + + tx := types.NewTx(&types.LegacyTx{ + Nonce: nonce, + To: nil, + Value: new(big.Int), + Gas: 0, + GasPrice: big.NewInt(0), + }) + + logs := []*types.Log{} + + // if block is even logIndex follows a sequence related to the block + // for odd blocks logIndex follows a sequence related ot the tx + // this is needed to simulate a logIndex difference introduced on Etrog + // and we need to maintain to be able to synchronize these blocks + // number of logs in a transaction to be created + for l := 0; l < logsPerTx; l++ { + li := logIndex + if b%2 != 0 { // even block + li = uint(l) + } + + logs = append(logs, &types.Log{TxHash: tx.Hash(), TxIndex: txIndex, Index: li}) + logIndex++ + } + + receipt := &types.Receipt{ + Type: tx.Type(), + PostState: state.ZeroHash.Bytes(), + CumulativeGasUsed: 0, + EffectiveGasPrice: big.NewInt(0), + BlockNumber: blockNumber, + GasUsed: tx.Gas(), + TxHash: tx.Hash(), + TransactionIndex: txIndex, + Status: types.ReceiptStatusSuccessful, + Logs: logs, + } + + transactions = append(transactions, tx) + receipts = append(receipts, receipt) + stateRoots = append(stateRoots, state.ZeroHash) + } + + header := state.NewL2Header(&types.Header{ + Number: big.NewInt(int64(b) + 1), + ParentHash: state.ZeroHash, + Coinbase: state.ZeroAddress, + Root: state.ZeroHash, + GasUsed: 1, + GasLimit: 10, + Time: uint64(time.Unix()), }) + + st := trie.NewStackTrie(nil) + l2Block := state.NewL2Block(header, transactions, []*state.L2Header{}, receipts, st) + for _, receipt := range receipts { + receipt.BlockHash = l2Block.Hash() + } + + numTxs := len(transactions) + storeTxsEGPData := 
make([]state.StoreTxEGPData, numTxs) + txsL2Hash := make([]common.Hash, numTxs) + for i := range transactions { + storeTxsEGPData[i] = state.StoreTxEGPData{EGPLog: nil, EffectivePercentage: state.MaxEffectivePercentage} + txsL2Hash[i] = common.HexToHash(fmt.Sprintf("0x%d", i)) + } + + err = testState.AddL2Block(ctx, batchNumber, l2Block, receipts, txsL2Hash, storeTxsEGPData, stateRoots, dbTx) + require.NoError(t, err) } + require.NoError(t, dbTx.Commit(ctx)) + + type testCase struct { + name string + blockNumber uint64 + logCount int + expectedError error + } + + testCases := []testCase{ + { + name: "logs returned successfully", + blockNumber: 1, + logCount: 20, + expectedError: nil, + }, + { + name: "logs returned successfully", + blockNumber: 2, + logCount: 20, + expectedError: nil, + }, + } + + for _, testCase := range testCases { + t.Run(testCase.name, func(t *testing.T) { + logs, err := testState.GetLogsByBlockNumber(ctx, testCase.blockNumber, nil) + assert.Equal(t, testCase.logCount, len(logs)) + assert.Equal(t, testCase.expectedError, err) + + // check tx index and log index order + lastBlockNumber := uint64(0) + lastTxIndex := uint(0) + lastLogIndex := uint(0) + + for i, l := range logs { + // if block has changed and it's not the first log, reset lastTxIndex + if uint(l.BlockNumber) != uint(lastBlockNumber) && i != 0 { + lastTxIndex = 0 + } + + if l.TxIndex < lastTxIndex { + t.Errorf("invalid tx index, expected greater than or equal to %v, but found %v", lastTxIndex, l.TxIndex) + } + // add tolerance for log index Etrog issue that was starting log indexes from 0 for each tx within a block + // if tx index has changed and the log index starts on zero, than resets the lastLogIndex to zero + if l.TxIndex != lastTxIndex && l.Index == 0 { + lastLogIndex = 0 + } + + if l.Index < lastLogIndex { + t.Errorf("invalid log index, expected greater than %v, but found %v", lastLogIndex, l.Index) + } + + lastBlockNumber = l.BlockNumber + lastTxIndex = l.TxIndex + 
lastLogIndex = l.Index + } + }) + } } func TestGetNativeBlockHashesInRange(t *testing.T) { @@ -1411,3 +1650,40 @@ func TestGetLastGER(t *testing.T) { require.Equal(t, common.HexToHash("0x2").String(), ger.String()) } + +func TestGetFirstUncheckedBlock(t *testing.T) { + var err error + blockNumber := uint64(51001) + err = testState.AddBlock(context.Background(), &state.Block{BlockNumber: blockNumber, Checked: true}, nil) + require.NoError(t, err) + err = testState.AddBlock(context.Background(), &state.Block{BlockNumber: blockNumber + 1, Checked: false}, nil) + require.NoError(t, err) + err = testState.AddBlock(context.Background(), &state.Block{BlockNumber: blockNumber + 2, Checked: true}, nil) + require.NoError(t, err) + + block, err := testState.GetFirstUncheckedBlock(context.Background(), blockNumber, nil) + require.NoError(t, err) + require.Equal(t, uint64(blockNumber+1), block.BlockNumber) +} + +func TestUpdateCheckedBlockByNumber(t *testing.T) { + var err error + blockNumber := uint64(54001) + err = testState.AddBlock(context.Background(), &state.Block{BlockNumber: blockNumber, Checked: true}, nil) + require.NoError(t, err) + err = testState.AddBlock(context.Background(), &state.Block{BlockNumber: blockNumber + 1, Checked: false}, nil) + require.NoError(t, err) + err = testState.AddBlock(context.Background(), &state.Block{BlockNumber: blockNumber + 2, Checked: true}, nil) + require.NoError(t, err) + + b1, err := testState.GetBlockByNumber(context.Background(), uint64(blockNumber), nil) + require.NoError(t, err) + require.True(t, b1.Checked) + + err = testState.UpdateCheckedBlockByNumber(context.Background(), uint64(blockNumber), false, nil) + require.NoError(t, err) + + b1, err = testState.GetBlockByNumber(context.Background(), uint64(blockNumber), nil) + require.NoError(t, err) + require.False(t, b1.Checked) +} diff --git a/state/reset.go b/state/reset.go index 62571250e0..655f5f3dd1 100644 --- a/state/reset.go +++ b/state/reset.go @@ -3,6 +3,7 @@ package 
state import ( "context" + "github.com/0xPolygonHermez/zkevm-node/log" "github.com/jackc/pgx/v4" ) @@ -13,12 +14,14 @@ func (s *State) Reset(ctx context.Context, blockNumber uint64, dbTx pgx.Tx) erro // - VerifiedBatches // - Entries in exit_root table err := s.ResetToL1BlockNumber(ctx, blockNumber, dbTx) - if err == nil { - // Discard L1InfoTree cache - // We can't rebuild cache, because we are inside a transaction, so we dont known - // is going to be a commit or a rollback. So is going to be rebuild on the next - // request that needs it. - s.l1InfoTree = nil + if err != nil { + log.Error("error resetting L1BlockNumber. Error: ", err) + return err } - return err + // Discard L1InfoTree cache + // We can't rebuild cache, because we are inside a transaction, so we dont known + // is going to be a commit or a rollback. So is going to be rebuild on the next + // request that needs it. + s.l1InfoTree = nil + return nil } diff --git a/state/trace.go b/state/trace.go index 0cc7cc1872..9c7684e8f8 100644 --- a/state/trace.go +++ b/state/trace.go @@ -78,7 +78,15 @@ func (s *State) DebugTransaction(ctx context.Context, transactionHash common.Has var effectivePercentage []uint8 for i := 0; i <= count; i++ { txsToEncode = append(txsToEncode, *l2Block.Transactions()[i]) - effectivePercentage = append(effectivePercentage, MaxEffectivePercentage) + txGasPrice := tx.GasPrice() + effectiveGasPrice := receipt.EffectiveGasPrice + egpPercentage, err := CalculateEffectiveGasPricePercentage(txGasPrice, effectiveGasPrice) + if errors.Is(err, ErrEffectiveGasPriceEmpty) { + egpPercentage = MaxEffectivePercentage + } else if err != nil { + return nil, err + } + effectivePercentage = append(effectivePercentage, egpPercentage) log.Debugf("trace will reprocess tx: %v", l2Block.Transactions()[i].Hash().String()) } diff --git a/state/transaction.go b/state/transaction.go index f530b4148c..355fdad9ad 100644 --- a/state/transaction.go +++ b/state/transaction.go @@ -243,6 +243,7 @@ func (s 
*State) StoreL2Block(ctx context.Context, batchNumber uint64, l2Block *P imStateRoots := make([]common.Hash, 0, numTxs) var receipt *types.Receipt + txIndex := 0 for i, txResponse := range l2Block.TransactionResponses { // if the transaction has an intrinsic invalid tx error it means // the transaction has not changed the state, so we don't store it @@ -262,9 +263,10 @@ func (s *State) StoreL2Block(ctx context.Context, batchNumber uint64, l2Block *P storeTxsEGPData = append(storeTxsEGPData, storeTxEGPData) - receipt = GenerateReceipt(header.Number, txResponse, uint(i), forkID) + receipt = GenerateReceipt(header.Number, txResponse, uint(txIndex), forkID) receipts = append(receipts, receipt) imStateRoots = append(imStateRoots, txResp.StateRoot) + txIndex++ } // Create block to be able to calculate its hash @@ -507,8 +509,7 @@ func (s *State) internalProcessUnsignedTransactionV2(ctx context.Context, tx *ty } nonce := loadedNonce.Uint64() - deltaTimestamp := uint32(uint64(time.Now().Unix()) - l2Block.Time()) - transactions := s.BuildChangeL2Block(deltaTimestamp, uint32(0)) + transactions := s.BuildChangeL2Block(uint32(0), uint32(0)) batchL2Data, err := EncodeUnsignedTransaction(*tx, s.cfg.ChainID, &nonce, forkID) if err != nil { @@ -533,7 +534,7 @@ func (s *State) internalProcessUnsignedTransactionV2(ctx context.Context, tx *ty // v2 fields L1InfoRoot: l2Block.BlockInfoRoot().Bytes(), - TimestampLimit: uint64(time.Now().Unix()), + TimestampLimit: l2Block.Time(), SkipFirstChangeL2Block: cFalse, SkipWriteBlockInfoRoot: cTrue, } @@ -541,14 +542,15 @@ func (s *State) internalProcessUnsignedTransactionV2(ctx context.Context, tx *ty processBatchRequestV2.NoCounters = cTrue } - log.Debugf("internalProcessUnsignedTransactionV2[processBatchRequestV2.From]: %v", processBatchRequestV2.From) log.Debugf("internalProcessUnsignedTransactionV2[processBatchRequestV2.OldBatchNum]: %v", processBatchRequestV2.OldBatchNum) 
log.Debugf("internalProcessUnsignedTransactionV2[processBatchRequestV2.OldStateRoot]: %v", hex.EncodeToHex(processBatchRequestV2.OldStateRoot)) log.Debugf("internalProcessUnsignedTransactionV2[processBatchRequestV2.OldAccInputHash]: %v", hex.EncodeToHex(processBatchRequestV2.OldAccInputHash)) + log.Debugf("internalProcessUnsignedTransactionV2[processBatchRequestV2.Coinbase]: %v", processBatchRequestV2.Coinbase) - log.Debugf("internalProcessUnsignedTransactionV2[processBatchRequestV2.ForkId]: %v", processBatchRequestV2.ForkId) - log.Debugf("internalProcessUnsignedTransactionV2[processBatchRequestV2.ChainId]: %v", processBatchRequestV2.ChainId) log.Debugf("internalProcessUnsignedTransactionV2[processBatchRequestV2.UpdateMerkleTree]: %v", processBatchRequestV2.UpdateMerkleTree) + log.Debugf("internalProcessUnsignedTransactionV2[processBatchRequestV2.ChainId]: %v", processBatchRequestV2.ChainId) + log.Debugf("internalProcessUnsignedTransactionV2[processBatchRequestV2.ForkId]: %v", processBatchRequestV2.ForkId) + log.Debugf("internalProcessUnsignedTransactionV2[processBatchRequestV2.From]: %v", processBatchRequestV2.From) log.Debugf("internalProcessUnsignedTransactionV2[processBatchRequestV2.ContextId]: %v", processBatchRequestV2.ContextId) log.Debugf("internalProcessUnsignedTransactionV2[processBatchRequestV2.L1InfoRoot]: %v", hex.EncodeToHex(processBatchRequestV2.L1InfoRoot)) diff --git a/synchronizer/actions/check_l2block.go b/synchronizer/actions/check_l2block.go index d2d546d6a4..14c9e5cb19 100644 --- a/synchronizer/actions/check_l2block.go +++ b/synchronizer/actions/check_l2block.go @@ -6,9 +6,9 @@ import ( "fmt" "math/big" - "github.com/0xPolygonHermez/zkevm-node/jsonrpc/types" "github.com/0xPolygonHermez/zkevm-node/log" "github.com/0xPolygonHermez/zkevm-node/state" + "github.com/ethereum/go-ethereum/core/types" "github.com/jackc/pgx/v4" ) @@ -129,11 +129,14 @@ func (p *CheckL2BlockHash) iterationCheckL2Block(ctx context.Context, l2BlockNum } func 
compareL2Blocks(prefixLogs string, localL2Block *state.L2Block, trustedL2Block *types.Block) error { - if localL2Block == nil || trustedL2Block == nil || trustedL2Block.Hash == nil { - return fmt.Errorf("%s localL2Block or trustedL2Block or trustedHash are nil", prefixLogs) + if localL2Block == nil || trustedL2Block == nil { + return fmt.Errorf("%s localL2Block or trustedL2Block are nil", prefixLogs) + } + if localL2Block.Hash() != trustedL2Block.Hash() { + return fmt.Errorf("%s localL2Block.Hash %s and trustedL2Block.Hash %s are different", prefixLogs, localL2Block.Hash().String(), trustedL2Block.Hash().String()) } - if localL2Block.Hash() != *trustedL2Block.Hash { - return fmt.Errorf("%s localL2Block.Hash %s and trustedL2Block.Hash %s are different", prefixLogs, localL2Block.Hash().String(), (*trustedL2Block.Hash).String()) + if localL2Block.ParentHash() != trustedL2Block.ParentHash() { + return fmt.Errorf("%s localL2Block.ParentHash %s and trustedL2Block.ParentHash %s are different", prefixLogs, localL2Block.ParentHash().String(), trustedL2Block.ParentHash().String()) } return nil } diff --git a/synchronizer/actions/check_l2block_test.go b/synchronizer/actions/check_l2block_test.go index da4510fd66..28a8a503b7 100644 --- a/synchronizer/actions/check_l2block_test.go +++ b/synchronizer/actions/check_l2block_test.go @@ -5,7 +5,6 @@ import ( "math/big" "testing" - rpctypes "github.com/0xPolygonHermez/zkevm-node/jsonrpc/types" "github.com/0xPolygonHermez/zkevm-node/state" "github.com/0xPolygonHermez/zkevm-node/synchronizer/actions" mock_syncinterfaces "github.com/0xPolygonHermez/zkevm-node/synchronizer/common/syncinterfaces/mocks" @@ -19,7 +18,7 @@ import ( type CheckL2BlocksTestData struct { sut *actions.CheckL2BlockHash mockState *mock_syncinterfaces.StateFullInterface - zKEVMClient *mock_syncinterfaces.ZKEVMClientInterface + zKEVMClient *mock_syncinterfaces.ZKEVMClientEthereumCompatibleInterface } func TestCheckL2BlockHash_GetMinimumL2BlockToCheck(t *testing.T) { 
@@ -57,7 +56,7 @@ func TestCheckL2BlockHashNotEnoughBlocksToCheck(t *testing.T) { func newCheckL2BlocksTestData(t *testing.T, initialL2Block, modulus uint64) CheckL2BlocksTestData { res := CheckL2BlocksTestData{ mockState: mock_syncinterfaces.NewStateFullInterface(t), - zKEVMClient: mock_syncinterfaces.NewZKEVMClientInterface(t), + zKEVMClient: mock_syncinterfaces.NewZKEVMClientEthereumCompatibleInterface(t), } res.sut = actions.NewCheckL2BlockHash(res.mockState, res.zKEVMClient, initialL2Block, modulus) return res @@ -97,18 +96,23 @@ func TestCheckL2BlockHashMatch(t *testing.T) { data.mockState.EXPECT().GetLastL2BlockNumber(mock.Anything, mock.Anything).Return(lastL2Block, nil) data.mockState.EXPECT().GetL2BlockByNumber(mock.Anything, lastL2Block, mock.Anything).Return(stateBlock, nil) - l2blockHash := stateBlock.Hash() - rpcL2Block := rpctypes.Block{ - Hash: &l2blockHash, - Number: rpctypes.ArgUint64(lastL2Block), - } + //l2blockHash := stateBlock.Hash() + // rpcL2Block := rpctypes.Block{ + // Hash: &l2blockHash, + // Number: rpctypes.ArgUint64(lastL2Block), + // } + // create a types.Block object + + rpcL2Block := types.NewBlock(&types.Header{ + Number: big.NewInt(int64(lastL2Block)), + }, nil, nil, nil, nil) - data.zKEVMClient.EXPECT().BlockByNumber(mock.Anything, lastL2BlockBigInt).Return(&rpcL2Block, nil) + data.zKEVMClient.EXPECT().BlockByNumber(mock.Anything, lastL2BlockBigInt).Return(rpcL2Block, nil) err := data.sut.CheckL2Block(context.Background(), nil) require.NoError(t, err) } -func TestCheckL2BlockHashMissmatch(t *testing.T) { +func TestCheckL2BlockHashMismatch(t *testing.T) { data := newCheckL2BlocksTestData(t, 1, 10) lastL2Block := uint64(14) lastL2BlockBigInt := big.NewInt(int64(lastL2Block)) @@ -119,13 +123,14 @@ func TestCheckL2BlockHashMissmatch(t *testing.T) { data.mockState.EXPECT().GetLastL2BlockNumber(mock.Anything, mock.Anything).Return(lastL2Block, nil) data.mockState.EXPECT().GetL2BlockByNumber(mock.Anything, lastL2Block, 
mock.Anything).Return(stateBlock, nil) - l2blockHash := common.HexToHash("0x1234") - rpcL2Block := rpctypes.Block{ - Hash: &l2blockHash, - Number: rpctypes.ArgUint64(lastL2Block), - } + //l2blockHash := common.HexToHash("0x1234") + + rpcL2Block := types.NewBlock(&types.Header{ + Number: big.NewInt(int64(lastL2Block)), + ParentHash: common.HexToHash("0x1234"), + }, nil, nil, nil, nil) - data.zKEVMClient.EXPECT().BlockByNumber(mock.Anything, lastL2BlockBigInt).Return(&rpcL2Block, nil) + data.zKEVMClient.EXPECT().BlockByNumber(mock.Anything, lastL2BlockBigInt).Return(rpcL2Block, nil) err := data.sut.CheckL2Block(context.Background(), nil) require.Error(t, err) } diff --git a/synchronizer/actions/etrog/processor_l1_sequence_batches.go b/synchronizer/actions/etrog/processor_l1_sequence_batches.go index aa82c9c791..e1528594d9 100644 --- a/synchronizer/actions/etrog/processor_l1_sequence_batches.go +++ b/synchronizer/actions/etrog/processor_l1_sequence_batches.go @@ -391,7 +391,7 @@ func (p *ProcessorL1SequenceBatchesEtrog) checkTrustedState(ctx context.Context, reason := reorgReasons.String() if p.sync.IsTrustedSequencer() { - log.Errorf("TRUSTED REORG DETECTED! Batch: %d reson:%s", batch.BatchNumber, reason) + log.Errorf("TRUSTED REORG DETECTED! Batch: %d reason:%s", batch.BatchNumber, reason) // Halt function never have to return! it must blocks the process p.halt(ctx, fmt.Errorf("TRUSTED REORG DETECTED! Batch: %d", batch.BatchNumber)) log.Errorf("CRITICAL!!!: Never have to execute this code. Halt function never have to return! 
it must blocks the process") diff --git a/synchronizer/common/syncinterfaces/mocks/state_full_interface.go b/synchronizer/common/syncinterfaces/mocks/state_full_interface.go index f41e906728..f4790bc695 100644 --- a/synchronizer/common/syncinterfaces/mocks/state_full_interface.go +++ b/synchronizer/common/syncinterfaces/mocks/state_full_interface.go @@ -881,6 +881,66 @@ func (_c *StateFullInterface_GetExitRootByGlobalExitRoot_Call) RunAndReturn(run return _c } +// GetFirstUncheckedBlock provides a mock function with given fields: ctx, fromBlockNumber, dbTx +func (_m *StateFullInterface) GetFirstUncheckedBlock(ctx context.Context, fromBlockNumber uint64, dbTx pgx.Tx) (*state.Block, error) { + ret := _m.Called(ctx, fromBlockNumber, dbTx) + + if len(ret) == 0 { + panic("no return value specified for GetFirstUncheckedBlock") + } + + var r0 *state.Block + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, uint64, pgx.Tx) (*state.Block, error)); ok { + return rf(ctx, fromBlockNumber, dbTx) + } + if rf, ok := ret.Get(0).(func(context.Context, uint64, pgx.Tx) *state.Block); ok { + r0 = rf(ctx, fromBlockNumber, dbTx) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*state.Block) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, uint64, pgx.Tx) error); ok { + r1 = rf(ctx, fromBlockNumber, dbTx) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// StateFullInterface_GetFirstUncheckedBlock_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetFirstUncheckedBlock' +type StateFullInterface_GetFirstUncheckedBlock_Call struct { + *mock.Call +} + +// GetFirstUncheckedBlock is a helper method to define mock.On call +// - ctx context.Context +// - fromBlockNumber uint64 +// - dbTx pgx.Tx +func (_e *StateFullInterface_Expecter) GetFirstUncheckedBlock(ctx interface{}, fromBlockNumber interface{}, dbTx interface{}) *StateFullInterface_GetFirstUncheckedBlock_Call { + return 
&StateFullInterface_GetFirstUncheckedBlock_Call{Call: _e.mock.On("GetFirstUncheckedBlock", ctx, fromBlockNumber, dbTx)} +} + +func (_c *StateFullInterface_GetFirstUncheckedBlock_Call) Run(run func(ctx context.Context, fromBlockNumber uint64, dbTx pgx.Tx)) *StateFullInterface_GetFirstUncheckedBlock_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(uint64), args[2].(pgx.Tx)) + }) + return _c +} + +func (_c *StateFullInterface_GetFirstUncheckedBlock_Call) Return(_a0 *state.Block, _a1 error) *StateFullInterface_GetFirstUncheckedBlock_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *StateFullInterface_GetFirstUncheckedBlock_Call) RunAndReturn(run func(context.Context, uint64, pgx.Tx) (*state.Block, error)) *StateFullInterface_GetFirstUncheckedBlock_Call { + _c.Call.Return(run) + return _c +} + // GetForkIDByBatchNumber provides a mock function with given fields: batchNumber func (_m *StateFullInterface) GetForkIDByBatchNumber(batchNumber uint64) uint64 { ret := _m.Called(batchNumber) @@ -2715,6 +2775,55 @@ func (_c *StateFullInterface_UpdateBatchL2Data_Call) RunAndReturn(run func(conte return _c } +// UpdateCheckedBlockByNumber provides a mock function with given fields: ctx, blockNumber, newCheckedStatus, dbTx +func (_m *StateFullInterface) UpdateCheckedBlockByNumber(ctx context.Context, blockNumber uint64, newCheckedStatus bool, dbTx pgx.Tx) error { + ret := _m.Called(ctx, blockNumber, newCheckedStatus, dbTx) + + if len(ret) == 0 { + panic("no return value specified for UpdateCheckedBlockByNumber") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, uint64, bool, pgx.Tx) error); ok { + r0 = rf(ctx, blockNumber, newCheckedStatus, dbTx) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// StateFullInterface_UpdateCheckedBlockByNumber_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'UpdateCheckedBlockByNumber' +type 
StateFullInterface_UpdateCheckedBlockByNumber_Call struct { + *mock.Call +} + +// UpdateCheckedBlockByNumber is a helper method to define mock.On call +// - ctx context.Context +// - blockNumber uint64 +// - newCheckedStatus bool +// - dbTx pgx.Tx +func (_e *StateFullInterface_Expecter) UpdateCheckedBlockByNumber(ctx interface{}, blockNumber interface{}, newCheckedStatus interface{}, dbTx interface{}) *StateFullInterface_UpdateCheckedBlockByNumber_Call { + return &StateFullInterface_UpdateCheckedBlockByNumber_Call{Call: _e.mock.On("UpdateCheckedBlockByNumber", ctx, blockNumber, newCheckedStatus, dbTx)} +} + +func (_c *StateFullInterface_UpdateCheckedBlockByNumber_Call) Run(run func(ctx context.Context, blockNumber uint64, newCheckedStatus bool, dbTx pgx.Tx)) *StateFullInterface_UpdateCheckedBlockByNumber_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(uint64), args[2].(bool), args[3].(pgx.Tx)) + }) + return _c +} + +func (_c *StateFullInterface_UpdateCheckedBlockByNumber_Call) Return(_a0 error) *StateFullInterface_UpdateCheckedBlockByNumber_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *StateFullInterface_UpdateCheckedBlockByNumber_Call) RunAndReturn(run func(context.Context, uint64, bool, pgx.Tx) error) *StateFullInterface_UpdateCheckedBlockByNumber_Call { + _c.Call.Return(run) + return _c +} + // UpdateForkIDBlockNumber provides a mock function with given fields: ctx, forkdID, newBlockNumber, updateMemCache, dbTx func (_m *StateFullInterface) UpdateForkIDBlockNumber(ctx context.Context, forkdID uint64, newBlockNumber uint64, updateMemCache bool, dbTx pgx.Tx) error { ret := _m.Called(ctx, forkdID, newBlockNumber, updateMemCache, dbTx) diff --git a/synchronizer/common/syncinterfaces/mocks/zkevm_client_ethereum_compatible_interface.go b/synchronizer/common/syncinterfaces/mocks/zkevm_client_ethereum_compatible_interface.go new file mode 100644 index 0000000000..09c0b0f235 --- /dev/null +++ 
b/synchronizer/common/syncinterfaces/mocks/zkevm_client_ethereum_compatible_interface.go @@ -0,0 +1,98 @@ +// Code generated by mockery. DO NOT EDIT. + +package mock_syncinterfaces + +import ( + context "context" + big "math/big" + + mock "github.com/stretchr/testify/mock" + + types "github.com/ethereum/go-ethereum/core/types" +) + +// ZKEVMClientEthereumCompatibleInterface is an autogenerated mock type for the ZKEVMClientEthereumCompatibleInterface type +type ZKEVMClientEthereumCompatibleInterface struct { + mock.Mock +} + +type ZKEVMClientEthereumCompatibleInterface_Expecter struct { + mock *mock.Mock +} + +func (_m *ZKEVMClientEthereumCompatibleInterface) EXPECT() *ZKEVMClientEthereumCompatibleInterface_Expecter { + return &ZKEVMClientEthereumCompatibleInterface_Expecter{mock: &_m.Mock} +} + +// BlockByNumber provides a mock function with given fields: ctx, number +func (_m *ZKEVMClientEthereumCompatibleInterface) BlockByNumber(ctx context.Context, number *big.Int) (*types.Block, error) { + ret := _m.Called(ctx, number) + + if len(ret) == 0 { + panic("no return value specified for BlockByNumber") + } + + var r0 *types.Block + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, *big.Int) (*types.Block, error)); ok { + return rf(ctx, number) + } + if rf, ok := ret.Get(0).(func(context.Context, *big.Int) *types.Block); ok { + r0 = rf(ctx, number) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*types.Block) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, *big.Int) error); ok { + r1 = rf(ctx, number) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ZKEVMClientEthereumCompatibleInterface_BlockByNumber_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'BlockByNumber' +type ZKEVMClientEthereumCompatibleInterface_BlockByNumber_Call struct { + *mock.Call +} + +// BlockByNumber is a helper method to define mock.On call +// - ctx context.Context +// - number *big.Int +func (_e 
*ZKEVMClientEthereumCompatibleInterface_Expecter) BlockByNumber(ctx interface{}, number interface{}) *ZKEVMClientEthereumCompatibleInterface_BlockByNumber_Call { + return &ZKEVMClientEthereumCompatibleInterface_BlockByNumber_Call{Call: _e.mock.On("BlockByNumber", ctx, number)} +} + +func (_c *ZKEVMClientEthereumCompatibleInterface_BlockByNumber_Call) Run(run func(ctx context.Context, number *big.Int)) *ZKEVMClientEthereumCompatibleInterface_BlockByNumber_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(*big.Int)) + }) + return _c +} + +func (_c *ZKEVMClientEthereumCompatibleInterface_BlockByNumber_Call) Return(_a0 *types.Block, _a1 error) *ZKEVMClientEthereumCompatibleInterface_BlockByNumber_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *ZKEVMClientEthereumCompatibleInterface_BlockByNumber_Call) RunAndReturn(run func(context.Context, *big.Int) (*types.Block, error)) *ZKEVMClientEthereumCompatibleInterface_BlockByNumber_Call { + _c.Call.Return(run) + return _c +} + +// NewZKEVMClientEthereumCompatibleInterface creates a new instance of ZKEVMClientEthereumCompatibleInterface. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. 
+func NewZKEVMClientEthereumCompatibleInterface(t interface { + mock.TestingT + Cleanup(func()) +}) *ZKEVMClientEthereumCompatibleInterface { + mock := &ZKEVMClientEthereumCompatibleInterface{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/synchronizer/common/syncinterfaces/state.go b/synchronizer/common/syncinterfaces/state.go index 3503338c1b..1034b5fab9 100644 --- a/synchronizer/common/syncinterfaces/state.go +++ b/synchronizer/common/syncinterfaces/state.go @@ -29,6 +29,8 @@ type StateFullInterface interface { AddBlock(ctx context.Context, block *state.Block, dbTx pgx.Tx) error Reset(ctx context.Context, blockNumber uint64, dbTx pgx.Tx) error GetPreviousBlock(ctx context.Context, offset uint64, dbTx pgx.Tx) (*state.Block, error) + GetFirstUncheckedBlock(ctx context.Context, fromBlockNumber uint64, dbTx pgx.Tx) (*state.Block, error) + UpdateCheckedBlockByNumber(ctx context.Context, blockNumber uint64, newCheckedStatus bool, dbTx pgx.Tx) error GetLastBatchNumber(ctx context.Context, dbTx pgx.Tx) (uint64, error) GetBatchByNumber(ctx context.Context, batchNumber uint64, dbTx pgx.Tx) (*state.Batch, error) ResetTrustedState(ctx context.Context, batchNumber uint64, dbTx pgx.Tx) error diff --git a/synchronizer/common/syncinterfaces/zkevm_ethereum_compatible_client.go b/synchronizer/common/syncinterfaces/zkevm_ethereum_compatible_client.go new file mode 100644 index 0000000000..416371dfce --- /dev/null +++ b/synchronizer/common/syncinterfaces/zkevm_ethereum_compatible_client.go @@ -0,0 +1,21 @@ +package syncinterfaces + +import ( + "context" + "math/big" + + "github.com/ethereum/go-ethereum/core/types" +) + +// ZKEVMClientEthereumCompatibleInterface contains the methods required to interact with zkEVM-RPC as a ethereum-API compatible +// +// Reason behind: the zkEVMClient have some extensions to ethereum-API that are not compatible with all nodes. 
So if you need to maximize +// the compatibility the idea is to use a regular ethereum-API compatible client +type ZKEVMClientEthereumCompatibleInterface interface { + ZKEVMClientEthereumCompatibleL2BlockGetter +} + +// ZKEVMClientEthereumCompatibleL2BlockGetter contains the methods required to interact with zkEVM-RPC as a ethereum-API compatible for obtain Block information +type ZKEVMClientEthereumCompatibleL2BlockGetter interface { + BlockByNumber(ctx context.Context, number *big.Int) (*types.Block, error) +} diff --git a/synchronizer/config.go b/synchronizer/config.go index 55bc29d3e7..0f7d822a60 100644 --- a/synchronizer/config.go +++ b/synchronizer/config.go @@ -13,6 +13,8 @@ type Config struct { SyncChunkSize uint64 `mapstructure:"SyncChunkSize"` // TrustedSequencerURL is the rpc url to connect and sync the trusted state TrustedSequencerURL string `mapstructure:"TrustedSequencerURL"` + // SyncBlockProtection specify the state to sync (lastest, finalized or safe) + SyncBlockProtection string `mapstructure:"SyncBlockProtection"` // L1SyncCheckL2BlockHash if is true when a batch is closed is force to check L2Block hash against trustedNode (only apply for permissionless) L1SyncCheckL2BlockHash bool `mapstructure:"L1SyncCheckL2BlockHash"` diff --git a/synchronizer/l2_sync/l2_sync_etrog/executor_trusted_batch_sync.go b/synchronizer/l2_sync/l2_sync_etrog/executor_trusted_batch_sync.go index bb1a0798fa..7c89494441 100644 --- a/synchronizer/l2_sync/l2_sync_etrog/executor_trusted_batch_sync.go +++ b/synchronizer/l2_sync/l2_sync_etrog/executor_trusted_batch_sync.go @@ -136,13 +136,13 @@ func (b *SyncTrustedBatchExecutorForEtrog) FullProcess(ctx context.Context, data return nil, err } - leafs, l1InfoRoot, _, err := b.state.GetL1InfoTreeDataFromBatchL2Data(ctx, data.TrustedBatch.BatchL2Data, dbTx) + leaves, l1InfoRoot, _, err := b.state.GetL1InfoTreeDataFromBatchL2Data(ctx, data.TrustedBatch.BatchL2Data, dbTx) if err != nil { log.Errorf("%s error getting 
GetL1InfoTreeDataFromBatchL2Data: %v. Error:%w", data.DebugPrefix, l1InfoRoot, err) return nil, err } debugStr := data.DebugPrefix - processBatchResp, err := b.processAndStoreTxs(ctx, b.getProcessRequest(data, leafs, l1InfoRoot), dbTx, debugStr) + processBatchResp, err := b.processAndStoreTxs(ctx, b.getProcessRequest(data, leaves, l1InfoRoot), dbTx, debugStr) if err != nil { log.Error("%s error procesingAndStoringTxs. Error: ", debugStr, err) return nil, err @@ -197,7 +197,7 @@ func (b *SyncTrustedBatchExecutorForEtrog) IncrementalProcess(ctx context.Contex return nil, err } - leafs, l1InfoRoot, _, err := b.state.GetL1InfoTreeDataFromBatchL2Data(ctx, PartialBatchL2Data, dbTx) + leaves, l1InfoRoot, _, err := b.state.GetL1InfoTreeDataFromBatchL2Data(ctx, PartialBatchL2Data, dbTx) if err != nil { log.Errorf("%s error getting GetL1InfoTreeDataFromBatchL2Data: %v. Error:%w", data.DebugPrefix, l1InfoRoot, err) // TODO: Need to refine, depending of the response of GetL1InfoTreeDataFromBatchL2Data @@ -205,7 +205,7 @@ func (b *SyncTrustedBatchExecutorForEtrog) IncrementalProcess(ctx context.Contex return nil, syncinterfaces.ErrMissingSyncFromL1 } debugStr := fmt.Sprintf("%s: Batch %d:", data.Mode, uint64(data.TrustedBatch.Number)) - processReq := b.getProcessRequest(data, leafs, l1InfoRoot) + processReq := b.getProcessRequest(data, leaves, l1InfoRoot) processReq.Transactions = PartialBatchL2Data processBatchResp, err := b.processAndStoreTxs(ctx, processReq, dbTx, debugStr) if err != nil { diff --git a/synchronizer/synchronizer.go b/synchronizer/synchronizer.go index c0a9cdfd92..6abe161d02 100644 --- a/synchronizer/synchronizer.go +++ b/synchronizer/synchronizer.go @@ -22,6 +22,7 @@ import ( "github.com/0xPolygonHermez/zkevm-node/synchronizer/l2_sync/l2_sync_etrog" "github.com/0xPolygonHermez/zkevm-node/synchronizer/metrics" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/rpc" "github.com/jackc/pgx/v4" ) @@ -52,17 +53,19 @@ type ClientSynchronizer 
struct { etherMan syncinterfaces.EthermanFullInterface latestFlushID uint64 // If true the lastFlushID is stored in DB and we don't need to check again - latestFlushIDIsFulfilled bool - etherManForL1 []syncinterfaces.EthermanFullInterface - state syncinterfaces.StateFullInterface - pool syncinterfaces.PoolInterface - ethTxManager syncinterfaces.EthTxManager - zkEVMClient syncinterfaces.ZKEVMClientInterface - eventLog syncinterfaces.EventLogInterface - ctx context.Context - cancelCtx context.CancelFunc - genesis state.Genesis - cfg Config + latestFlushIDIsFulfilled bool + syncBlockProtection rpc.BlockNumber + etherManForL1 []syncinterfaces.EthermanFullInterface + state syncinterfaces.StateFullInterface + pool syncinterfaces.PoolInterface + ethTxManager syncinterfaces.EthTxManager + zkEVMClient syncinterfaces.ZKEVMClientInterface + zkEVMClientEthereumCompatible syncinterfaces.ZKEVMClientEthereumCompatibleInterface + eventLog syncinterfaces.EventLogInterface + ctx context.Context + cancelCtx context.CancelFunc + genesis state.Genesis + cfg Config // Id of the 'process' of the executor. Each time that it starts this value changes // This value is obtained from the call state.GetStoredFlushID // It starts as an empty string and it is filled in the first call @@ -85,30 +88,40 @@ func NewSynchronizer( pool syncinterfaces.PoolInterface, ethTxManager syncinterfaces.EthTxManager, zkEVMClient syncinterfaces.ZKEVMClientInterface, + zkEVMClientEthereumCompatible syncinterfaces.ZKEVMClientEthereumCompatibleInterface, eventLog syncinterfaces.EventLogInterface, genesis state.Genesis, cfg Config, runInDevelopmentMode bool) (Synchronizer, error) { ctx, cancel := context.WithCancel(context.Background()) metrics.Register() + syncBlockProtection, err := decodeSyncBlockProtection(cfg.SyncBlockProtection) + if err != nil { + log.Errorf("error decoding syncBlockProtection. 
Error: %v", err) + cancel() + return nil, err + } + log.Info("syncBlockProtection: ", syncBlockProtection) res := &ClientSynchronizer{ - isTrustedSequencer: isTrustedSequencer, - state: st, - etherMan: ethMan, - etherManForL1: etherManForL1, - pool: pool, - ctx: ctx, - cancelCtx: cancel, - ethTxManager: ethTxManager, - zkEVMClient: zkEVMClient, - eventLog: eventLog, - genesis: genesis, - cfg: cfg, - proverID: "", - previousExecutorFlushID: 0, - l1SyncOrchestration: nil, - l1EventProcessors: nil, - halter: syncCommon.NewCriticalErrorHalt(eventLog, 5*time.Second), //nolint:gomnd + isTrustedSequencer: isTrustedSequencer, + state: st, + etherMan: ethMan, + etherManForL1: etherManForL1, + pool: pool, + ctx: ctx, + cancelCtx: cancel, + ethTxManager: ethTxManager, + zkEVMClient: zkEVMClient, + zkEVMClientEthereumCompatible: zkEVMClientEthereumCompatible, + eventLog: eventLog, + genesis: genesis, + cfg: cfg, + proverID: "", + previousExecutorFlushID: 0, + l1SyncOrchestration: nil, + l1EventProcessors: nil, + syncBlockProtection: syncBlockProtection, + halter: syncCommon.NewCriticalErrorHalt(eventLog, 5*time.Second), //nolint:gomnd } if !isTrustedSequencer { @@ -143,7 +156,7 @@ func NewSynchronizer( log.Errorf("error getting last L2Block number from state. 
Error: %v", err) return nil, err } - l1checkerL2Blocks = actions.NewCheckL2BlockHash(res.state, res.zkEVMClient, initialL2Block, cfg.L1SyncCheckL2BlockNumberhModulus) + l1checkerL2Blocks = actions.NewCheckL2BlockHash(res.state, res.zkEVMClientEthereumCompatible, initialL2Block, cfg.L1SyncCheckL2BlockNumberhModulus) } else { log.Infof("Trusted Node can't check L2Block hash, ignoring parameter") } @@ -163,6 +176,19 @@ func NewSynchronizer( return res, nil } +func decodeSyncBlockProtection(sBP string) (rpc.BlockNumber, error) { + switch sBP { + case "latest": + return rpc.LatestBlockNumber, nil + case "finalized": + return rpc.FinalizedBlockNumber, nil + case "safe": + return rpc.SafeBlockNumber, nil + default: + return 0, fmt.Errorf("error decoding SyncBlockProtection. Unknown value") + } +} + var waitDuration = time.Duration(0) func newL1SyncParallel(ctx context.Context, cfg Config, etherManForL1 []syncinterfaces.EthermanFullInterface, sync *ClientSynchronizer, runExternalControl bool) *l1_parallel_sync.L1SyncOrchestration { @@ -520,6 +546,14 @@ func (s *ClientSynchronizer) syncBlocksParallel(lastEthBlockSynced *state.Block) // This function syncs the node from a specific block to the latest func (s *ClientSynchronizer) syncBlocksSequential(lastEthBlockSynced *state.Block) (*state.Block, error) { + // Call the blockchain to retrieve data + header, err := s.etherMan.HeaderByNumber(s.ctx, big.NewInt(s.syncBlockProtection.Int64())) + if err != nil { + log.Error("error getting header of the latest block in L1. Error: ", err) + return lastEthBlockSynced, err + } + lastKnownBlock := header.Number + // This function will read events fromBlockNum to latestEthBlock. Check reorg to be sure that everything is ok. 
block, err := s.checkReorg(lastEthBlockSynced) if err != nil { @@ -535,13 +569,6 @@ func (s *ClientSynchronizer) syncBlocksSequential(lastEthBlockSynced *state.Bloc return block, nil } - // Call the blockchain to retrieve data - header, err := s.etherMan.HeaderByNumber(s.ctx, nil) - if err != nil { - return lastEthBlockSynced, err - } - lastKnownBlock := header.Number - var fromBlock uint64 if lastEthBlockSynced.BlockNumber > 0 { fromBlock = lastEthBlockSynced.BlockNumber + 1 @@ -549,6 +576,9 @@ func (s *ClientSynchronizer) syncBlocksSequential(lastEthBlockSynced *state.Bloc for { toBlock := fromBlock + s.cfg.SyncChunkSize + if toBlock > lastKnownBlock.Uint64() { + toBlock = lastKnownBlock.Uint64() + } log.Infof("Syncing block %d of %d", fromBlock, lastKnownBlock.Uint64()) log.Infof("Getting rollup info from block %d to block %d", fromBlock, toBlock) // This function returns the rollup information contained in the ethereum blocks and an extra param called order. @@ -562,6 +592,22 @@ func (s *ClientSynchronizer) syncBlocksSequential(lastEthBlockSynced *state.Bloc if err != nil { return lastEthBlockSynced, err } + + // Check reorg again to be sure that the chain has not changed between the previous checkReorg and the call GetRollupInfoByBlockRange + block, err := s.checkReorg(lastEthBlockSynced) + if err != nil { + log.Errorf("error checking reorgs. Retrying... Err: %v", err) + return lastEthBlockSynced, fmt.Errorf("error checking reorgs") + } + if block != nil { + err = s.resetState(block.BlockNumber) + if err != nil { + log.Errorf("error resetting the state to a previous block. Retrying... Err: %v", err) + return lastEthBlockSynced, fmt.Errorf("error resetting the state to a previous block") + } + return block, nil + } + start = time.Now() err = s.ProcessBlockRange(blocks, order) metrics.ProcessL1DataTime(time.Since(start)) @@ -755,26 +801,29 @@ hash and has parent. This operation has to be done until a match is found. 
func (s *ClientSynchronizer) checkReorg(latestBlock *state.Block) (*state.Block, error) { // This function only needs to worry about reorgs if some of the reorganized blocks contained rollup info. latestEthBlockSynced := *latestBlock + reorgedBlock := *latestBlock var depth uint64 for { - block, err := s.etherMan.EthBlockByNumber(s.ctx, latestBlock.BlockNumber) + block, err := s.etherMan.EthBlockByNumber(s.ctx, reorgedBlock.BlockNumber) if err != nil { - log.Errorf("error getting latest block synced from blockchain. Block: %d, error: %v", latestBlock.BlockNumber, err) + log.Errorf("error getting latest block synced from blockchain. Block: %d, error: %v", reorgedBlock.BlockNumber, err) return nil, err } - if block.NumberU64() != latestBlock.BlockNumber { + log.Infof("[checkReorg function] BlockNumber: %d BlockHash got from L1 provider: %s", block.Number().Uint64(), block.Hash().String()) + log.Infof("[checkReorg function] latestBlockNumber: %d latestBlockHash already synced: %s", latestBlock.BlockNumber, latestBlock.BlockHash.String()) + if block.NumberU64() != reorgedBlock.BlockNumber { err = fmt.Errorf("wrong ethereum block retrieved from blockchain. Block numbers don't match. BlockNumber stored: %d. 
BlockNumber retrieved: %d", - latestBlock.BlockNumber, block.NumberU64()) + reorgedBlock.BlockNumber, block.NumberU64()) log.Error("error: ", err) return nil, err } // Compare hashes - if (block.Hash() != latestBlock.BlockHash || block.ParentHash() != latestBlock.ParentHash) && latestBlock.BlockNumber > s.genesis.RollupBlockNumber { - log.Infof("checkReorg: Bad block %d hashOk %t parentHashOk %t", latestBlock.BlockNumber, block.Hash() == latestBlock.BlockHash, block.ParentHash() == latestBlock.ParentHash) - log.Debug("[checkReorg function] => latestBlockNumber: ", latestBlock.BlockNumber) - log.Debug("[checkReorg function] => latestBlockHash: ", latestBlock.BlockHash) - log.Debug("[checkReorg function] => latestBlockHashParent: ", latestBlock.ParentHash) - log.Debug("[checkReorg function] => BlockNumber: ", latestBlock.BlockNumber, block.NumberU64()) + if (block.Hash() != reorgedBlock.BlockHash || block.ParentHash() != reorgedBlock.ParentHash) && reorgedBlock.BlockNumber > s.genesis.RollupBlockNumber { + log.Infof("checkReorg: Bad block %d hashOk %t parentHashOk %t", reorgedBlock.BlockNumber, block.Hash() == reorgedBlock.BlockHash, block.ParentHash() == reorgedBlock.ParentHash) + log.Debug("[checkReorg function] => latestBlockNumber: ", reorgedBlock.BlockNumber) + log.Debug("[checkReorg function] => latestBlockHash: ", reorgedBlock.BlockHash) + log.Debug("[checkReorg function] => latestBlockHashParent: ", reorgedBlock.ParentHash) + log.Debug("[checkReorg function] => BlockNumber: ", reorgedBlock.BlockNumber, block.NumberU64()) log.Debug("[checkReorg function] => BlockHash: ", block.Hash()) log.Debug("[checkReorg function] => BlockHashParent: ", block.ParentHash()) depth++ @@ -785,7 +834,7 @@ func (s *ClientSynchronizer) checkReorg(latestBlock *state.Block) (*state.Block, log.Errorf("error creating db transaction to get prevoius blocks") return nil, err } - latestBlock, err = s.state.GetPreviousBlock(s.ctx, depth, dbTx) + lb, err := s.state.GetPreviousBlock(s.ctx, 
depth, dbTx) errC := dbTx.Commit(s.ctx) if errC != nil { log.Errorf("error committing dbTx, err: %v", errC) @@ -801,16 +850,21 @@ func (s *ClientSynchronizer) checkReorg(latestBlock *state.Block) (*state.Block, log.Warn("error checking reorg: previous block not found in db: ", err) return &state.Block{}, nil } else if err != nil { + log.Error("error getting previousBlock from db. Error: ", err) return nil, err } + reorgedBlock = *lb } else { + log.Debugf("checkReorg: Block %d hashOk %t parentHashOk %t", reorgedBlock.BlockNumber, block.Hash() == reorgedBlock.BlockHash, block.ParentHash() == reorgedBlock.ParentHash) break } } - if latestEthBlockSynced.BlockHash != latestBlock.BlockHash { + if latestEthBlockSynced.BlockHash != reorgedBlock.BlockHash { + latestBlock = &reorgedBlock log.Info("Reorg detected in block: ", latestEthBlockSynced.BlockNumber, " last block OK: ", latestBlock.BlockNumber) return latestBlock, nil } + log.Debugf("No reorg detected in block: %d. BlockHash: %s", latestEthBlockSynced.BlockNumber, latestEthBlockSynced.BlockHash.String()) return nil, nil } diff --git a/synchronizer/synchronizer_test.go b/synchronizer/synchronizer_test.go index 7d22347d5f..4902aa4be6 100644 --- a/synchronizer/synchronizer_test.go +++ b/synchronizer/synchronizer_test.go @@ -18,6 +18,7 @@ import ( syncMocks "github.com/0xPolygonHermez/zkevm-node/synchronizer/mocks" "github.com/ethereum/go-ethereum/common" ethTypes "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/rpc" "github.com/jackc/pgx/v4" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" @@ -32,12 +33,13 @@ const ( ) type mocks struct { - Etherman *mock_syncinterfaces.EthermanFullInterface - State *mock_syncinterfaces.StateFullInterface - Pool *mock_syncinterfaces.PoolInterface - EthTxManager *mock_syncinterfaces.EthTxManager - DbTx *syncMocks.DbTxMock - ZKEVMClient *mock_syncinterfaces.ZKEVMClientInterface + Etherman *mock_syncinterfaces.EthermanFullInterface + 
State *mock_syncinterfaces.StateFullInterface + Pool *mock_syncinterfaces.PoolInterface + EthTxManager *mock_syncinterfaces.EthTxManager + DbTx *syncMocks.DbTxMock + ZKEVMClient *mock_syncinterfaces.ZKEVMClientInterface + zkEVMClientEthereumCompatible *mock_syncinterfaces.ZKEVMClientEthereumCompatibleInterface //EventLog *eventLogMock } @@ -47,7 +49,7 @@ type mocks struct { func TestGivenPermissionlessNodeWhenSyncronizeAgainSameBatchThenUseTheOneInMemoryInstaeadOfGettingFromDb(t *testing.T) { genesis, cfg, m := setupGenericTest(t) ethermanForL1 := []syncinterfaces.EthermanFullInterface{m.Etherman} - syncInterface, err := NewSynchronizer(false, m.Etherman, ethermanForL1, m.State, m.Pool, m.EthTxManager, m.ZKEVMClient, nil, *genesis, *cfg, false) + syncInterface, err := NewSynchronizer(false, m.Etherman, ethermanForL1, m.State, m.Pool, m.EthTxManager, m.ZKEVMClient, m.zkEVMClientEthereumCompatible, nil, *genesis, *cfg, false) require.NoError(t, err) sync, ok := syncInterface.(*ClientSynchronizer) require.EqualValues(t, true, ok, "Can't convert to underlaying struct the interface of syncronizer") @@ -87,7 +89,7 @@ func TestGivenPermissionlessNodeWhenSyncronizeAgainSameBatchThenUseTheOneInMemor func TestGivenPermissionlessNodeWhenSyncronizeFirstTimeABatchThenStoreItInALocalVar(t *testing.T) { genesis, cfg, m := setupGenericTest(t) ethermanForL1 := []syncinterfaces.EthermanFullInterface{m.Etherman} - syncInterface, err := NewSynchronizer(false, m.Etherman, ethermanForL1, m.State, m.Pool, m.EthTxManager, m.ZKEVMClient, nil, *genesis, *cfg, false) + syncInterface, err := NewSynchronizer(false, m.Etherman, ethermanForL1, m.State, m.Pool, m.EthTxManager, m.ZKEVMClient, m.zkEVMClientEthereumCompatible, nil, *genesis, *cfg, false) require.NoError(t, err) sync, ok := syncInterface.(*ClientSynchronizer) require.EqualValues(t, true, ok, "Can't convert to underlaying struct the interface of syncronizer") @@ -125,6 +127,7 @@ func TestForcedBatchEtrog(t *testing.T) { SyncInterval: 
cfgTypes.Duration{Duration: 1 * time.Second}, SyncChunkSize: 10, L1SynchronizationMode: SequentialMode, + SyncBlockProtection: "latest", } m := mocks{ @@ -135,7 +138,7 @@ func TestForcedBatchEtrog(t *testing.T) { ZKEVMClient: mock_syncinterfaces.NewZKEVMClientInterface(t), } ethermanForL1 := []syncinterfaces.EthermanFullInterface{m.Etherman} - sync, err := NewSynchronizer(false, m.Etherman, ethermanForL1, m.State, m.Pool, m.EthTxManager, m.ZKEVMClient, nil, genesis, cfg, false) + sync, err := NewSynchronizer(false, m.Etherman, ethermanForL1, m.State, m.Pool, m.EthTxManager, m.ZKEVMClient, m.zkEVMClientEthereumCompatible, nil, genesis, cfg, false) require.NoError(t, err) // state preparation @@ -201,7 +204,7 @@ func TestForcedBatchEtrog(t *testing.T) { Return(ethBlock, nil). Once() - var n *big.Int + n := big.NewInt(rpc.LatestBlockNumber.Int64()) m.Etherman. On("HeaderByNumber", mock.Anything, n). Return(ethHeader, nil). @@ -254,12 +257,19 @@ func TestForcedBatchEtrog(t *testing.T) { fromBlock := ethBlock.NumberU64() + 1 toBlock := fromBlock + cfg.SyncChunkSize - + if toBlock > ethHeader.Number.Uint64() { + toBlock = ethHeader.Number.Uint64() + } m.Etherman. On("GetRollupInfoByBlockRange", mock.Anything, fromBlock, &toBlock). Return(blocks, order, nil). Once() + m.Etherman. + On("EthBlockByNumber", ctx, lastBlock.BlockNumber). + Return(ethBlock, nil). + Once() + m.ZKEVMClient. On("BatchNumber", ctx). 
Return(uint64(1), nil) @@ -378,6 +388,7 @@ func TestSequenceForcedBatchIncaberry(t *testing.T) { SyncInterval: cfgTypes.Duration{Duration: 1 * time.Second}, SyncChunkSize: 10, L1SynchronizationMode: SequentialMode, + SyncBlockProtection: "latest", } m := mocks{ @@ -388,7 +399,7 @@ func TestSequenceForcedBatchIncaberry(t *testing.T) { ZKEVMClient: mock_syncinterfaces.NewZKEVMClientInterface(t), } ethermanForL1 := []syncinterfaces.EthermanFullInterface{m.Etherman} - sync, err := NewSynchronizer(true, m.Etherman, ethermanForL1, m.State, m.Pool, m.EthTxManager, m.ZKEVMClient, nil, genesis, cfg, false) + sync, err := NewSynchronizer(true, m.Etherman, ethermanForL1, m.State, m.Pool, m.EthTxManager, m.ZKEVMClient, m.zkEVMClientEthereumCompatible, nil, genesis, cfg, false) require.NoError(t, err) // state preparation @@ -455,7 +466,7 @@ func TestSequenceForcedBatchIncaberry(t *testing.T) { Return(ethBlock, nil). Once() - var n *big.Int + n := big.NewInt(rpc.LatestBlockNumber.Int64()) m.Etherman. On("HeaderByNumber", ctx, n). Return(ethHeader, nil). @@ -503,12 +514,19 @@ func TestSequenceForcedBatchIncaberry(t *testing.T) { fromBlock := ethBlock.NumberU64() + 1 toBlock := fromBlock + cfg.SyncChunkSize - + if toBlock > ethHeader.Number.Uint64() { + toBlock = ethHeader.Number.Uint64() + } m.Etherman. On("GetRollupInfoByBlockRange", ctx, fromBlock, &toBlock). Return(blocks, order, nil). Once() + m.Etherman. + On("EthBlockByNumber", ctx, lastBlock.BlockNumber). + Return(ethBlock, nil). + Once() + m.State. On("BeginStateTransaction", ctx). Return(m.DbTx, nil). 
@@ -617,6 +635,7 @@ func setupGenericTest(t *testing.T) (*state.Genesis, *Config, *mocks) { SyncInterval: cfgTypes.Duration{Duration: 1 * time.Second}, SyncChunkSize: 10, L1SynchronizationMode: SequentialMode, + SyncBlockProtection: "latest", L1ParallelSynchronization: L1ParallelSynchronizationConfig{ MaxClients: 2, MaxPendingNoProcessedBlocks: 2, @@ -631,12 +650,13 @@ func setupGenericTest(t *testing.T) (*state.Genesis, *Config, *mocks) { } m := mocks{ - Etherman: mock_syncinterfaces.NewEthermanFullInterface(t), - State: mock_syncinterfaces.NewStateFullInterface(t), - Pool: mock_syncinterfaces.NewPoolInterface(t), - DbTx: syncMocks.NewDbTxMock(t), - ZKEVMClient: mock_syncinterfaces.NewZKEVMClientInterface(t), - EthTxManager: mock_syncinterfaces.NewEthTxManager(t), + Etherman: mock_syncinterfaces.NewEthermanFullInterface(t), + State: mock_syncinterfaces.NewStateFullInterface(t), + Pool: mock_syncinterfaces.NewPoolInterface(t), + DbTx: syncMocks.NewDbTxMock(t), + ZKEVMClient: mock_syncinterfaces.NewZKEVMClientInterface(t), + zkEVMClientEthereumCompatible: mock_syncinterfaces.NewZKEVMClientEthereumCompatibleInterface(t), + EthTxManager: mock_syncinterfaces.NewEthTxManager(t), //EventLog: newEventLogMock(t), } return &genesis, &cfg, &m diff --git a/test/config/test.node.config.toml b/test/config/test.node.config.toml index 2ef446a11e..e5a1dc7981 100644 --- a/test/config/test.node.config.toml +++ b/test/config/test.node.config.toml @@ -86,6 +86,7 @@ DisableAPIs = [] SyncInterval = "1s" SyncChunkSize = 100 TrustedSequencerURL = "" # If it is empty or not specified, then the value is read from the smc. 
+SyncBlockProtection = "latest" # latest, finalized, safe L1SynchronizationMode = "sequential" [Synchronizer.L1ParallelSynchronization] MaxClients = 10 diff --git a/test/contracts/auto/CounterAndBlock.sol b/test/contracts/auto/CounterAndBlock.sol new file mode 100644 index 0000000000..53035f0634 --- /dev/null +++ b/test/contracts/auto/CounterAndBlock.sol @@ -0,0 +1,15 @@ +// SPDX-License-Identifier: GPL-3.0 + +pragma solidity >=0.7.0 <0.9.0; + +contract CounterAndBlock { + uint public count; + + function increment() external { + count += 1; + } + + function getCount() public view returns (uint, uint) { + return (count, block.timestamp); + } +} diff --git a/test/contracts/bin/CounterAndBlock/CounterAndBlock.go b/test/contracts/bin/CounterAndBlock/CounterAndBlock.go new file mode 100644 index 0000000000..c066117f4d --- /dev/null +++ b/test/contracts/bin/CounterAndBlock/CounterAndBlock.go @@ -0,0 +1,287 @@ +// Code generated - DO NOT EDIT. +// This file is a generated binding and any manual changes will be lost. + +package CounterAndBlock + +import ( + "errors" + "math/big" + "strings" + + ethereum "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/event" +) + +// Reference imports to suppress errors if they are not otherwise used. +var ( + _ = errors.New + _ = big.NewInt + _ = strings.NewReader + _ = ethereum.NotFound + _ = bind.Bind + _ = common.Big1 + _ = types.BloomLookup + _ = event.NewSubscription + _ = abi.ConvertType +) + +// CounterAndBlockMetaData contains all meta data concerning the CounterAndBlock contract. 
+var CounterAndBlockMetaData = &bind.MetaData{ + ABI: "[{\"inputs\":[],\"name\":\"count\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getCount\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"increment\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"}]", + Bin: "0x608060405234801561001057600080fd5b5060eb8061001f6000396000f3fe6080604052348015600f57600080fd5b5060043610603c5760003560e01c806306661abd146041578063a87d942c14605c578063d09de08a146071575b600080fd5b604960005481565b6040519081526020015b60405180910390f35b60005460408051918252426020830152016053565b60776079565b005b6001600080828254608991906090565b9091555050565b6000821982111560b057634e487b7160e01b600052601160045260246000fd5b50019056fea26469706673582212205aa9aebefdfb857d27d7bdc8475c08138617cc37e78c2e6bd98acb9a1484994964736f6c634300080c0033", +} + +// CounterAndBlockABI is the input ABI used to generate the binding from. +// Deprecated: Use CounterAndBlockMetaData.ABI instead. +var CounterAndBlockABI = CounterAndBlockMetaData.ABI + +// CounterAndBlockBin is the compiled bytecode used for deploying new contracts. +// Deprecated: Use CounterAndBlockMetaData.Bin instead. +var CounterAndBlockBin = CounterAndBlockMetaData.Bin + +// DeployCounterAndBlock deploys a new Ethereum contract, binding an instance of CounterAndBlock to it. 
+func DeployCounterAndBlock(auth *bind.TransactOpts, backend bind.ContractBackend) (common.Address, *types.Transaction, *CounterAndBlock, error) { + parsed, err := CounterAndBlockMetaData.GetAbi() + if err != nil { + return common.Address{}, nil, nil, err + } + if parsed == nil { + return common.Address{}, nil, nil, errors.New("GetABI returned nil") + } + + address, tx, contract, err := bind.DeployContract(auth, *parsed, common.FromHex(CounterAndBlockBin), backend) + if err != nil { + return common.Address{}, nil, nil, err + } + return address, tx, &CounterAndBlock{CounterAndBlockCaller: CounterAndBlockCaller{contract: contract}, CounterAndBlockTransactor: CounterAndBlockTransactor{contract: contract}, CounterAndBlockFilterer: CounterAndBlockFilterer{contract: contract}}, nil +} + +// CounterAndBlock is an auto generated Go binding around an Ethereum contract. +type CounterAndBlock struct { + CounterAndBlockCaller // Read-only binding to the contract + CounterAndBlockTransactor // Write-only binding to the contract + CounterAndBlockFilterer // Log filterer for contract events +} + +// CounterAndBlockCaller is an auto generated read-only Go binding around an Ethereum contract. +type CounterAndBlockCaller struct { + contract *bind.BoundContract // Generic contract wrapper for the low level calls +} + +// CounterAndBlockTransactor is an auto generated write-only Go binding around an Ethereum contract. +type CounterAndBlockTransactor struct { + contract *bind.BoundContract // Generic contract wrapper for the low level calls +} + +// CounterAndBlockFilterer is an auto generated log filtering Go binding around an Ethereum contract events. +type CounterAndBlockFilterer struct { + contract *bind.BoundContract // Generic contract wrapper for the low level calls +} + +// CounterAndBlockSession is an auto generated Go binding around an Ethereum contract, +// with pre-set call and transact options. 
+type CounterAndBlockSession struct { + Contract *CounterAndBlock // Generic contract binding to set the session for + CallOpts bind.CallOpts // Call options to use throughout this session + TransactOpts bind.TransactOpts // Transaction auth options to use throughout this session +} + +// CounterAndBlockCallerSession is an auto generated read-only Go binding around an Ethereum contract, +// with pre-set call options. +type CounterAndBlockCallerSession struct { + Contract *CounterAndBlockCaller // Generic contract caller binding to set the session for + CallOpts bind.CallOpts // Call options to use throughout this session +} + +// CounterAndBlockTransactorSession is an auto generated write-only Go binding around an Ethereum contract, +// with pre-set transact options. +type CounterAndBlockTransactorSession struct { + Contract *CounterAndBlockTransactor // Generic contract transactor binding to set the session for + TransactOpts bind.TransactOpts // Transaction auth options to use throughout this session +} + +// CounterAndBlockRaw is an auto generated low-level Go binding around an Ethereum contract. +type CounterAndBlockRaw struct { + Contract *CounterAndBlock // Generic contract binding to access the raw methods on +} + +// CounterAndBlockCallerRaw is an auto generated low-level read-only Go binding around an Ethereum contract. +type CounterAndBlockCallerRaw struct { + Contract *CounterAndBlockCaller // Generic read-only contract binding to access the raw methods on +} + +// CounterAndBlockTransactorRaw is an auto generated low-level write-only Go binding around an Ethereum contract. +type CounterAndBlockTransactorRaw struct { + Contract *CounterAndBlockTransactor // Generic write-only contract binding to access the raw methods on +} + +// NewCounterAndBlock creates a new instance of CounterAndBlock, bound to a specific deployed contract. 
+func NewCounterAndBlock(address common.Address, backend bind.ContractBackend) (*CounterAndBlock, error) { + contract, err := bindCounterAndBlock(address, backend, backend, backend) + if err != nil { + return nil, err + } + return &CounterAndBlock{CounterAndBlockCaller: CounterAndBlockCaller{contract: contract}, CounterAndBlockTransactor: CounterAndBlockTransactor{contract: contract}, CounterAndBlockFilterer: CounterAndBlockFilterer{contract: contract}}, nil +} + +// NewCounterAndBlockCaller creates a new read-only instance of CounterAndBlock, bound to a specific deployed contract. +func NewCounterAndBlockCaller(address common.Address, caller bind.ContractCaller) (*CounterAndBlockCaller, error) { + contract, err := bindCounterAndBlock(address, caller, nil, nil) + if err != nil { + return nil, err + } + return &CounterAndBlockCaller{contract: contract}, nil +} + +// NewCounterAndBlockTransactor creates a new write-only instance of CounterAndBlock, bound to a specific deployed contract. +func NewCounterAndBlockTransactor(address common.Address, transactor bind.ContractTransactor) (*CounterAndBlockTransactor, error) { + contract, err := bindCounterAndBlock(address, nil, transactor, nil) + if err != nil { + return nil, err + } + return &CounterAndBlockTransactor{contract: contract}, nil +} + +// NewCounterAndBlockFilterer creates a new log filterer instance of CounterAndBlock, bound to a specific deployed contract. +func NewCounterAndBlockFilterer(address common.Address, filterer bind.ContractFilterer) (*CounterAndBlockFilterer, error) { + contract, err := bindCounterAndBlock(address, nil, nil, filterer) + if err != nil { + return nil, err + } + return &CounterAndBlockFilterer{contract: contract}, nil +} + +// bindCounterAndBlock binds a generic wrapper to an already deployed contract. 
+func bindCounterAndBlock(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) { + parsed, err := CounterAndBlockMetaData.GetAbi() + if err != nil { + return nil, err + } + return bind.NewBoundContract(address, *parsed, caller, transactor, filterer), nil +} + +// Call invokes the (constant) contract method with params as input values and +// sets the output to result. The result type might be a single field for simple +// returns, a slice of interfaces for anonymous returns and a struct for named +// returns. +func (_CounterAndBlock *CounterAndBlockRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _CounterAndBlock.Contract.CounterAndBlockCaller.contract.Call(opts, result, method, params...) +} + +// Transfer initiates a plain transaction to move funds to the contract, calling +// its default method if one is available. +func (_CounterAndBlock *CounterAndBlockRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _CounterAndBlock.Contract.CounterAndBlockTransactor.contract.Transfer(opts) +} + +// Transact invokes the (paid) contract method with params as input values. +func (_CounterAndBlock *CounterAndBlockRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _CounterAndBlock.Contract.CounterAndBlockTransactor.contract.Transact(opts, method, params...) +} + +// Call invokes the (constant) contract method with params as input values and +// sets the output to result. The result type might be a single field for simple +// returns, a slice of interfaces for anonymous returns and a struct for named +// returns. 
+func (_CounterAndBlock *CounterAndBlockCallerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _CounterAndBlock.Contract.contract.Call(opts, result, method, params...) +} + +// Transfer initiates a plain transaction to move funds to the contract, calling +// its default method if one is available. +func (_CounterAndBlock *CounterAndBlockTransactorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _CounterAndBlock.Contract.contract.Transfer(opts) +} + +// Transact invokes the (paid) contract method with params as input values. +func (_CounterAndBlock *CounterAndBlockTransactorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _CounterAndBlock.Contract.contract.Transact(opts, method, params...) +} + +// Count is a free data retrieval call binding the contract method 0x06661abd. +// +// Solidity: function count() view returns(uint256) +func (_CounterAndBlock *CounterAndBlockCaller) Count(opts *bind.CallOpts) (*big.Int, error) { + var out []interface{} + err := _CounterAndBlock.contract.Call(opts, &out, "count") + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +// Count is a free data retrieval call binding the contract method 0x06661abd. +// +// Solidity: function count() view returns(uint256) +func (_CounterAndBlock *CounterAndBlockSession) Count() (*big.Int, error) { + return _CounterAndBlock.Contract.Count(&_CounterAndBlock.CallOpts) +} + +// Count is a free data retrieval call binding the contract method 0x06661abd. +// +// Solidity: function count() view returns(uint256) +func (_CounterAndBlock *CounterAndBlockCallerSession) Count() (*big.Int, error) { + return _CounterAndBlock.Contract.Count(&_CounterAndBlock.CallOpts) +} + +// GetCount is a free data retrieval call binding the contract method 0xa87d942c. 
+// +// Solidity: function getCount() view returns(uint256, uint256) +func (_CounterAndBlock *CounterAndBlockCaller) GetCount(opts *bind.CallOpts) (*big.Int, *big.Int, error) { + var out []interface{} + err := _CounterAndBlock.contract.Call(opts, &out, "getCount") + + if err != nil { + return *new(*big.Int), *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + out1 := *abi.ConvertType(out[1], new(*big.Int)).(**big.Int) + + return out0, out1, err + +} + +// GetCount is a free data retrieval call binding the contract method 0xa87d942c. +// +// Solidity: function getCount() view returns(uint256, uint256) +func (_CounterAndBlock *CounterAndBlockSession) GetCount() (*big.Int, *big.Int, error) { + return _CounterAndBlock.Contract.GetCount(&_CounterAndBlock.CallOpts) +} + +// GetCount is a free data retrieval call binding the contract method 0xa87d942c. +// +// Solidity: function getCount() view returns(uint256, uint256) +func (_CounterAndBlock *CounterAndBlockCallerSession) GetCount() (*big.Int, *big.Int, error) { + return _CounterAndBlock.Contract.GetCount(&_CounterAndBlock.CallOpts) +} + +// Increment is a paid mutator transaction binding the contract method 0xd09de08a. +// +// Solidity: function increment() returns() +func (_CounterAndBlock *CounterAndBlockTransactor) Increment(opts *bind.TransactOpts) (*types.Transaction, error) { + return _CounterAndBlock.contract.Transact(opts, "increment") +} + +// Increment is a paid mutator transaction binding the contract method 0xd09de08a. +// +// Solidity: function increment() returns() +func (_CounterAndBlock *CounterAndBlockSession) Increment() (*types.Transaction, error) { + return _CounterAndBlock.Contract.Increment(&_CounterAndBlock.TransactOpts) +} + +// Increment is a paid mutator transaction binding the contract method 0xd09de08a. 
+// +// Solidity: function increment() returns() +func (_CounterAndBlock *CounterAndBlockTransactorSession) Increment() (*types.Transaction, error) { + return _CounterAndBlock.Contract.Increment(&_CounterAndBlock.TransactOpts) +} diff --git a/test/e2e/forced_batches_vector_shared.go b/test/e2e/forced_batches_vector_shared.go index f44661cffe..12ceb3af5d 100644 --- a/test/e2e/forced_batches_vector_shared.go +++ b/test/e2e/forced_batches_vector_shared.go @@ -93,7 +93,7 @@ func LaunchTestForcedBatchesVectorFilesGroup(t *testing.T, vectorFilesDir string } log.Info("#######################") - log.Info("# Verifying new leafs #") + log.Info("# Verifying new leaves #") log.Info("#######################") merkleTree := opsman.State().GetTree() for _, expectedNewLeaf := range testCase.ExpectedNewLeafs { diff --git a/test/e2e/sc_test.go b/test/e2e/sc_test.go index f337f6ccf1..720aa68cd2 100644 --- a/test/e2e/sc_test.go +++ b/test/e2e/sc_test.go @@ -9,6 +9,7 @@ import ( "github.com/0xPolygonHermez/zkevm-node/log" "github.com/0xPolygonHermez/zkevm-node/state" "github.com/0xPolygonHermez/zkevm-node/test/contracts/bin/Counter" + "github.com/0xPolygonHermez/zkevm-node/test/contracts/bin/CounterAndBlock" "github.com/0xPolygonHermez/zkevm-node/test/contracts/bin/EmitLog2" "github.com/0xPolygonHermez/zkevm-node/test/contracts/bin/FailureTest" "github.com/0xPolygonHermez/zkevm-node/test/contracts/bin/Read" @@ -646,3 +647,111 @@ func TestRead(t *testing.T) { require.Equal(t, 0, big.NewInt(2).Cmp(value)) } } + +func TestCounterAndBlock(t *testing.T) { + if testing.Short() { + t.Skip() + } + + var err error + err = operations.Teardown() + require.NoError(t, err) + + defer func() { require.NoError(t, operations.Teardown()) }() + + ctx := context.Background() + opsCfg := operations.GetDefaultOperationsConfig() + opsMan, err := operations.NewManager(ctx, opsCfg) + require.NoError(t, err) + err = opsMan.Setup() + require.NoError(t, err) + + for _, network := range networks { + 
log.Debugf(network.Name) + client := operations.MustGetClient(network.URL) + priKey := network.PrivateKey + if network.Name == "Local L2" { + priKey = "0xde3ca643a52f5543e84ba984c4419ff40dbabd0e483c31c1d09fee8168d68e38" + } + auth := operations.MustGetAuth(priKey, network.ChainID) + log.Infof("auth:%v, chainID:%v", auth.From.String(), network.ChainID) + + _, scTx, sc, err := CounterAndBlock.DeployCounterAndBlock(auth, client) + require.NoError(t, err) + + logTx(scTx) + err = operations.WaitTxToBeMined(ctx, client, scTx, operations.DefaultTimeoutTxToBeMined) + require.NoError(t, err) + + scReceipt, err := client.TransactionReceipt(ctx, scTx.Hash()) + require.NoError(t, err) + + scBlock, err := client.BlockByNumber(ctx, scReceipt.BlockNumber) + require.NoError(t, err) + + count, ts, err := sc.GetCount(&bind.CallOpts{Pending: false, BlockNumber: scBlock.Number()}) + require.NoError(t, err) + + assert.Equal(t, 0, count.Cmp(big.NewInt(0))) + assert.Equal(t, ts.Uint64(), scBlock.Time()) + + const numberOfIncrements = 5 + type result struct { + tx *types.Transaction + receipt *types.Receipt + block *types.Block + expectedCount *big.Int + } + + results := make([]result, 0, numberOfIncrements) + for i := 0; i < numberOfIncrements; i++ { + tx, err := sc.Increment(auth) + require.NoError(t, err) + + logTx(tx) + err = operations.WaitTxToBeMined(ctx, client, tx, operations.DefaultTimeoutTxToBeMined) + require.NoError(t, err) + + receipt, err := client.TransactionReceipt(ctx, tx.Hash()) + require.NoError(t, err) + + block, err := client.BlockByNumber(ctx, receipt.BlockNumber) + require.NoError(t, err) + + results = append(results, result{ + tx: tx, + expectedCount: big.NewInt(int64(i) + 1), + receipt: receipt, + block: block, + }) + } + + const numberOfChecks = 2 + + // checks against first increment + for _, r := range results { + for i := 0; i < numberOfChecks; i++ { + count, ts, err = sc.GetCount(&bind.CallOpts{Pending: false, BlockNumber: r.block.Number()}) + 
require.NoError(t, err) + assert.Equal(t, r.expectedCount.Uint64(), count.Uint64()) + assert.Equal(t, r.block.Time(), ts.Uint64()) + + time.Sleep(time.Second) + } + } + + latestIncrement := results[len(results)-1] + // checks against second increment with latest block + for i := 0; i < numberOfChecks; i++ { + latestBlock, err := client.BlockByNumber(ctx, nil) + require.NoError(t, err) + + count, ts, err = sc.GetCount(&bind.CallOpts{Pending: false}) + require.NoError(t, err) + assert.Equal(t, latestIncrement.expectedCount.Uint64(), count.Uint64()) + assert.Equal(t, latestBlock.Time(), ts.Uint64()) + + time.Sleep(time.Second) + } + } +} diff --git a/test/e2e/state_test.go b/test/e2e/state_test.go index 9f9712c3bb..2db3b38bd3 100644 --- a/test/e2e/state_test.go +++ b/test/e2e/state_test.go @@ -83,7 +83,7 @@ func TestStateTransition(t *testing.T) { st := opsman.State() - // Check leafs + // Check leaves l2Block, err := st.GetLastL2Block(ctx, nil) require.NoError(t, err) for addrStr, leaf := range testCase.ExpectedNewLeafs { diff --git a/tools/genesis/genesisparser/genesisparser.go b/tools/genesis/genesisparser/genesisparser.go index 27a037ebe0..d6109ff969 100644 --- a/tools/genesis/genesisparser/genesisparser.go +++ b/tools/genesis/genesisparser/genesisparser.go @@ -16,32 +16,32 @@ type GenesisAccountTest struct { // GenesisTest2Actions change format from testvector to the used internaly func GenesisTest2Actions(accounts []GenesisAccountTest) []*state.GenesisAction { - leafs := make([]*state.GenesisAction, 0) + leaves := make([]*state.GenesisAction, 0) for _, acc := range accounts { if len(acc.Balance) != 0 && acc.Balance != "0" { - leafs = append(leafs, &state.GenesisAction{ + leaves = append(leaves, &state.GenesisAction{ Address: acc.Address, Type: int(merkletree.LeafTypeBalance), Value: acc.Balance, }) } if len(acc.Nonce) != 0 && acc.Nonce != "0" { - leafs = append(leafs, &state.GenesisAction{ + leaves = append(leaves, &state.GenesisAction{ Address: acc.Address, 
Type: int(merkletree.LeafTypeNonce), Value: acc.Nonce, }) } if len(acc.Bytecode) != 0 { - leafs = append(leafs, &state.GenesisAction{ + leaves = append(leaves, &state.GenesisAction{ Address: acc.Address, Type: int(merkletree.LeafTypeCode), Bytecode: acc.Bytecode, }) } for key, value := range acc.Storage { - leafs = append(leafs, &state.GenesisAction{ + leaves = append(leaves, &state.GenesisAction{ Address: acc.Address, Type: int(merkletree.LeafTypeStorage), StoragePosition: key, @@ -49,5 +49,5 @@ func GenesisTest2Actions(accounts []GenesisAccountTest) []*state.GenesisAction { }) } } - return leafs + return leaves }