diff --git a/dot/core/service.go b/dot/core/service.go index e4bc6d846a..cb2552eefb 100644 --- a/dot/core/service.go +++ b/dot/core/service.go @@ -22,8 +22,8 @@ import ( "github.com/ChainSafe/gossamer/lib/runtime/wasmer" "github.com/ChainSafe/gossamer/lib/services" "github.com/ChainSafe/gossamer/lib/transaction" - cscale "github.com/centrifuge/go-substrate-rpc-client/v3/scale" - ctypes "github.com/centrifuge/go-substrate-rpc-client/v3/types" + cscale "github.com/centrifuge/go-substrate-rpc-client/v4/scale" + ctypes "github.com/centrifuge/go-substrate-rpc-client/v4/types" ) var ( diff --git a/dot/digest/digest.go b/dot/digest/digest.go index 631be163f8..2d6d8203fa 100644 --- a/dot/digest/digest.go +++ b/dot/digest/digest.go @@ -18,6 +18,10 @@ var ( _ services.Service = &Handler{} ) +var ( + ErrUnknownConsensusEngineID = errors.New("unknown consensus engine ID") +) + // Handler is used to handle consensus messages and relevant authority updates to BABE and GRANDPA type Handler struct { ctx context.Context @@ -32,28 +36,9 @@ type Handler struct { imported chan *types.Block finalised chan *types.FinalisationInfo - // GRANDPA changes - grandpaScheduledChange *grandpaChange - grandpaForcedChange *grandpaChange - grandpaPause *pause - grandpaResume *resume - logger log.LeveledLogger } -type grandpaChange struct { - auths []types.Authority - atBlock uint -} - -type pause struct { - atBlock uint -} - -type resume struct { - atBlock uint -} - // NewHandler returns a new Handler func NewHandler(lvl log.Level, blockState BlockState, epochState EpochState, grandpaState GrandpaState) (*Handler, error) { @@ -91,44 +76,80 @@ func (h *Handler) Stop() error { return nil } -// NextGrandpaAuthorityChange returns the block number of the next upcoming grandpa authorities change. -// It returns 0 if no change is scheduled. 
-func (h *Handler) NextGrandpaAuthorityChange() (next uint) { - next = ^uint(0) - - if h.grandpaScheduledChange != nil { - next = h.grandpaScheduledChange.atBlock +// HandleDigests handles consensus digests for an imported block +func (h *Handler) HandleDigests(header *types.Header) error { + consensusDigests := h.toConsensusDigests(header.Digest.Types) + consensusDigests, err := checkForGRANDPAForcedChanges(consensusDigests) + if err != nil { + return fmt.Errorf("failed while checking GRANDPA digests: %w", err) } - if h.grandpaForcedChange != nil && h.grandpaForcedChange.atBlock < next { - next = h.grandpaForcedChange.atBlock + for i := range consensusDigests { + // avoiding implicit memory aliasing in for loop, since: + // for _, digest := range consensusDigests { &digest } + // is using the address of a loop variable + digest := consensusDigests[i] + err := h.handleConsensusDigest(&digest, header) + if err != nil { + h.logger.Errorf("cannot handle consensus digest: %w", err) + } } - if h.grandpaPause != nil && h.grandpaPause.atBlock < next { - next = h.grandpaPause.atBlock - } + return nil +} + +// toConsensusDigests converts a slice of scale.VaryingDataType to a slice of types.ConsensusDigest. 
+func (h *Handler) toConsensusDigests(scaleVaryingTypes []scale.VaryingDataType) []types.ConsensusDigest { + consensusDigests := make([]types.ConsensusDigest, 0, len(scaleVaryingTypes)) + + for _, d := range scaleVaryingTypes { + digest, ok := d.Value().(types.ConsensusDigest) + if !ok { + h.logger.Debugf("digest type not supported: %T", d.Value()) + continue + } - if h.grandpaResume != nil && h.grandpaResume.atBlock < next { - next = h.grandpaResume.atBlock + switch digest.ConsensusEngineID { + case types.GrandpaEngineID, types.BabeEngineID: + consensusDigests = append(consensusDigests, digest) + } } - return next + return consensusDigests } -// HandleDigests handles consensus digests for an imported block -func (h *Handler) HandleDigests(header *types.Header) { - for i, d := range header.Digest.Types { - val, ok := d.Value().(types.ConsensusDigest) - if !ok { +// checkForGRANDPAForcedChanges removes any GrandpaScheduledChange in the presence of a +// GrandpaForcedChange in the same block digest, returning a new slice of types.ConsensusDigest +func checkForGRANDPAForcedChanges(digests []types.ConsensusDigest) ([]types.ConsensusDigest, error) { + var hasForcedChange bool + digestsWithoutScheduled := make([]types.ConsensusDigest, 0, len(digests)) + for _, digest := range digests { + if digest.ConsensusEngineID != types.GrandpaEngineID { + digestsWithoutScheduled = append(digestsWithoutScheduled, digest) continue } - err := h.handleConsensusDigest(&val, header) + data := types.NewGrandpaConsensusDigest() + err := scale.Unmarshal(digest.Data, &data) if err != nil { - h.logger.Errorf("cannot handle digest for block number %d, index %d, digest %s: %s", - header.Number, i, d.Value(), err) + return nil, fmt.Errorf("cannot unmarshal GRANDPA consensus digest: %w", err) + } + + switch data.Value().(type) { + case types.GrandpaScheduledChange: + case types.GrandpaForcedChange: + hasForcedChange = true + digestsWithoutScheduled = append(digestsWithoutScheduled, digest) + 
default: + digestsWithoutScheduled = append(digestsWithoutScheduled, digest) } } + + if hasForcedChange { + return digestsWithoutScheduled, nil + } + + return digests, nil } func (h *Handler) handleConsensusDigest(d *types.ConsensusDigest, header *types.Header) error { @@ -139,42 +160,19 @@ func (h *Handler) handleConsensusDigest(d *types.ConsensusDigest, header *types. if err != nil { return err } - err = h.handleGrandpaConsensusDigest(data, header) - if err != nil { - return err - } - return nil + + return h.grandpaState.HandleGRANDPADigest(header, data) case types.BabeEngineID: data := types.NewBabeConsensusDigest() err := scale.Unmarshal(d.Data, &data) if err != nil { return err } - err = h.handleBabeConsensusDigest(data, header) - if err != nil { - return err - } - return nil - } - - return errors.New("unknown consensus engine ID") -} -func (h *Handler) handleGrandpaConsensusDigest(digest scale.VaryingDataType, header *types.Header) error { - switch val := digest.Value().(type) { - case types.GrandpaScheduledChange: - return h.handleScheduledChange(val, header) - case types.GrandpaForcedChange: - return h.handleForcedChange(val, header) - case types.GrandpaOnDisabled: - return nil // do nothing, as this is not implemented in substrate - case types.GrandpaPause: - return h.handlePause(val) - case types.GrandpaResume: - return h.handleResume(val) + return h.handleBabeConsensusDigest(data, header) + default: + return fmt.Errorf("%w: 0x%x", ErrUnknownConsensusEngineID, d.ConsensusEngineID.ToBytes()) } - - return errors.New("invalid consensus digest data") } func (h *Handler) handleBabeConsensusDigest(digest scale.VaryingDataType, header *types.Header) error { @@ -194,7 +192,7 @@ func (h *Handler) handleBabeConsensusDigest(digest scale.VaryingDataType, header return nil case types.BABEOnDisabled: - return h.handleBABEOnDisabled(val, header) + return nil case types.NextConfigData: currEpoch, err := h.epochState.GetEpochForBlock(header) @@ -220,10 +218,14 @@ func (h 
*Handler) handleBlockImport(ctx context.Context) { continue } - h.HandleDigests(&block.Header) - err := h.handleGrandpaChangesOnImport(block.Header.Number) + err := h.HandleDigests(&block.Header) if err != nil { - h.logger.Errorf("failed to handle grandpa changes on block import: %s", err) + h.logger.Errorf("failed to handle digests: %s", err) + } + + err = h.grandpaState.ApplyForcedChanges(&block.Header) + if err != nil { + h.logger.Errorf("failed to apply forced changes: %s", err) } case <-ctx.Done(): return @@ -249,159 +251,13 @@ func (h *Handler) handleBlockFinalisation(ctx context.Context) { h.logger.Errorf("failed to persist babe next epoch config: %s", err) } - err = h.handleGrandpaChangesOnFinalization(info.Header.Number) + err = h.grandpaState.ApplyScheduledChanges(&info.Header) if err != nil { - h.logger.Errorf("failed to handle grandpa changes on block finalisation: %s", err) + h.logger.Errorf("failed to apply scheduled change: %s", err) } + case <-ctx.Done(): return } } } - -func (h *Handler) handleGrandpaChangesOnImport(num uint) error { - resume := h.grandpaResume - if resume != nil && num >= resume.atBlock { - h.grandpaResume = nil - } - - fc := h.grandpaForcedChange - if fc != nil && num >= fc.atBlock { - curr, err := h.grandpaState.IncrementSetID() - if err != nil { - return err - } - - h.grandpaForcedChange = nil - h.logger.Debugf("incremented grandpa set id %d", curr) - } - - return nil -} - -func (h *Handler) handleGrandpaChangesOnFinalization(num uint) error { - pause := h.grandpaPause - if pause != nil && num >= pause.atBlock { - h.grandpaPause = nil - } - - sc := h.grandpaScheduledChange - if sc != nil && num >= sc.atBlock { - curr, err := h.grandpaState.IncrementSetID() - if err != nil { - return err - } - - h.grandpaScheduledChange = nil - h.logger.Debugf("incremented grandpa set id %d", curr) - } - - // if blocks get finalised before forced change takes place, disregard it - h.grandpaForcedChange = nil - return nil -} - -func (h *Handler) 
handleScheduledChange(sc types.GrandpaScheduledChange, header *types.Header) error { - curr, err := h.blockState.BestBlockHeader() - if err != nil { - return err - } - - if h.grandpaScheduledChange != nil { - return nil - } - - h.logger.Debugf("handling GrandpaScheduledChange data: %v", sc) - - c, err := newGrandpaChange(sc.Auths, sc.Delay, curr.Number) - if err != nil { - return err - } - - h.grandpaScheduledChange = c - - auths, err := types.GrandpaAuthoritiesRawToAuthorities(sc.Auths) - if err != nil { - return err - } - h.logger.Debugf("setting GrandpaScheduledChange at block %d", - header.Number+uint(sc.Delay)) - return h.grandpaState.SetNextChange( - types.NewGrandpaVotersFromAuthorities(auths), - header.Number+uint(sc.Delay), - ) -} - -func (h *Handler) handleForcedChange(fc types.GrandpaForcedChange, header *types.Header) error { - if header == nil { - return errors.New("header is nil") - } - - if h.grandpaForcedChange != nil { - return errors.New("already have forced change scheduled") - } - - h.logger.Debugf("handling GrandpaForcedChange with data %v", fc) - - c, err := newGrandpaChange(fc.Auths, fc.Delay, header.Number) - if err != nil { - return err - } - - h.grandpaForcedChange = c - - auths, err := types.GrandpaAuthoritiesRawToAuthorities(fc.Auths) - if err != nil { - return err - } - - h.logger.Debugf("setting GrandpaForcedChange at block %d", - header.Number+uint(fc.Delay)) - return h.grandpaState.SetNextChange( - types.NewGrandpaVotersFromAuthorities(auths), - header.Number+uint(fc.Delay), - ) -} - -func (h *Handler) handlePause(p types.GrandpaPause) error { - curr, err := h.blockState.BestBlockHeader() - if err != nil { - return err - } - - h.grandpaPause = &pause{ - atBlock: curr.Number + uint(p.Delay), - } - - return h.grandpaState.SetNextPause(h.grandpaPause.atBlock) -} - -func (h *Handler) handleResume(r types.GrandpaResume) error { - curr, err := h.blockState.BestBlockHeader() - if err != nil { - return err - } - - h.grandpaResume = &resume{ 
- atBlock: curr.Number + uint(r.Delay), - } - - return h.grandpaState.SetNextResume(h.grandpaResume.atBlock) -} - -func newGrandpaChange(raw []types.GrandpaAuthoritiesRaw, delay uint32, currBlock uint) (*grandpaChange, error) { - auths, err := types.GrandpaAuthoritiesRawToAuthorities(raw) - if err != nil { - return nil, err - } - - return &grandpaChange{ - auths: auths, - atBlock: currBlock + uint(delay), - }, nil -} - -func (h *Handler) handleBABEOnDisabled(_ types.BABEOnDisabled, _ *types.Header) error { - h.logger.Debug("handling BABEOnDisabled") - return nil -} diff --git a/dot/digest/digest_test.go b/dot/digest/digest_test.go index dcb0d8633b..fa3414d42b 100644 --- a/dot/digest/digest_test.go +++ b/dot/digest/digest_test.go @@ -13,17 +13,20 @@ import ( "github.com/ChainSafe/gossamer/dot/types" "github.com/ChainSafe/gossamer/internal/log" "github.com/ChainSafe/gossamer/lib/common" + "github.com/ChainSafe/gossamer/lib/crypto" "github.com/ChainSafe/gossamer/lib/crypto/ed25519" "github.com/ChainSafe/gossamer/lib/crypto/sr25519" "github.com/ChainSafe/gossamer/lib/genesis" "github.com/ChainSafe/gossamer/lib/keystore" "github.com/ChainSafe/gossamer/pkg/scale" "github.com/golang/mock/gomock" + "github.com/gtank/merlin" "github.com/stretchr/testify/require" ) //go:generate mockgen -destination=mock_telemetry_test.go -package $GOPACKAGE github.com/ChainSafe/gossamer/dot/telemetry Client +//go:generate mockgen -destination=mock_grandpa_test.go -package $GOPACKAGE . 
GrandpaState func newTestHandler(t *testing.T) (*Handler, *state.Service) { testDatadirPath := t.TempDir() @@ -59,6 +62,13 @@ func TestHandler_GrandpaScheduledChange(t *testing.T) { handler.Start() defer handler.Stop() + // create 4 blocks and finalize only blocks 0, 1, 2 + headers, _ := state.AddBlocksToState(t, handler.blockState.(*state.BlockState), 4, false) + for i, h := range headers[:3] { + err := handler.blockState.(*state.BlockState).SetFinalisedHash(h.Hash(), uint64(i), 0) + require.NoError(t, err) + } + kr, err := keystore.NewEd25519Keyring() require.NoError(t, err) @@ -66,7 +76,7 @@ func TestHandler_GrandpaScheduledChange(t *testing.T) { Auths: []types.GrandpaAuthoritiesRaw{ {Key: kr.Alice().Public().(*ed25519.PublicKey).AsBytes(), ID: 0}, }, - Delay: 3, + Delay: 0, } var digest = types.NewGrandpaConsensusDigest() @@ -81,25 +91,13 @@ func TestHandler_GrandpaScheduledChange(t *testing.T) { Data: data, } - header := &types.Header{ - Number: 1, - } - - err = handler.handleConsensusDigest(d, header) + // include a GrandpaScheduledChange on a block of number 3 + err = handler.handleConsensusDigest(d, headers[3]) require.NoError(t, err) - headers, _ := state.AddBlocksToState(t, handler.blockState.(*state.BlockState), 2, false) - for i, h := range headers { - err = handler.blockState.(*state.BlockState).SetFinalisedHash(h.Hash(), uint64(i), 0) - require.NoError(t, err) - } - - // authorities should change on start of block 3 from start - headers, _ = state.AddBlocksToState(t, handler.blockState.(*state.BlockState), 1, false) - for _, h := range headers { - err = handler.blockState.(*state.BlockState).SetFinalisedHash(h.Hash(), 3, 0) - require.NoError(t, err) - } + // finalize block of number 3 + err = handler.blockState.(*state.BlockState).SetFinalisedHash(headers[3].Hash(), 3, 0) + require.NoError(t, err) time.Sleep(time.Millisecond * 500) setID, err := handler.grandpaState.(*state.GrandpaState).GetCurrentSetID() @@ -118,6 +116,9 @@ func 
TestHandler_GrandpaForcedChange(t *testing.T) { handler.Start() defer handler.Stop() + // authorities should change on start of block 4 from start + headers, _ := state.AddBlocksToState(t, handler.blockState.(*state.BlockState), 2, false) + kr, err := keystore.NewEd25519Keyring() require.NoError(t, err) @@ -140,17 +141,15 @@ func TestHandler_GrandpaForcedChange(t *testing.T) { Data: data, } - header := &types.Header{ - Number: 1, - } - - err = handler.handleConsensusDigest(d, header) + // tracking the GrandpaForcedChange under block 1 + // and when block number 4 being imported then we should apply the change + err = handler.handleConsensusDigest(d, headers[1]) require.NoError(t, err) - // authorities should change on start of block 4 from start + // create new blocks and import them state.AddBlocksToState(t, handler.blockState.(*state.BlockState), 4, false) - time.Sleep(time.Millisecond * 100) + time.Sleep(time.Millisecond * 500) setID, err := handler.grandpaState.(*state.GrandpaState).GetCurrentSetID() require.NoError(t, err) require.Equal(t, uint64(1), setID) @@ -162,180 +161,103 @@ func TestHandler_GrandpaForcedChange(t *testing.T) { require.Equal(t, expected, auths) } -func TestHandler_GrandpaPauseAndResume(t *testing.T) { - handler, _ := newTestHandler(t) - handler.Start() - defer handler.Stop() - - p := types.GrandpaPause{ - Delay: 3, - } - - var digest = types.NewGrandpaConsensusDigest() - err := digest.Set(p) - require.NoError(t, err) - - data, err := scale.Marshal(digest) - require.NoError(t, err) - - d := &types.ConsensusDigest{ - ConsensusEngineID: types.GrandpaEngineID, - Data: data, - } - - err = handler.handleConsensusDigest(d, nil) - require.NoError(t, err) - nextPause, err := handler.grandpaState.(*state.GrandpaState).GetNextPause() - require.NoError(t, err) - require.Equal(t, uint(p.Delay), nextPause) - - headers, _ := state.AddBlocksToState(t, handler.blockState.(*state.BlockState), 3, false) - for i, h := range headers { - 
handler.blockState.(*state.BlockState).SetFinalisedHash(h.Hash(), uint64(i), 0) - } - - time.Sleep(time.Millisecond * 100) - require.Nil(t, handler.grandpaPause) - - r := types.GrandpaResume{ - Delay: 3, - } - - var digest2 = types.NewGrandpaConsensusDigest() - err = digest2.Set(r) - require.NoError(t, err) - - data, err = scale.Marshal(digest2) - require.NoError(t, err) - - d = &types.ConsensusDigest{ - ConsensusEngineID: types.GrandpaEngineID, - Data: data, - } - - err = handler.handleConsensusDigest(d, nil) - require.NoError(t, err) - - state.AddBlocksToState(t, handler.blockState.(*state.BlockState), 3, false) - time.Sleep(time.Millisecond * 110) - require.Nil(t, handler.grandpaResume) - - nextResume, err := handler.grandpaState.(*state.GrandpaState).GetNextResume() - require.NoError(t, err) - require.Equal(t, uint(r.Delay+p.Delay), nextResume) -} - -func TestNextGrandpaAuthorityChange_OneChange(t *testing.T) { - handler, _ := newTestHandler(t) - handler.Start() - defer handler.Stop() - - const block uint = 3 - sc := types.GrandpaScheduledChange{ - Auths: []types.GrandpaAuthoritiesRaw{}, - Delay: uint32(block), - } - - var digest = types.NewGrandpaConsensusDigest() - err := digest.Set(sc) - require.NoError(t, err) - - data, err := scale.Marshal(digest) - require.NoError(t, err) - - d := &types.ConsensusDigest{ - ConsensusEngineID: types.GrandpaEngineID, - Data: data, - } - header := &types.Header{ - Number: 1, +func TestMultipleGRANDPADigests_ShouldIncludeJustForcedChanges(t *testing.T) { + tests := map[string]struct { + digestsTypes []scale.VaryingDataTypeValue + expectedHandled []scale.VaryingDataTypeValue + }{ + "forced_and_scheduled_changes_same_block": { + digestsTypes: []scale.VaryingDataTypeValue{ + types.GrandpaForcedChange{}, + types.GrandpaScheduledChange{}, + }, + expectedHandled: []scale.VaryingDataTypeValue{ + types.GrandpaForcedChange{}, + }, + }, + "only_scheduled_change_in_block": { + digestsTypes: []scale.VaryingDataTypeValue{ + 
types.GrandpaScheduledChange{}, + }, + expectedHandled: []scale.VaryingDataTypeValue{ + types.GrandpaScheduledChange{}, + }, + }, + "more_than_one_forced_changes_in_block": { + digestsTypes: []scale.VaryingDataTypeValue{ + types.GrandpaForcedChange{}, + types.GrandpaForcedChange{}, + types.GrandpaForcedChange{}, + types.GrandpaScheduledChange{}, + }, + expectedHandled: []scale.VaryingDataTypeValue{ + types.GrandpaForcedChange{}, + types.GrandpaForcedChange{}, + types.GrandpaForcedChange{}, + }, + }, + "multiple_consensus_digests_in_block": { + digestsTypes: []scale.VaryingDataTypeValue{ + types.GrandpaOnDisabled{}, + types.GrandpaPause{}, + types.GrandpaResume{}, + types.GrandpaForcedChange{}, + types.GrandpaScheduledChange{}, + }, + expectedHandled: []scale.VaryingDataTypeValue{ + types.GrandpaOnDisabled{}, + types.GrandpaPause{}, + types.GrandpaResume{}, + types.GrandpaForcedChange{}, + }, + }, } - err = handler.handleConsensusDigest(d, header) - require.NoError(t, err) + for tname, tt := range tests { + tt := tt + t.Run(tname, func(t *testing.T) { + digests := types.NewDigest() - next := handler.NextGrandpaAuthorityChange() - require.Equal(t, block, next) + for _, item := range tt.digestsTypes { + var digest = types.NewGrandpaConsensusDigest() + require.NoError(t, digest.Set(item)) - nextSetID := uint64(1) - auths, err := handler.grandpaState.(*state.GrandpaState).GetAuthorities(nextSetID) - require.NoError(t, err) - expected, err := types.NewGrandpaVotersFromAuthoritiesRaw(sc.Auths) - require.NoError(t, err) - require.Equal(t, expected, auths) -} - -func TestNextGrandpaAuthorityChange_MultipleChanges(t *testing.T) { - handler, _ := newTestHandler(t) - handler.Start() - defer handler.Stop() + data, err := scale.Marshal(digest) + require.NoError(t, err) - kr, err := keystore.NewEd25519Keyring() - require.NoError(t, err) + consensusDigest := types.ConsensusDigest{ + ConsensusEngineID: types.GrandpaEngineID, + Data: data, + } - later := uint32(6) - sc := 
types.GrandpaScheduledChange{ - Auths: []types.GrandpaAuthoritiesRaw{}, - Delay: later, - } + require.NoError(t, digests.Add(consensusDigest)) + } - var digest = types.NewGrandpaConsensusDigest() - err = digest.Set(sc) - require.NoError(t, err) + header := &types.Header{ + Digest: digests, + } - data, err := scale.Marshal(digest) - require.NoError(t, err) + handler, _ := newTestHandler(t) + ctrl := gomock.NewController(t) + grandpaState := NewMockGrandpaState(ctrl) - d := &types.ConsensusDigest{ - ConsensusEngineID: types.GrandpaEngineID, - Data: data, - } + for _, item := range tt.expectedHandled { + var digest = types.NewGrandpaConsensusDigest() + require.NoError(t, digest.Set(item)) - header := &types.Header{ - Number: 1, - } + data, err := scale.Marshal(digest) + require.NoError(t, err) - err = handler.handleConsensusDigest(d, header) - require.NoError(t, err) + expected := types.NewGrandpaConsensusDigest() + require.NoError(t, scale.Unmarshal(data, &expected)) - nextSetID := uint64(1) - auths, err := handler.grandpaState.(*state.GrandpaState).GetAuthorities(nextSetID) - require.NoError(t, err) - expected, err := types.NewGrandpaVotersFromAuthoritiesRaw(sc.Auths) - require.NoError(t, err) - require.Equal(t, expected, auths) + grandpaState.EXPECT().HandleGRANDPADigest(header, expected).Return(nil) + } - const earlier uint = 4 - fc := types.GrandpaForcedChange{ - Auths: []types.GrandpaAuthoritiesRaw{ - {Key: kr.Alice().Public().(*ed25519.PublicKey).AsBytes(), ID: 0}, - }, - Delay: uint32(earlier), + handler.grandpaState = grandpaState + handler.HandleDigests(header) + }) } - - digest = types.NewGrandpaConsensusDigest() - err = digest.Set(fc) - require.NoError(t, err) - - data, err = scale.Marshal(digest) - require.NoError(t, err) - - d = &types.ConsensusDigest{ - ConsensusEngineID: types.GrandpaEngineID, - Data: data, - } - - err = handler.handleConsensusDigest(d, header) - require.NoError(t, err) - - next := handler.NextGrandpaAuthorityChange() - 
require.Equal(t, earlier+1, next) - - auths, err = handler.grandpaState.(*state.GrandpaState).GetAuthorities(nextSetID) - require.NoError(t, err) - expected, err = types.NewGrandpaVotersFromAuthoritiesRaw(fc.Auths) - require.NoError(t, err) - require.Equal(t, expected, auths) } func TestHandler_HandleBABEOnDisabled(t *testing.T) { @@ -515,3 +437,71 @@ func TestHandler_HandleNextConfigData(t *testing.T) { require.NoError(t, err) require.Equal(t, act.ToConfigData(), stored) } + +func issueBlocksWithGRANDPAScheduledChanges(t *testing.T, kp *sr25519.Keypair, dh *Handler, + stateSvc *state.Service, parentHeader *types.Header, + sc types.GrandpaScheduledChange, atBlock int, size int) (headers []*types.Header) { + t.Helper() + + transcript := merlin.NewTranscript("BABE") + crypto.AppendUint64(transcript, []byte("slot number"), 1) + crypto.AppendUint64(transcript, []byte("current epoch"), 1) + transcript.AppendMessage([]byte("chain randomness"), []byte{}) + + output, proof, err := kp.VrfSign(transcript) + require.NoError(t, err) + + babePrimaryPreDigest := types.BabePrimaryPreDigest{ + SlotNumber: 1, + VRFOutput: output, + VRFProof: proof, + } + + preRuntimeDigest, err := babePrimaryPreDigest.ToPreRuntimeDigest() + require.NoError(t, err) + + digest := types.NewDigest() + + // include the consensus in the block being produced + if parentHeader.Number+1 == uint(atBlock) { + grandpaConsensusDigest := types.NewGrandpaConsensusDigest() + err = grandpaConsensusDigest.Set(sc) + require.NoError(t, err) + + grandpaDigest, err := scale.Marshal(grandpaConsensusDigest) + require.NoError(t, err) + + consensusDigest := types.ConsensusDigest{ + ConsensusEngineID: types.GrandpaEngineID, + Data: grandpaDigest, + } + require.NoError(t, digest.Add(*preRuntimeDigest, consensusDigest)) + } else { + require.NoError(t, digest.Add(*preRuntimeDigest)) + } + + header := &types.Header{ + ParentHash: parentHeader.Hash(), + Number: parentHeader.Number + 1, + Digest: digest, + } + + block := 
&types.Block{ + Header: *header, + Body: *types.NewBody([]types.Extrinsic{}), + } + + err = stateSvc.Block.AddBlock(block) + require.NoError(t, err) + + dh.HandleDigests(header) + + headers = append(headers, header) + + if size > 0 { + nestedHeaders := issueBlocksWithGRANDPAScheduledChanges(t, kp, dh, stateSvc, header, sc, atBlock, size-1) + headers = append(headers, nestedHeaders...) + } + + return headers +} diff --git a/dot/digest/interface.go b/dot/digest/interface.go index 268ae36c7b..2913697d0f 100644 --- a/dot/digest/interface.go +++ b/dot/digest/interface.go @@ -7,6 +7,7 @@ import ( "github.com/ChainSafe/gossamer/dot/types" "github.com/ChainSafe/gossamer/lib/common" "github.com/ChainSafe/gossamer/lib/grandpa" + "github.com/ChainSafe/gossamer/pkg/scale" ) // BlockState interface for block state methods @@ -39,4 +40,8 @@ type GrandpaState interface { SetNextPause(number uint) error SetNextResume(number uint) error GetCurrentSetID() (uint64, error) + + HandleGRANDPADigest(header *types.Header, digest scale.VaryingDataType) error + ApplyScheduledChanges(finalizedHeader *types.Header) error + ApplyForcedChanges(importedHeader *types.Header) error } diff --git a/dot/digest/mock_grandpa_test.go b/dot/digest/mock_grandpa_test.go new file mode 100644 index 0000000000..e71def0777 --- /dev/null +++ b/dot/digest/mock_grandpa_test.go @@ -0,0 +1,150 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: github.com/ChainSafe/gossamer/dot/digest (interfaces: GrandpaState) + +// Package digest is a generated GoMock package. +package digest + +import ( + reflect "reflect" + + types "github.com/ChainSafe/gossamer/dot/types" + scale "github.com/ChainSafe/gossamer/pkg/scale" + gomock "github.com/golang/mock/gomock" +) + +// MockGrandpaState is a mock of GrandpaState interface. +type MockGrandpaState struct { + ctrl *gomock.Controller + recorder *MockGrandpaStateMockRecorder +} + +// MockGrandpaStateMockRecorder is the mock recorder for MockGrandpaState. 
+type MockGrandpaStateMockRecorder struct { + mock *MockGrandpaState +} + +// NewMockGrandpaState creates a new mock instance. +func NewMockGrandpaState(ctrl *gomock.Controller) *MockGrandpaState { + mock := &MockGrandpaState{ctrl: ctrl} + mock.recorder = &MockGrandpaStateMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockGrandpaState) EXPECT() *MockGrandpaStateMockRecorder { + return m.recorder +} + +// ApplyForcedChanges mocks base method. +func (m *MockGrandpaState) ApplyForcedChanges(arg0 *types.Header) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ApplyForcedChanges", arg0) + ret0, _ := ret[0].(error) + return ret0 +} + +// ApplyForcedChanges indicates an expected call of ApplyForcedChanges. +func (mr *MockGrandpaStateMockRecorder) ApplyForcedChanges(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ApplyForcedChanges", reflect.TypeOf((*MockGrandpaState)(nil).ApplyForcedChanges), arg0) +} + +// ApplyScheduledChanges mocks base method. +func (m *MockGrandpaState) ApplyScheduledChanges(arg0 *types.Header) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ApplyScheduledChanges", arg0) + ret0, _ := ret[0].(error) + return ret0 +} + +// ApplyScheduledChanges indicates an expected call of ApplyScheduledChanges. +func (mr *MockGrandpaStateMockRecorder) ApplyScheduledChanges(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ApplyScheduledChanges", reflect.TypeOf((*MockGrandpaState)(nil).ApplyScheduledChanges), arg0) +} + +// GetCurrentSetID mocks base method. +func (m *MockGrandpaState) GetCurrentSetID() (uint64, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetCurrentSetID") + ret0, _ := ret[0].(uint64) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetCurrentSetID indicates an expected call of GetCurrentSetID. 
+func (mr *MockGrandpaStateMockRecorder) GetCurrentSetID() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetCurrentSetID", reflect.TypeOf((*MockGrandpaState)(nil).GetCurrentSetID)) +} + +// HandleGRANDPADigest mocks base method. +func (m *MockGrandpaState) HandleGRANDPADigest(arg0 *types.Header, arg1 scale.VaryingDataType) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "HandleGRANDPADigest", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// HandleGRANDPADigest indicates an expected call of HandleGRANDPADigest. +func (mr *MockGrandpaStateMockRecorder) HandleGRANDPADigest(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "HandleGRANDPADigest", reflect.TypeOf((*MockGrandpaState)(nil).HandleGRANDPADigest), arg0, arg1) +} + +// IncrementSetID mocks base method. +func (m *MockGrandpaState) IncrementSetID() (uint64, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "IncrementSetID") + ret0, _ := ret[0].(uint64) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// IncrementSetID indicates an expected call of IncrementSetID. +func (mr *MockGrandpaStateMockRecorder) IncrementSetID() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IncrementSetID", reflect.TypeOf((*MockGrandpaState)(nil).IncrementSetID)) +} + +// SetNextChange mocks base method. +func (m *MockGrandpaState) SetNextChange(arg0 []types.GrandpaVoter, arg1 uint) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SetNextChange", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// SetNextChange indicates an expected call of SetNextChange. 
+func (mr *MockGrandpaStateMockRecorder) SetNextChange(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetNextChange", reflect.TypeOf((*MockGrandpaState)(nil).SetNextChange), arg0, arg1) +} + +// SetNextPause mocks base method. +func (m *MockGrandpaState) SetNextPause(arg0 uint) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SetNextPause", arg0) + ret0, _ := ret[0].(error) + return ret0 +} + +// SetNextPause indicates an expected call of SetNextPause. +func (mr *MockGrandpaStateMockRecorder) SetNextPause(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetNextPause", reflect.TypeOf((*MockGrandpaState)(nil).SetNextPause), arg0) +} + +// SetNextResume mocks base method. +func (m *MockGrandpaState) SetNextResume(arg0 uint) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SetNextResume", arg0) + ret0, _ := ret[0].(error) + return ret0 +} + +// SetNextResume indicates an expected call of SetNextResume. +func (mr *MockGrandpaStateMockRecorder) SetNextResume(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetNextResume", reflect.TypeOf((*MockGrandpaState)(nil).SetNextResume), arg0) +} diff --git a/dot/mock_node_builder_test.go b/dot/mock_node_builder_test.go index 569d20cf31..7c08c9f0dc 100644 --- a/dot/mock_node_builder_test.go +++ b/dot/mock_node_builder_test.go @@ -108,18 +108,18 @@ func (mr *MocknodeBuilderIfaceMockRecorder) createDigestHandler(lvl, st interfac } // createGRANDPAService mocks base method. 
-func (m *MocknodeBuilderIface) createGRANDPAService(cfg *Config, st *state.Service, dh *digest.Handler, ks keystore.Keystore, net *network.Service, telemetryMailer telemetry.Client) (*grandpa.Service, error) { +func (m *MocknodeBuilderIface) createGRANDPAService(cfg *Config, st *state.Service, ks keystore.Keystore, net *network.Service, telemetryMailer telemetry.Client) (*grandpa.Service, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "createGRANDPAService", cfg, st, dh, ks, net, telemetryMailer) + ret := m.ctrl.Call(m, "createGRANDPAService", cfg, st, ks, net, telemetryMailer) ret0, _ := ret[0].(*grandpa.Service) ret1, _ := ret[1].(error) return ret0, ret1 } // createGRANDPAService indicates an expected call of createGRANDPAService. -func (mr *MocknodeBuilderIfaceMockRecorder) createGRANDPAService(cfg, st, dh, ks, net, telemetryMailer interface{}) *gomock.Call { +func (mr *MocknodeBuilderIfaceMockRecorder) createGRANDPAService(cfg, st, ks, net, telemetryMailer interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "createGRANDPAService", reflect.TypeOf((*MocknodeBuilderIface)(nil).createGRANDPAService), cfg, st, dh, ks, net, telemetryMailer) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "createGRANDPAService", reflect.TypeOf((*MocknodeBuilderIface)(nil).createGRANDPAService), cfg, st, ks, net, telemetryMailer) } // createNetworkService mocks base method. 
diff --git a/dot/node.go b/dot/node.go index 0a5a0f9264..86c6aa7de5 100644 --- a/dot/node.go +++ b/dot/node.go @@ -62,7 +62,7 @@ type nodeBuilderIface interface { createDigestHandler(lvl log.Level, st *state.Service) (*digest.Handler, error) createCoreService(cfg *Config, ks *keystore.GlobalKeystore, st *state.Service, net *network.Service, dh *digest.Handler) (*core.Service, error) - createGRANDPAService(cfg *Config, st *state.Service, dh *digest.Handler, ks keystore.Keystore, + createGRANDPAService(cfg *Config, st *state.Service, ks keystore.Keystore, net *network.Service, telemetryMailer telemetry.Client) (*grandpa.Service, error) newSyncService(cfg *Config, st *state.Service, fg dotsync.FinalityGadget, verifier *babe.VerificationManager, cs *core.Service, net *network.Service, telemetryMailer telemetry.Client) (*dotsync.Service, error) @@ -341,7 +341,7 @@ func newNode(cfg *Config, } nodeSrvcs = append(nodeSrvcs, coreSrvc) - fg, err := builder.createGRANDPAService(cfg, stateSrvc, dh, ks.Gran, networkSrvc, telemetryMailer) + fg, err := builder.createGRANDPAService(cfg, stateSrvc, ks.Gran, networkSrvc, telemetryMailer) if err != nil { return nil, err } diff --git a/dot/node_test.go b/dot/node_test.go index 1a19e40ee8..6f56557797 100644 --- a/dot/node_test.go +++ b/dot/node_test.go @@ -197,7 +197,7 @@ func TestNewNode(t *testing.T) { gomock.AssignableToTypeOf(&network.Service{}), &digest.Handler{}). Return(&core.Service{}, nil) m.EXPECT().createGRANDPAService(dotConfig, gomock.AssignableToTypeOf(&state.Service{}), - &digest.Handler{}, ks.Gran, gomock.AssignableToTypeOf(&network.Service{}), + ks.Gran, gomock.AssignableToTypeOf(&network.Service{}), gomock.AssignableToTypeOf(&telemetry.Mailer{})). 
Return(&grandpa.Service{}, nil) m.EXPECT().newSyncService(dotConfig, gomock.AssignableToTypeOf(&state.Service{}), &grandpa.Service{}, diff --git a/dot/rpc/modules/system.go b/dot/rpc/modules/system.go index 79c7e6e6af..89b33992a0 100644 --- a/dot/rpc/modules/system.go +++ b/dot/rpc/modules/system.go @@ -14,7 +14,7 @@ import ( "github.com/ChainSafe/gossamer/lib/crypto" "github.com/ChainSafe/gossamer/pkg/scale" "github.com/btcsuite/btcutil/base58" - ctypes "github.com/centrifuge/go-substrate-rpc-client/v3/types" + ctypes "github.com/centrifuge/go-substrate-rpc-client/v4/types" ) // SystemModule is an RPC module providing access to core API points diff --git a/dot/services.go b/dot/services.go index 191e80e1e6..5ee1e7e939 100644 --- a/dot/services.go +++ b/dot/services.go @@ -392,8 +392,8 @@ func (nodeBuilder) createSystemService(cfg *types.SystemInfo, stateSrvc *state.S } // createGRANDPAService creates a new GRANDPA service -func (nodeBuilder) createGRANDPAService(cfg *Config, st *state.Service, dh *digest.Handler, - ks keystore.Keystore, net *network.Service, telemetryMailer telemetry.Client) (*grandpa.Service, error) { +func (nodeBuilder) createGRANDPAService(cfg *Config, st *state.Service, ks keystore.Keystore, + net *network.Service, telemetryMailer telemetry.Client) (*grandpa.Service, error) { rt, err := st.Block.GetRuntime(nil) if err != nil { return nil, err @@ -416,15 +416,14 @@ func (nodeBuilder) createGRANDPAService(cfg *Config, st *state.Service, dh *dige } gsCfg := &grandpa.Config{ - LogLvl: cfg.Log.FinalityGadgetLvl, - BlockState: st.Block, - GrandpaState: st.Grandpa, - DigestHandler: dh, - Voters: voters, - Authority: cfg.Core.GrandpaAuthority, - Network: net, - Interval: cfg.Core.GrandpaInterval, - Telemetry: telemetryMailer, + LogLvl: cfg.Log.FinalityGadgetLvl, + BlockState: st.Block, + GrandpaState: st.Grandpa, + Voters: voters, + Authority: cfg.Core.GrandpaAuthority, + Network: net, + Interval: cfg.Core.GrandpaInterval, + Telemetry: 
telemetryMailer, } if cfg.Core.GrandpaAuthority { diff --git a/dot/services_integration_test.go b/dot/services_integration_test.go index 157f493d0d..1a7252eda6 100644 --- a/dot/services_integration_test.go +++ b/dot/services_integration_test.go @@ -298,9 +298,6 @@ func TestCreateGrandpaService(t *testing.T) { err = builder.loadRuntime(cfg, ns, stateSrvc, ks, &network.Service{}) require.NoError(t, err) - dh, err := builder.createDigestHandler(cfg.Log.DigestLvl, stateSrvc) - require.NoError(t, err) - networkConfig := &network.Config{ BasePath: t.TempDir(), NoBootstrap: true, @@ -311,7 +308,7 @@ func TestCreateGrandpaService(t *testing.T) { testNetworkService, err := network.NewService(networkConfig) require.NoError(t, err) - gs, err := builder.createGRANDPAService(cfg, stateSrvc, dh, ks.Gran, testNetworkService, nil) + gs, err := builder.createGRANDPAService(cfg, stateSrvc, ks.Gran, testNetworkService, nil) require.NoError(t, err) require.NotNil(t, gs) } diff --git a/dot/services_test.go b/dot/services_test.go index e54e2ff053..a1e9feee38 100644 --- a/dot/services_test.go +++ b/dot/services_test.go @@ -449,7 +449,7 @@ func Test_nodeBuilder_createGRANDPAService(t *testing.T) { networkSrvc, err := network.NewService(networkConfig) require.NoError(t, err) builder := nodeBuilder{} - got, err := builder.createGRANDPAService(cfg, stateSrvc, nil, tt.ks, networkSrvc, + got, err := builder.createGRANDPAService(cfg, stateSrvc, tt.ks, networkSrvc, nil) assert.ErrorIs(t, err, tt.err) // TODO: create interface for grandpa.NewService to enable testing with assert.Equal diff --git a/dot/state/grandpa.go b/dot/state/grandpa.go index 32850e9dec..50fbce2536 100644 --- a/dot/state/grandpa.go +++ b/dot/state/grandpa.go @@ -7,6 +7,7 @@ import ( "encoding/binary" "errors" "fmt" + "sync" "github.com/ChainSafe/chaindb" "github.com/ChainSafe/gossamer/dot/types" @@ -14,6 +15,15 @@ import ( "github.com/ChainSafe/gossamer/pkg/scale" ) +var ( + errPendingScheduledChanges = errors.New("pending 
scheduled changes needs to be applied") + errDuplicateHashes = errors.New("duplicated hashes") + errAlreadyHasForcedChange = errors.New("already has a forced change") + errUnfinalizedAncestor = errors.New("unfinalized ancestor") + + ErrNoNextAuthorityChange = errors.New("no next authority change") +) + var ( genesisSetID = uint64(0) grandpaPrefix = "grandpa" @@ -26,40 +36,276 @@ var ( // GrandpaState tracks information related to grandpa type GrandpaState struct { - db chaindb.Database + db chaindb.Database + blockState *BlockState + + forksLock sync.RWMutex + + forcedChanges *orderedPendingChanges + scheduledChangeRoots *changeTree } // NewGrandpaStateFromGenesis returns a new GrandpaState given the grandpa genesis authorities -func NewGrandpaStateFromGenesis(db chaindb.Database, genesisAuthorities []types.GrandpaVoter) (*GrandpaState, error) { +func NewGrandpaStateFromGenesis(db chaindb.Database, bs *BlockState, + genesisAuthorities []types.GrandpaVoter) (*GrandpaState, error) { grandpaDB := chaindb.NewTable(db, grandpaPrefix) s := &GrandpaState{ - db: grandpaDB, + db: grandpaDB, + blockState: bs, + scheduledChangeRoots: new(changeTree), + forcedChanges: new(orderedPendingChanges), } if err := s.setCurrentSetID(genesisSetID); err != nil { - return nil, err + return nil, fmt.Errorf("cannot set current set id: %w", err) } if err := s.SetLatestRound(0); err != nil { - return nil, err + return nil, fmt.Errorf("cannot set latest round: %w", err) } if err := s.setAuthorities(genesisSetID, genesisAuthorities); err != nil { - return nil, err + return nil, fmt.Errorf("cannot set authorities: %w", err) } - if err := s.setSetIDChangeAtBlock(genesisSetID, 0); err != nil { - return nil, err + if err := s.setChangeSetIDAtBlock(genesisSetID, 0); err != nil { + return nil, fmt.Errorf("cannot set change set id at block 0: %w", err) } return s, nil } // NewGrandpaState returns a new GrandpaState -func NewGrandpaState(db chaindb.Database) (*GrandpaState, error) { +func 
NewGrandpaState(db chaindb.Database, bs *BlockState) *GrandpaState { return &GrandpaState{ - db: chaindb.NewTable(db, grandpaPrefix), - }, nil + db: chaindb.NewTable(db, grandpaPrefix), + blockState: bs, + scheduledChangeRoots: new(changeTree), + forcedChanges: new(orderedPendingChanges), + } +} + +// HandleGRANDPADigest receives a decoded GRANDPA digest and calls the right function to handles the digest +func (s *GrandpaState) HandleGRANDPADigest(header *types.Header, digest scale.VaryingDataType) error { + switch val := digest.Value().(type) { + case types.GrandpaScheduledChange: + return s.addScheduledChange(header, val) + case types.GrandpaForcedChange: + return s.addForcedChange(header, val) + case types.GrandpaOnDisabled: + return nil + case types.GrandpaPause: + logger.Warn("GRANDPA Pause consensus message not implemented yet") + return nil + case types.GrandpaResume: + logger.Warn("GRANDPA Resume consensus message not implemented yet") + return nil + default: + return fmt.Errorf("not supported digest") + } +} + +func (s *GrandpaState) addForcedChange(header *types.Header, fc types.GrandpaForcedChange) error { + auths, err := types.GrandpaAuthoritiesRawToAuthorities(fc.Auths) + if err != nil { + return fmt.Errorf("cannot parse GRANDPA authorities to raw authorities: %w", err) + } + + pendingChange := pendingChange{ + bestFinalizedNumber: fc.BestFinalizedBlock, + nextAuthorities: auths, + announcingHeader: header, + delay: fc.Delay, + } + + err = s.forcedChanges.importChange(pendingChange, s.blockState.IsDescendantOf) + if err != nil { + return fmt.Errorf("cannot import forced change: %w", err) + } + + logger.Debugf("there are now %d possible forced changes", s.forcedChanges.Len()) + return nil +} + +func (s *GrandpaState) addScheduledChange(header *types.Header, sc types.GrandpaScheduledChange) error { + auths, err := types.GrandpaAuthoritiesRawToAuthorities(sc.Auths) + if err != nil { + return fmt.Errorf("cannot parse GRANPDA authorities to raw authorities: 
%w", err) + } + + pendingChange := &pendingChange{ + nextAuthorities: auths, + announcingHeader: header, + delay: sc.Delay, + } + + err = s.scheduledChangeRoots.importChange(pendingChange, s.blockState.IsDescendantOf) + if err != nil { + return fmt.Errorf("cannot import scheduled change: %w", err) + } + + logger.Debugf("there are now %d possible scheduled change roots", s.scheduledChangeRoots.Len()) + return nil +} + +// ApplyScheduledChanges will check the schedules changes in order to find a root +// equal or behind the finalized number and will apply its authority set changes +func (s *GrandpaState) ApplyScheduledChanges(finalizedHeader *types.Header) error { + finalizedHash := finalizedHeader.Hash() + + err := s.forcedChanges.pruneChanges(finalizedHash, s.blockState.IsDescendantOf) + if err != nil { + return fmt.Errorf("cannot prune non-descendant forced changes: %w", err) + } + + if s.scheduledChangeRoots.Len() == 0 { + return nil + } + + changeToApply, err := s.scheduledChangeRoots.findApplicable(finalizedHash, + finalizedHeader.Number, s.blockState.IsDescendantOf) + if err != nil { + return fmt.Errorf("cannot get applicable scheduled change: %w", err) + } + + if changeToApply == nil { + return nil + } + + logger.Debugf("applying scheduled change: %s", changeToApply.change) + + newSetID, err := s.IncrementSetID() + if err != nil { + return fmt.Errorf("cannot increment set id: %w", err) + } + + grandpaVotersAuthorities := types.NewGrandpaVotersFromAuthorities(changeToApply.change.nextAuthorities) + err = s.setAuthorities(newSetID, grandpaVotersAuthorities) + if err != nil { + return fmt.Errorf("cannot set authorities: %w", err) + } + + err = s.setChangeSetIDAtBlock(newSetID, changeToApply.change.effectiveNumber()) + if err != nil { + return fmt.Errorf("cannot set the change set id at block: %w", err) + } + + logger.Debugf("Applying authority set change scheduled at block #%d", + changeToApply.change.announcingHeader.Number) + + // TODO: add 
afg.applying_scheduled_authority_set_change telemetry info here + return nil +} + +// ApplyForcedChanges will check for if there is a scheduled forced change relative to the +// imported block and then apply it otherwise nothing happens +func (s *GrandpaState) ApplyForcedChanges(importedBlockHeader *types.Header) error { + forcedChange, err := s.forcedChanges.findApplicable(importedBlockHeader.Hash(), + importedBlockHeader.Number, s.blockState.IsDescendantOf) + if err != nil { + return fmt.Errorf("cannot find applicable forced change: %w", err) + } else if forcedChange == nil { + return nil + } + + forcedChangeHash := forcedChange.announcingHeader.Hash() + bestFinalizedNumber := forcedChange.bestFinalizedNumber + + dependant, err := s.scheduledChangeRoots.lookupChangeWhere(func(pcn *pendingChangeNode) (bool, error) { + if pcn.change.effectiveNumber() > uint(bestFinalizedNumber) { + return false, nil + } + + scheduledBlockHash := pcn.change.announcingHeader.Hash() + return s.blockState.IsDescendantOf(scheduledBlockHash, forcedChangeHash) + }) + if err != nil { + return fmt.Errorf("cannot check pending changes while applying forced change: %w", err) + } else if dependant != nil { + return fmt.Errorf("%w: %s", errPendingScheduledChanges, dependant.change) + } + + logger.Debugf("applying forced change: %s", forcedChange) + + // TODO: send the telemetry messages here + // afg.applying_forced_authority_set_change + + currentSetID, err := s.GetCurrentSetID() + if err != nil { + return fmt.Errorf("cannot get current set id: %w", err) + } + + err = s.setChangeSetIDAtBlock(currentSetID, uint(forcedChange.bestFinalizedNumber)) + if err != nil { + return fmt.Errorf("cannot set change set id at block: %w", err) + } + + newSetID, err := s.IncrementSetID() + if err != nil { + return fmt.Errorf("cannot increment set id: %w", err) + } + + grandpaVotersAuthorities := types.NewGrandpaVotersFromAuthorities(forcedChange.nextAuthorities) + err = s.setAuthorities(newSetID, 
grandpaVotersAuthorities)
+	if err != nil {
+		return fmt.Errorf("cannot set authorities: %w", err)
+	}
+
+	err = s.setChangeSetIDAtBlock(newSetID, forcedChange.effectiveNumber())
+	if err != nil {
+		return fmt.Errorf("cannot set change set id at block: %w", err)
+	}
+
+	logger.Debugf("Applying authority set forced change at block #%d",
+		forcedChange.announcingHeader.Number)
+
+	return nil
+}
+
+// NextGrandpaAuthorityChange returns the block number of the next upcoming grandpa authorities change.
+// It returns ErrNoNextAuthorityChange if no change is scheduled.
+func (s *GrandpaState) NextGrandpaAuthorityChange(bestBlockHash common.Hash, bestBlockNumber uint) (
+	blockNumber uint, err error) {
+	forcedChange, err := s.forcedChanges.lookupChangeWhere(func(pc pendingChange) (bool, error) {
+		isDescendant, err := s.blockState.IsDescendantOf(pc.announcingHeader.Hash(), bestBlockHash)
+		if err != nil {
+			return false, fmt.Errorf("cannot check ancestry: %w", err)
+		}
+
+		return isDescendant && pc.effectiveNumber() <= bestBlockNumber, nil
+	})
+	if err != nil {
+		return 0, fmt.Errorf("cannot get forced change on chain of %s: %w",
+			bestBlockHash, err)
+	}
+
+	scheduledChangeNode, err := s.scheduledChangeRoots.lookupChangeWhere(func(pcn *pendingChangeNode) (bool, error) {
+		isDescendant, err := s.blockState.IsDescendantOf(pcn.change.announcingHeader.Hash(), bestBlockHash)
+		if err != nil {
+			return false, fmt.Errorf("cannot check ancestry: %w", err)
+		}
+
+		return isDescendant && pcn.change.effectiveNumber() <= bestBlockNumber, nil
+	})
+	if err != nil {
+		return 0, fmt.Errorf("cannot get scheduled change on chain of %s: %w",
+			bestBlockHash, err)
+	}
+
+	var next uint
+	if scheduledChangeNode != nil {
+		next = scheduledChangeNode.change.effectiveNumber()
+	}
+
+	if forcedChange != nil && (forcedChange.effectiveNumber() < next || next == 0) {
+		next = forcedChange.effectiveNumber()
+	}
+
+	if next == 0 {
+		return 0, ErrNoNextAuthorityChange
+	}
+
+	return next, nil
+}
+
+func authoritiesKey(setID uint64) []byte { 
@@ -152,7 +398,7 @@ func (s *GrandpaState) SetNextChange(authorities []types.GrandpaVoter, number ui return err } - err = s.setSetIDChangeAtBlock(nextSetID, number) + err = s.setChangeSetIDAtBlock(nextSetID, number) if err != nil { return err } @@ -177,7 +423,7 @@ func (s *GrandpaState) IncrementSetID() (newSetID uint64, err error) { } // setSetIDChangeAtBlock sets a set ID change at a certain block -func (s *GrandpaState) setSetIDChangeAtBlock(setID uint64, number uint) error { +func (s *GrandpaState) setChangeSetIDAtBlock(setID uint64, number uint) error { return s.db.Put(setIDChangeKey(setID), common.UintToBytes(number)) } @@ -206,8 +452,7 @@ func (s *GrandpaState) GetSetIDByBlockNumber(blockNumber uint) (uint64, error) { } curr = curr - 1 continue - } - if err != nil { + } else if err != nil { return 0, err } diff --git a/dot/state/grandpa_changes.go b/dot/state/grandpa_changes.go new file mode 100644 index 0000000000..f28ef37339 --- /dev/null +++ b/dot/state/grandpa_changes.go @@ -0,0 +1,316 @@ +// Copyright 2022 ChainSafe Systems (ON) +// SPDX-License-Identifier: LGPL-3.0-only + +package state + +import ( + "fmt" + "sort" + + "github.com/ChainSafe/gossamer/dot/types" + "github.com/ChainSafe/gossamer/lib/common" +) + +type conditionFunc[T any] func(T) (bool, error) +type isDescendantOfFunc func(parent, child common.Hash) (bool, error) + +type pendingChange struct { + bestFinalizedNumber uint32 + delay uint32 + nextAuthorities []types.Authority + announcingHeader *types.Header +} + +func (p pendingChange) String() string { + return fmt.Sprintf("announcing header: %s (%d), delay: %d, next authorities: %d", + p.announcingHeader.Hash(), p.announcingHeader.Number, p.delay, len(p.nextAuthorities)) +} + +func (p *pendingChange) effectiveNumber() uint { + return p.announcingHeader.Number + uint(p.delay) +} + +type orderedPendingChanges []pendingChange + +func (oc *orderedPendingChanges) Len() int { return len(*oc) } + +// findApplicable try to retrieve an applicable 
change from the slice of forced changes +func (oc orderedPendingChanges) findApplicable(importedHash common.Hash, importedNumber uint, + isDescendatOf isDescendantOfFunc) (*pendingChange, error) { + + return oc.lookupChangeWhere(func(forced pendingChange) (bool, error) { + announcingHash := forced.announcingHeader.Hash() + effectiveNumber := forced.effectiveNumber() + + if importedHash.Equal(announcingHash) && effectiveNumber == importedNumber { + return true, nil + } + + isDescendant, err := isDescendatOf(announcingHash, importedHash) + if err != nil { + return false, fmt.Errorf("cannot check ancestry: %w", err) + } + + return isDescendant && effectiveNumber == importedNumber, nil + }) + +} + +// lookupChangeWhere return the first pending change which satisfy the condition +func (oc orderedPendingChanges) lookupChangeWhere(condition conditionFunc[pendingChange]) ( + pendingChange *pendingChange, err error) { + for _, change := range oc { + ok, err := condition(change) + if err != nil { + return pendingChange, fmt.Errorf("failed while applying condition: %w", err) + } + + if ok { + return &change, nil + } + } + + return nil, nil //nolint:nilnil +} + +// importChange only tracks the pending change if and only if it is the +// unique forced change in its fork, otherwise will return an error +func (oc *orderedPendingChanges) importChange(pendingChange pendingChange, isDescendantOf isDescendantOfFunc) error { + announcingHeader := pendingChange.announcingHeader.Hash() + + for _, change := range *oc { + changeBlockHash := change.announcingHeader.Hash() + + if changeBlockHash.Equal(announcingHeader) { + return fmt.Errorf("%w: %s", errDuplicateHashes, changeBlockHash) + } + + isDescendant, err := isDescendantOf(changeBlockHash, announcingHeader) + if err != nil { + return fmt.Errorf("cannot verify ancestry: %w", err) + } + + if isDescendant { + return fmt.Errorf("%w: for block hash %s", errAlreadyHasForcedChange, changeBlockHash) + } + } + + orderedChanges := *oc + + // 
Use a binary search to include the pending change in the right position
+	// of a slice ordered by the effective number and by announcing header number
+	idxToInsert := sort.Search(oc.Len(), func(i int) bool {
+		return orderedChanges[i].effectiveNumber() >= pendingChange.effectiveNumber() &&
+			orderedChanges[i].announcingHeader.Number >= pendingChange.announcingHeader.Number
+	})
+
+	orderedChanges = append(orderedChanges, pendingChange)
+	copy(orderedChanges[idxToInsert+1:], orderedChanges[idxToInsert:])
+	orderedChanges[idxToInsert] = pendingChange
+	*oc = orderedChanges
+
+	return nil
+}
+
+// pruneChanges will remove changes that are not descendants of the hash argument
+// this function updates the current state of the pending changes slice
+func (oc *orderedPendingChanges) pruneChanges(hash common.Hash, isDescendantOf isDescendantOfFunc) error {
+	onBranchForcedChanges := make([]pendingChange, 0, oc.Len())
+
+	for _, forcedChange := range *oc {
+		isDescendant, err := isDescendantOf(hash, forcedChange.announcingHeader.Hash())
+		if err != nil {
+			return fmt.Errorf("cannot verify ancestry: %w", err)
+		}
+
+		if isDescendant {
+			onBranchForcedChanges = append(onBranchForcedChanges, forcedChange)
+		}
+	}
+
+	*oc = onBranchForcedChanges
+	return nil
+}
+
+type pendingChangeNode struct {
+	change *pendingChange
+	nodes  []*pendingChangeNode
+}
+
+// importNode is called recursively until we find a node that imports the pending change as one of
+// its children. The node which imports the pending change must be an ancestor with a
+// lower block number than the pending change. 
+func (c *pendingChangeNode) importNode(blockHash common.Hash, blockNumber uint, pendingChange *pendingChange, + isDescendantOf isDescendantOfFunc) (imported bool, err error) { + announcingHash := c.change.announcingHeader.Hash() + + if blockHash.Equal(announcingHash) { + return false, fmt.Errorf("%w: %s", errDuplicateHashes, blockHash) + } + + isDescendant, err := isDescendantOf(announcingHash, blockHash) + if err != nil { + return false, fmt.Errorf("cannot check ancestry: %w", err) + } + + if !isDescendant { + return false, nil + } + + if blockNumber <= c.change.announcingHeader.Number { + return false, nil + } + + for _, childrenNodes := range c.nodes { + imported, err := childrenNodes.importNode(blockHash, blockNumber, pendingChange, isDescendantOf) + if err != nil { + return false, err + } + + if imported { + return true, nil + } + } + + childrenNode := &pendingChangeNode{change: pendingChange} + c.nodes = append(c.nodes, childrenNode) + return true, nil +} + +// changeTree keeps track of the changes per fork allowing +// n forks in the same structure, this structure is intended +// to be an acyclic directed graph where the change nodes are +// placed by descendency order and number, you can ensure an +// node ancestry using the `isDescendantOfFunc` +type changeTree []*pendingChangeNode + +func (ct changeTree) Len() int { return len(ct) } +func (ct *changeTree) importChange(pendingChange *pendingChange, isDescendantOf isDescendantOfFunc) error { + for _, root := range *ct { + imported, err := root.importNode(pendingChange.announcingHeader.Hash(), + pendingChange.announcingHeader.Number, pendingChange, isDescendantOf) + + if err != nil { + return err + } + + if imported { + logger.Debugf("changes on header %s (%d) imported successfully", + pendingChange.announcingHeader.Hash(), pendingChange.announcingHeader.Number) + return nil + } + } + + pendingChangeNode := &pendingChangeNode{ + change: pendingChange, + } + + *ct = append(*ct, pendingChangeNode) + return 
nil
+}
+
+// lookupChangeWhere returns the first change which satisfies the
+// condition without modifying the current state of the change tree
+func (ct changeTree) lookupChangeWhere(condition conditionFunc[*pendingChangeNode]) (
+	changeNode *pendingChangeNode, err error) {
+	for _, root := range ct {
+		ok, err := condition(root)
+		if err != nil {
+			return nil, fmt.Errorf("failed while applying condition: %w", err)
+		}
+
+		if ok {
+			return root, nil
+		}
+	}
+
+	return nil, nil //nolint:nilnil
+}
+
+// findApplicable tries to retrieve an applicable change
+// from the tree, if it finds a change node then it will update the
+// tree roots with the change node's children otherwise it will
+// prune nodes that do not belong to the same chain as the `hash` argument
+func (ct *changeTree) findApplicable(hash common.Hash, number uint,
+	isDescendantOf isDescendantOfFunc) (changeNode *pendingChangeNode, err error) {
+
+	changeNode, err = ct.findApplicableChange(hash, number, isDescendantOf)
+	if err != nil {
+		return nil, err
+	}
+
+	if changeNode == nil {
+		err := ct.pruneChanges(hash, isDescendantOf)
+		if err != nil {
+			return nil, fmt.Errorf("cannot prune changes: %w", err)
+		}
+	} else {
+		*ct = make([]*pendingChangeNode, len(changeNode.nodes))
+		copy(*ct, changeNode.nodes)
+	}
+
+	return changeNode, nil
+}
+
+// findApplicableChange iterates through the change tree
+// roots looking for the change node which:
+// 1. contains the same hash as the one we're looking for.
+// 2. contains a lower or equal effective number as the one we're looking for.
+// 3. does not contain pending changes to be applied. 
+func (ct changeTree) findApplicableChange(hash common.Hash, number uint, + isDescendantOf isDescendantOfFunc) (changeNode *pendingChangeNode, err error) { + return ct.lookupChangeWhere(func(pcn *pendingChangeNode) (bool, error) { + if pcn.change.effectiveNumber() > number { + return false, nil + } + + changeNodeHash := pcn.change.announcingHeader.Hash() + if !hash.Equal(changeNodeHash) { + isDescendant, err := isDescendantOf(changeNodeHash, hash) + if err != nil { + return false, fmt.Errorf("cannot verify ancestry: %w", err) + } + + if !isDescendant { + return false, nil + } + } + + // the changes must be applied in order, so we need to check if our finalized header + // is ahead of any children, if it is that means some previous change was not applied + for _, child := range pcn.nodes { + isDescendant, err := isDescendantOf(child.change.announcingHeader.Hash(), hash) + if err != nil { + return false, fmt.Errorf("cannot verify ancestry: %w", err) + } + + if child.change.announcingHeader.Number <= number && isDescendant { + return false, errUnfinalizedAncestor + } + } + + return true, nil + }) +} + +// pruneChanges will remove changes whose are not descendant of the hash argument +// this function updates the current state of the change tree +func (ct *changeTree) pruneChanges(hash common.Hash, isDescendantOf isDescendantOfFunc) error { + onBranchChanges := []*pendingChangeNode{} + + for _, root := range *ct { + scheduledChangeHash := root.change.announcingHeader.Hash() + + isDescendant, err := isDescendantOf(hash, scheduledChangeHash) + if err != nil { + return fmt.Errorf("cannot verify ancestry: %w", err) + } + + if isDescendant { + onBranchChanges = append(onBranchChanges, root) + } + } + + *ct = onBranchChanges + return nil +} diff --git a/dot/state/grandpa_test.go b/dot/state/grandpa_test.go index 20bf45869a..7b58148b50 100644 --- a/dot/state/grandpa_test.go +++ b/dot/state/grandpa_test.go @@ -4,11 +4,19 @@ package state import ( + "fmt" "testing" + 
"github.com/ChainSafe/chaindb" "github.com/ChainSafe/gossamer/dot/types" + "github.com/ChainSafe/gossamer/lib/common" + "github.com/ChainSafe/gossamer/lib/crypto" "github.com/ChainSafe/gossamer/lib/crypto/ed25519" + "github.com/ChainSafe/gossamer/lib/crypto/sr25519" "github.com/ChainSafe/gossamer/lib/keystore" + "github.com/ChainSafe/gossamer/lib/trie" + "github.com/golang/mock/gomock" + "github.com/gtank/merlin" "github.com/stretchr/testify/require" ) @@ -22,7 +30,7 @@ var ( func TestNewGrandpaStateFromGenesis(t *testing.T) { db := NewInMemoryDB(t) - gs, err := NewGrandpaStateFromGenesis(db, testAuths) + gs, err := NewGrandpaStateFromGenesis(db, nil, testAuths) require.NoError(t, err) currSetID, err := gs.GetCurrentSetID() @@ -40,7 +48,7 @@ func TestNewGrandpaStateFromGenesis(t *testing.T) { func TestGrandpaState_SetNextChange(t *testing.T) { db := NewInMemoryDB(t) - gs, err := NewGrandpaStateFromGenesis(db, testAuths) + gs, err := NewGrandpaStateFromGenesis(db, nil, testAuths) require.NoError(t, err) err = gs.SetNextChange(testAuths, 1) @@ -57,7 +65,7 @@ func TestGrandpaState_SetNextChange(t *testing.T) { func TestGrandpaState_IncrementSetID(t *testing.T) { db := NewInMemoryDB(t) - gs, err := NewGrandpaStateFromGenesis(db, testAuths) + gs, err := NewGrandpaStateFromGenesis(db, nil, testAuths) require.NoError(t, err) setID, err := gs.IncrementSetID() @@ -67,7 +75,7 @@ func TestGrandpaState_IncrementSetID(t *testing.T) { func TestGrandpaState_GetSetIDByBlockNumber(t *testing.T) { db := NewInMemoryDB(t) - gs, err := NewGrandpaStateFromGenesis(db, testAuths) + gs, err := NewGrandpaStateFromGenesis(db, nil, testAuths) require.NoError(t, err) err = gs.SetNextChange(testAuths, 100) @@ -100,7 +108,7 @@ func TestGrandpaState_GetSetIDByBlockNumber(t *testing.T) { func TestGrandpaState_LatestRound(t *testing.T) { db := NewInMemoryDB(t) - gs, err := NewGrandpaStateFromGenesis(db, testAuths) + gs, err := NewGrandpaStateFromGenesis(db, nil, testAuths) require.NoError(t, err) 
r, err := gs.GetLatestRound() @@ -114,3 +122,1237 @@ func TestGrandpaState_LatestRound(t *testing.T) { require.NoError(t, err) require.Equal(t, uint64(99), r) } + +func testBlockState(t *testing.T, db chaindb.Database) *BlockState { + ctrl := gomock.NewController(t) + telemetryMock := NewMockClient(ctrl) + telemetryMock.EXPECT().SendMessage(gomock.Any()).AnyTimes() + header := testGenesisHeader + + bs, err := NewBlockStateFromGenesis(db, newTriesEmpty(), header, telemetryMock) + require.NoError(t, err) + + // loads in-memory tries with genesis state root, should be deleted + // after another block is finalised + tr := trie.NewEmptyTrie() + err = tr.Load(bs.db, header.StateRoot) + require.NoError(t, err) + bs.tries.softSet(header.StateRoot, tr) + + return bs +} + +func TestAddScheduledChangesKeepTheRightForkTree(t *testing.T) { + t.Parallel() + + keyring, err := keystore.NewSr25519Keyring() + require.NoError(t, err) + + db := NewInMemoryDB(t) + blockState := testBlockState(t, db) + + gs, err := NewGrandpaStateFromGenesis(db, blockState, nil) + + /* + * create chainA and two forks: chainB and chainC + * + * / -> 3 -> 4 -> 5 -> 6 -> 7 -> 8 -> 9 -> 10 -> 11 (B) + * 1 -> 2 -> 3 -> 4 -> 5 -> 6 -> 7 -> 8 -> 9 -> 10 -> 11 (A) + * \ -> 7 -> 8 -> 9 -> 10 -> 11 -> 12 -> 13 -> 14 -> 15 -> 16 (C) + */ + chainA := issueBlocksWithBABEPrimary(t, keyring.KeyAlice, gs.blockState, testGenesisHeader, 10) + chainB := issueBlocksWithBABEPrimary(t, keyring.KeyBob, gs.blockState, chainA[1], 9) + chainC := issueBlocksWithBABEPrimary(t, keyring.KeyCharlie, gs.blockState, chainA[5], 10) + + scheduledChange := &types.GrandpaScheduledChange{ + Delay: 0, // delay of 0 means the modifications should be applied immediately + Auths: []types.GrandpaAuthoritiesRaw{ + {Key: keyring.KeyAlice.Public().(*sr25519.PublicKey).AsBytes()}, + {Key: keyring.KeyBob.Public().(*sr25519.PublicKey).AsBytes()}, + {Key: keyring.KeyCharlie.Public().(*sr25519.PublicKey).AsBytes()}, + }, + } + + // headersToAdd enables 
tracking error while adding expecific entries + // to the scheduled change fork tree, eg. + // - adding duplicate hashes entries: while adding the first entry everything should be ok, + // however when adding the second duplicated entry we should expect the errDuplicateHashes error + type headersToAdd struct { + header *types.Header + wantErr error + } + + tests := map[string]struct { + headersWithScheduledChanges []headersToAdd + expectedRoots int + highestFinalizedHeader *types.Header + }{ + "add_scheduled_changes_only_with_roots": { + headersWithScheduledChanges: []headersToAdd{ + {header: chainA[6]}, + {header: chainB[3]}, + }, + expectedRoots: 2, + }, + "add_scheduled_changes_with_roots_and_children": { + headersWithScheduledChanges: []headersToAdd{ + {header: chainA[6]}, {header: chainA[8]}, + {header: chainB[3]}, {header: chainB[7]}, {header: chainB[9]}, + {header: chainC[8]}, + }, + expectedRoots: 3, + }, + "add_scheduled_changes_with_same_hash": { + headersWithScheduledChanges: []headersToAdd{ + {header: chainA[3]}, + { + header: chainA[3], + wantErr: fmt.Errorf("cannot import scheduled change: %w: %s", + errDuplicateHashes, chainA[3].Hash()), + }, + }, + expectedRoots: 0, + }, + } + + for tname, tt := range tests { + tt := tt + t.Run(tname, func(t *testing.T) { + // clear the scheduledChangeRoots after the test ends + // this does not cause race condition because t.Run without + // t.Parallel() blocks until this function returns + defer func() { + gs.scheduledChangeRoots = new(changeTree) + }() + + updateHighestFinalizedHeaderOrDefault(t, gs.blockState, tt.highestFinalizedHeader, chainA[0]) + + for _, entry := range tt.headersWithScheduledChanges { + err := gs.addScheduledChange(entry.header, *scheduledChange) + + if entry.wantErr != nil { + require.Error(t, err) + require.EqualError(t, err, entry.wantErr.Error()) + return + } + + require.NoError(t, err) + } + + require.Len(t, *gs.scheduledChangeRoots, tt.expectedRoots) + + for _, root := range 
*gs.scheduledChangeRoots { + parentHash := root.change.announcingHeader.Hash() + assertDescendantChildren(t, parentHash, gs.blockState.IsDescendantOf, root.nodes) + } + }) + } +} + +func assertDescendantChildren(t *testing.T, parentHash common.Hash, isDescendantOfFunc isDescendantOfFunc, + changes changeTree) { + t.Helper() + + for _, scheduled := range changes { + scheduledChangeHash := scheduled.change.announcingHeader.Hash() + isDescendant, err := isDescendantOfFunc(parentHash, scheduledChangeHash) + require.NoError(t, err) + require.Truef(t, isDescendant, "%s is not descendant of %s", scheduledChangeHash, parentHash) + + assertDescendantChildren(t, scheduledChangeHash, isDescendantOfFunc, scheduled.nodes) + } +} + +// updateHighestFinalizedHeaderOrDefault will update the current highest finalized header +// with the value of newHighest, if the newHighest is nil then it will use the def value +func updateHighestFinalizedHeaderOrDefault(t *testing.T, bs *BlockState, newHighest, def *types.Header) { + t.Helper() + + round, setID, err := bs.GetHighestRoundAndSetID() + require.NoError(t, err) + + if newHighest != nil { + bs.db.Put(finalisedHashKey(round, setID), newHighest.Hash().ToBytes()) + } else { + bs.db.Put(finalisedHashKey(round, setID), def.Hash().ToBytes()) + } +} + +func TestForcedScheduledChangesOrder(t *testing.T) { + t.Parallel() + + keyring, err := keystore.NewSr25519Keyring() + require.NoError(t, err) + + db := NewInMemoryDB(t) + blockState := testBlockState(t, db) + + gs, err := NewGrandpaStateFromGenesis(db, blockState, nil) + require.NoError(t, err) + + aliceHeaders := issueBlocksWithBABEPrimary(t, keyring.KeyAlice, gs.blockState, + testGenesisHeader, 5) + + bobHeaders := issueBlocksWithBABEPrimary(t, keyring.KeyBob, gs.blockState, + aliceHeaders[1], 5) + + charlieHeaders := issueBlocksWithBABEPrimary(t, keyring.KeyCharlie, gs.blockState, + aliceHeaders[2], 6) + + forcedChanges := map[*types.Header]types.GrandpaForcedChange{ + bobHeaders[1]: { + 
Delay: 1, + Auths: []types.GrandpaAuthoritiesRaw{ + {Key: keyring.KeyAlice.Public().(*sr25519.PublicKey).AsBytes()}, + {Key: keyring.KeyBob.Public().(*sr25519.PublicKey).AsBytes()}, + {Key: keyring.KeyCharlie.Public().(*sr25519.PublicKey).AsBytes()}, + }, + }, + aliceHeaders[3]: { + Delay: 5, + Auths: []types.GrandpaAuthoritiesRaw{ + {Key: keyring.KeyAlice.Public().(*sr25519.PublicKey).AsBytes()}, + {Key: keyring.KeyBob.Public().(*sr25519.PublicKey).AsBytes()}, + {Key: keyring.KeyCharlie.Public().(*sr25519.PublicKey).AsBytes()}, + }, + }, + charlieHeaders[4]: { + Delay: 3, + Auths: []types.GrandpaAuthoritiesRaw{ + {Key: keyring.KeyAlice.Public().(*sr25519.PublicKey).AsBytes()}, + {Key: keyring.KeyBob.Public().(*sr25519.PublicKey).AsBytes()}, + {Key: keyring.KeyCharlie.Public().(*sr25519.PublicKey).AsBytes()}, + }, + }, + } + + for header, fc := range forcedChanges { + err := gs.addForcedChange(header, fc) + require.NoError(t, err, "failed to add forced change") + } + + forcedChangesSlice := *gs.forcedChanges + for idx := 0; idx < gs.forcedChanges.Len()-1; idx++ { + currentChange := forcedChangesSlice[idx] + nextChange := forcedChangesSlice[idx+1] + + require.LessOrEqual(t, currentChange.effectiveNumber(), + nextChange.effectiveNumber()) + + require.LessOrEqual(t, currentChange.announcingHeader.Number, + nextChange.announcingHeader.Number) + } +} + +func TestShouldNotAddMoreThanOneForcedChangeInTheSameFork(t *testing.T) { + t.Parallel() + + keyring, err := keystore.NewSr25519Keyring() + require.NoError(t, err) + + db := NewInMemoryDB(t) + blockState := testBlockState(t, db) + + gs, err := NewGrandpaStateFromGenesis(db, blockState, nil) + require.NoError(t, err) + + aliceHeaders := issueBlocksWithBABEPrimary(t, keyring.KeyAlice, gs.blockState, + testGenesisHeader, 5) + + bobHeaders := issueBlocksWithBABEPrimary(t, keyring.KeyBob, gs.blockState, + aliceHeaders[1], 5) + + someForcedChange := types.GrandpaForcedChange{ + Delay: 1, + Auths: []types.GrandpaAuthoritiesRaw{ 
+ {Key: keyring.KeyAlice.Public().(*sr25519.PublicKey).AsBytes()}, + {Key: keyring.KeyBob.Public().(*sr25519.PublicKey).AsBytes()}, + {Key: keyring.KeyCharlie.Public().(*sr25519.PublicKey).AsBytes()}, + }, + } + + // adding more than one forced changes in the same branch + err = gs.addForcedChange(aliceHeaders[3], someForcedChange) + require.NoError(t, err) + + err = gs.addForcedChange(aliceHeaders[4], someForcedChange) + require.Error(t, err) + require.ErrorIs(t, err, errAlreadyHasForcedChange) + + // adding the same forced change twice + err = gs.addForcedChange(bobHeaders[2], someForcedChange) + require.NoError(t, err) + + err = gs.addForcedChange(bobHeaders[2], someForcedChange) + require.Error(t, err) + require.ErrorIs(t, err, errDuplicateHashes) +} + +func issueBlocksWithBABEPrimary(t *testing.T, kp *sr25519.Keypair, + bs *BlockState, parentHeader *types.Header, size int) (headers []*types.Header) { + t.Helper() + + transcript := merlin.NewTranscript("BABE") + crypto.AppendUint64(transcript, []byte("slot number"), 1) + crypto.AppendUint64(transcript, []byte("current epoch"), 1) + transcript.AppendMessage([]byte("chain randomness"), []byte{}) + + output, proof, err := kp.VrfSign(transcript) + require.NoError(t, err) + + babePrimaryPreDigest := types.BabePrimaryPreDigest{ + SlotNumber: 1, + VRFOutput: output, + VRFProof: proof, + } + + preRuntimeDigest, err := babePrimaryPreDigest.ToPreRuntimeDigest() + require.NoError(t, err) + + digest := types.NewDigest() + + require.NoError(t, digest.Add(*preRuntimeDigest)) + header := &types.Header{ + ParentHash: parentHeader.Hash(), + Number: parentHeader.Number + 1, + Digest: digest, + } + + block := &types.Block{ + Header: *header, + Body: *types.NewBody([]types.Extrinsic{}), + } + + err = bs.AddBlock(block) + require.NoError(t, err) + + if size <= 0 { + headers = append(headers, header) + return headers + } + + headers = append(headers, header) + headers = append(headers, issueBlocksWithBABEPrimary(t, kp, bs, header, 
size-1)...) + return headers +} + +func TestNextGrandpaAuthorityChange(t *testing.T) { + t.Parallel() + + keyring, err := keystore.NewSr25519Keyring() + require.NoError(t, err) + + tests := map[string]struct { + forcedChange *types.GrandpaForcedChange + forcedChangeAnnoucingIndex int + + scheduledChange *types.GrandpaScheduledChange + scheduledChangeAnnoucingIndex int + + wantErr error + expectedBlockNumber uint + }{ + "no_forced_change_no_scheduled_change": { + wantErr: ErrNoNextAuthorityChange, + }, + "only_forced_change": { + forcedChangeAnnoucingIndex: 2, // in the chain headers slice the index 2 == block number 3 + forcedChange: &types.GrandpaForcedChange{ + Delay: 2, + Auths: []types.GrandpaAuthoritiesRaw{ + {Key: keyring.KeyAlice.Public().(*sr25519.PublicKey).AsBytes()}, + {Key: keyring.KeyBob.Public().(*sr25519.PublicKey).AsBytes()}, + {Key: keyring.KeyCharlie.Public().(*sr25519.PublicKey).AsBytes()}, + }, + }, + expectedBlockNumber: 5, + }, + "only_scheduled_change": { + scheduledChangeAnnoucingIndex: 3, // in the chain headers slice the index 3 == block number 4 + scheduledChange: &types.GrandpaScheduledChange{ + Delay: 4, + Auths: []types.GrandpaAuthoritiesRaw{ + {Key: keyring.KeyAlice.Public().(*sr25519.PublicKey).AsBytes()}, + {Key: keyring.KeyBob.Public().(*sr25519.PublicKey).AsBytes()}, + {Key: keyring.KeyCharlie.Public().(*sr25519.PublicKey).AsBytes()}, + }, + }, + expectedBlockNumber: 8, + }, + "forced_change_before_scheduled_change": { + forcedChangeAnnoucingIndex: 2, // in the chain headers slice the index 2 == block number 3 + forcedChange: &types.GrandpaForcedChange{ + Delay: 2, + Auths: []types.GrandpaAuthoritiesRaw{ + {Key: keyring.KeyAlice.Public().(*sr25519.PublicKey).AsBytes()}, + {Key: keyring.KeyBob.Public().(*sr25519.PublicKey).AsBytes()}, + {Key: keyring.KeyCharlie.Public().(*sr25519.PublicKey).AsBytes()}, + }, + }, + scheduledChangeAnnoucingIndex: 3, // in the chain headers slice the index 3 == block number 4 + scheduledChange: 
&types.GrandpaScheduledChange{ + Delay: 4, + Auths: []types.GrandpaAuthoritiesRaw{ + {Key: keyring.KeyAlice.Public().(*sr25519.PublicKey).AsBytes()}, + {Key: keyring.KeyBob.Public().(*sr25519.PublicKey).AsBytes()}, + {Key: keyring.KeyCharlie.Public().(*sr25519.PublicKey).AsBytes()}, + }, + }, + expectedBlockNumber: 5, // forced change occurs before the scheduled change + }, + "scheduled_change_before_forced_change": { + scheduledChangeAnnoucingIndex: 3, // in the chain headers slice the index 3 == block number 4 + scheduledChange: &types.GrandpaScheduledChange{ + Delay: 4, + Auths: []types.GrandpaAuthoritiesRaw{ + {Key: keyring.KeyAlice.Public().(*sr25519.PublicKey).AsBytes()}, + {Key: keyring.KeyBob.Public().(*sr25519.PublicKey).AsBytes()}, + {Key: keyring.KeyCharlie.Public().(*sr25519.PublicKey).AsBytes()}, + }, + }, + forcedChangeAnnoucingIndex: 8, // in the chain headers slice the index 8 == block number 9 + forcedChange: &types.GrandpaForcedChange{ + Delay: 1, + Auths: []types.GrandpaAuthoritiesRaw{ + {Key: keyring.KeyAlice.Public().(*sr25519.PublicKey).AsBytes()}, + {Key: keyring.KeyBob.Public().(*sr25519.PublicKey).AsBytes()}, + {Key: keyring.KeyCharlie.Public().(*sr25519.PublicKey).AsBytes()}, + }, + }, + expectedBlockNumber: 8, // scheduled change occurs before the forced change + }, + } + + for tname, tt := range tests { + tt := tt + t.Run(tname, func(t *testing.T) { + t.Parallel() + + db := NewInMemoryDB(t) + blockState := testBlockState(t, db) + + gs, err := NewGrandpaStateFromGenesis(db, blockState, nil) + require.NoError(t, err) + + const sizeOfChain = 10 + + chainHeaders := issueBlocksWithBABEPrimary(t, keyring.KeyAlice, gs.blockState, + testGenesisHeader, sizeOfChain) + + if tt.forcedChange != nil { + gs.addForcedChange(chainHeaders[tt.forcedChangeAnnoucingIndex], + *tt.forcedChange) + } + + if tt.scheduledChange != nil { + gs.addScheduledChange(chainHeaders[tt.scheduledChangeAnnoucingIndex], + *tt.scheduledChange) + } + + lastBlockOnChain := 
chainHeaders[sizeOfChain] + blockNumber, err := gs.NextGrandpaAuthorityChange(lastBlockOnChain.Hash(), lastBlockOnChain.Number) + + if tt.wantErr != nil { + require.Error(t, err) + require.EqualError(t, err, tt.wantErr.Error()) + require.Zero(t, blockNumber) + } else { + require.NoError(t, err) + require.Equal(t, tt.expectedBlockNumber, blockNumber) + } + }) + } +} + +func TestApplyForcedChanges(t *testing.T) { + keyring, err := keystore.NewSr25519Keyring() + require.NoError(t, err) + + genesisGrandpaVoters := []types.GrandpaAuthoritiesRaw{ + {Key: keyring.KeyAlice.Public().(*sr25519.PublicKey).AsBytes()}, + {Key: keyring.KeyBob.Public().(*sr25519.PublicKey).AsBytes()}, + {Key: keyring.KeyCharlie.Public().(*sr25519.PublicKey).AsBytes()}, + } + + genesisAuths, err := types.GrandpaAuthoritiesRawToAuthorities(genesisGrandpaVoters) + require.NoError(t, err) + + const sizeOfChain = 10 + genericForks := func(t *testing.T, blockState *BlockState) [][]*types.Header { + + /* + * create chainA and two forks: chainB and chainC + * + * / -> 3 -> 4 -> 5 -> 6 -> 7 -> 8 -> 9 -> 10 -> 11 -> 12 (B) + * 1 -> 2 -> 3 -> 4 -> 5 -> 6 -> 7 -> 8 -> 9 -> 10 -> 11 (A) + * \ -> 7 -> 8 -> 9 -> 10 -> 11 -> 12 -> 13 -> 14 -> 15 -> 16 (C) + */ + chainA := issueBlocksWithBABEPrimary(t, keyring.KeyAlice, blockState, testGenesisHeader, sizeOfChain) + chainB := issueBlocksWithBABEPrimary(t, keyring.KeyBob, blockState, chainA[1], sizeOfChain) + chainC := issueBlocksWithBABEPrimary(t, keyring.KeyCharlie, blockState, chainA[5], sizeOfChain) + + return [][]*types.Header{ + chainA, chainB, chainC, + } + } + + tests := map[string]struct { + wantErr error + // 2 index array where the 0 index describes the fork and the 1 index describes the header + importedHeader [2]int + expectedGRANDPAAuthoritySet []types.GrandpaAuthoritiesRaw + expectedSetID uint64 + + generateForks func(t *testing.T, blockState *BlockState) [][]*types.Header + changes func(*GrandpaState, [][]*types.Header) + }{ + "no_forced_changes": { 
+ generateForks: genericForks, + importedHeader: [2]int{0, 3}, // chain A from and header number 4 + expectedSetID: 0, + expectedGRANDPAAuthoritySet: genesisGrandpaVoters, + }, + "apply_forced_change_without_pending_scheduled_changes": { + generateForks: genericForks, + changes: func(gs *GrandpaState, headers [][]*types.Header) { + chainABlock8 := headers[0][7] + gs.addForcedChange(chainABlock8, types.GrandpaForcedChange{ + Delay: 2, + BestFinalizedBlock: 3, + Auths: []types.GrandpaAuthoritiesRaw{ + {Key: keyring.KeyCharlie.Public().(*sr25519.PublicKey).AsBytes()}, + {Key: keyring.KeyBob.Public().(*sr25519.PublicKey).AsBytes()}, + {Key: keyring.KeyDave.Public().(*sr25519.PublicKey).AsBytes()}, + }, + }) + + chainCBlock15 := headers[2][8] + gs.addForcedChange(chainCBlock15, types.GrandpaForcedChange{ + Delay: 1, + BestFinalizedBlock: 3, + Auths: []types.GrandpaAuthoritiesRaw{ + {Key: keyring.KeyCharlie.Public().(*sr25519.PublicKey).AsBytes()}, + {Key: keyring.KeyBob.Public().(*sr25519.PublicKey).AsBytes()}, + {Key: keyring.KeyDave.Public().(*sr25519.PublicKey).AsBytes()}, + }, + }) + }, + importedHeader: [2]int{0, 9}, // import block number 10 from fork A + expectedSetID: 1, + expectedGRANDPAAuthoritySet: []types.GrandpaAuthoritiesRaw{ + {Key: keyring.KeyCharlie.Public().(*sr25519.PublicKey).AsBytes()}, + {Key: keyring.KeyBob.Public().(*sr25519.PublicKey).AsBytes()}, + {Key: keyring.KeyDave.Public().(*sr25519.PublicKey).AsBytes()}, + }, + }, + "import_block_before_forced_change_should_do_nothing": { + generateForks: genericForks, + changes: func(gs *GrandpaState, headers [][]*types.Header) { + chainCBlock9 := headers[2][2] + gs.addForcedChange(chainCBlock9, types.GrandpaForcedChange{ + Delay: 3, + BestFinalizedBlock: 3, + Auths: []types.GrandpaAuthoritiesRaw{ + {Key: keyring.KeyCharlie.Public().(*sr25519.PublicKey).AsBytes()}, + {Key: keyring.KeyBob.Public().(*sr25519.PublicKey).AsBytes()}, + {Key: keyring.KeyDave.Public().(*sr25519.PublicKey).AsBytes()}, + }, + }) 
+ }, + importedHeader: [2]int{2, 1}, // import block number 7 from chain C + expectedSetID: 0, + expectedGRANDPAAuthoritySet: genesisGrandpaVoters, + }, + "import_block_from_another_fork_should_do_nothing": { + generateForks: genericForks, + changes: func(gs *GrandpaState, headers [][]*types.Header) { + chainCBlock9 := headers[2][2] + gs.addForcedChange(chainCBlock9, types.GrandpaForcedChange{ + Delay: 3, + BestFinalizedBlock: 3, + Auths: []types.GrandpaAuthoritiesRaw{ + {Key: keyring.KeyCharlie.Public().(*sr25519.PublicKey).AsBytes()}, + {Key: keyring.KeyBob.Public().(*sr25519.PublicKey).AsBytes()}, + {Key: keyring.KeyDave.Public().(*sr25519.PublicKey).AsBytes()}, + }, + }) + }, + importedHeader: [2]int{1, 9}, // import block number 12 from chain B + expectedSetID: 0, + expectedGRANDPAAuthoritySet: genesisGrandpaVoters, + }, + "apply_forced_change_with_pending_scheduled_changes_should_fail": { + generateForks: genericForks, + changes: func(gs *GrandpaState, headers [][]*types.Header) { + chainBBlock6 := headers[1][3] + gs.addScheduledChange(chainBBlock6, types.GrandpaScheduledChange{ + Delay: 0, + Auths: []types.GrandpaAuthoritiesRaw{ + {Key: keyring.KeyDave.Public().(*sr25519.PublicKey).AsBytes()}, + }, + }) + + chainCBlock9 := headers[2][2] + gs.addForcedChange(chainCBlock9, types.GrandpaForcedChange{ + Delay: 3, + BestFinalizedBlock: 3, + Auths: []types.GrandpaAuthoritiesRaw{ + {Key: keyring.KeyCharlie.Public().(*sr25519.PublicKey).AsBytes()}, + {Key: keyring.KeyBob.Public().(*sr25519.PublicKey).AsBytes()}, + {Key: keyring.KeyDave.Public().(*sr25519.PublicKey).AsBytes()}, + }, + }) + + chainBBlock9 := headers[1][6] + gs.addForcedChange(chainBBlock9, types.GrandpaForcedChange{ + Delay: 2, + BestFinalizedBlock: 6, + Auths: []types.GrandpaAuthoritiesRaw{ + {Key: keyring.KeyCharlie.Public().(*sr25519.PublicKey).AsBytes()}, + {Key: keyring.KeyIan.Public().(*sr25519.PublicKey).AsBytes()}, + {Key: keyring.KeyEve.Public().(*sr25519.PublicKey).AsBytes()}, + }, + }) + }, 
+ importedHeader: [2]int{1, 8}, // block number 11 imported + wantErr: errPendingScheduledChanges, + expectedGRANDPAAuthoritySet: genesisGrandpaVoters, + expectedSetID: 0, + }, + } + + for tname, tt := range tests { + tt := tt + + t.Run(tname, func(t *testing.T) { + t.Parallel() + + db := NewInMemoryDB(t) + blockState := testBlockState(t, db) + + voters := types.NewGrandpaVotersFromAuthorities(genesisAuths) + gs, err := NewGrandpaStateFromGenesis(db, blockState, voters) + require.NoError(t, err) + + forks := tt.generateForks(t, blockState) + if tt.changes != nil { + tt.changes(gs, forks) + } + + selectedFork := forks[tt.importedHeader[0]] + selectedImportedHeader := selectedFork[tt.importedHeader[1]] + + err = gs.ApplyForcedChanges(selectedImportedHeader) + if tt.wantErr != nil { + require.Error(t, err) + require.ErrorIs(t, err, tt.wantErr) + } else { + require.NoError(t, err) + } + + currentSetID, err := gs.GetCurrentSetID() + require.NoError(t, err) + require.Equal(t, tt.expectedSetID, currentSetID) + + expectedAuths, err := types.GrandpaAuthoritiesRawToAuthorities(tt.expectedGRANDPAAuthoritySet) + require.NoError(t, err) + expectedVoters := types.NewGrandpaVotersFromAuthorities(expectedAuths) + + gotVoters, err := gs.GetAuthorities(tt.expectedSetID) + require.NoError(t, err) + + require.Equal(t, expectedVoters, gotVoters) + }) + } +} + +func TestApplyScheduledChangesKeepDescendantForcedChanges(t *testing.T) { + keyring, err := keystore.NewSr25519Keyring() + require.NoError(t, err) + + genesisGrandpaVoters := []types.GrandpaAuthoritiesRaw{ + {Key: keyring.KeyAlice.Public().(*sr25519.PublicKey).AsBytes()}, + {Key: keyring.KeyBob.Public().(*sr25519.PublicKey).AsBytes()}, + {Key: keyring.KeyCharlie.Public().(*sr25519.PublicKey).AsBytes()}, + } + + genesisAuths, err := types.GrandpaAuthoritiesRawToAuthorities(genesisGrandpaVoters) + require.NoError(t, err) + + const sizeOfChain = 10 + genericForks := func(t *testing.T, blockState *BlockState) [][]*types.Header { + + 
/* + * create chainA and two forks: chainB and chainC + * + * / -> 3 -> 4 -> 5 -> 6 -> 7 -> 8 -> 9 -> 10 -> 11 -> 12 (B) + * 1 -> 2 -> 3 -> 4 -> 5 -> 6 -> 7 -> 8 -> 9 -> 10 -> 11 (A) + * \ -> 7 -> 8 -> 9 -> 10 -> 11 -> 12 -> 13 -> 14 -> 15 -> 16 (C) + */ + chainA := issueBlocksWithBABEPrimary(t, keyring.KeyAlice, blockState, testGenesisHeader, sizeOfChain) + chainB := issueBlocksWithBABEPrimary(t, keyring.KeyBob, blockState, chainA[1], sizeOfChain) + chainC := issueBlocksWithBABEPrimary(t, keyring.KeyCharlie, blockState, chainA[5], sizeOfChain) + + return [][]*types.Header{ + chainA, chainB, chainC, + } + } + + tests := map[string]struct { + finalizedHeader [2]int // 2 index array where the 0 index describes the fork and the 1 index describes the header + + generateForks func(*testing.T, *BlockState) [][]*types.Header + changes func(*GrandpaState, [][]*types.Header) + + wantErr error + + expectedForcedChangesLen int + }{ + "no_forced_changes": { + generateForks: genericForks, + expectedForcedChangesLen: 0, + }, + "finalized_hash_should_keep_descendant_forced_changes": { + generateForks: genericForks, + expectedForcedChangesLen: 1, + changes: func(gs *GrandpaState, headers [][]*types.Header) { + chainABlock6 := headers[0][5] + gs.addForcedChange(chainABlock6, types.GrandpaForcedChange{ + Delay: 1, + BestFinalizedBlock: 3, + Auths: []types.GrandpaAuthoritiesRaw{ + {Key: keyring.KeyCharlie.Public().(*sr25519.PublicKey).AsBytes()}, + {Key: keyring.KeyBob.Public().(*sr25519.PublicKey).AsBytes()}, + {Key: keyring.KeyDave.Public().(*sr25519.PublicKey).AsBytes()}, + }, + }) + + chainBBlock6 := headers[1][3] + gs.addForcedChange(chainBBlock6, types.GrandpaForcedChange{ + Delay: 2, + BestFinalizedBlock: 3, + Auths: []types.GrandpaAuthoritiesRaw{ + {Key: keyring.KeyCharlie.Public().(*sr25519.PublicKey).AsBytes()}, + {Key: keyring.KeyBob.Public().(*sr25519.PublicKey).AsBytes()}, + {Key: keyring.KeyDave.Public().(*sr25519.PublicKey).AsBytes()}, + }, + }) + }, + finalizedHeader: 
[2]int{0, 3}, //finalize header number 4 from chain A + }, + } + + for tname, tt := range tests { + tt := tt + t.Run(tname, func(t *testing.T) { + t.Parallel() + + db := NewInMemoryDB(t) + blockState := testBlockState(t, db) + + voters := types.NewGrandpaVotersFromAuthorities(genesisAuths) + gs, err := NewGrandpaStateFromGenesis(db, blockState, voters) + require.NoError(t, err) + + forks := tt.generateForks(t, gs.blockState) + + if tt.changes != nil { + tt.changes(gs, forks) + } + + selectedFork := forks[tt.finalizedHeader[0]] + selectedFinalizedHeader := selectedFork[tt.finalizedHeader[1]] + + err = gs.forcedChanges.pruneChanges(selectedFinalizedHeader.Hash(), gs.blockState.IsDescendantOf) + if tt.wantErr != nil { + require.EqualError(t, err, tt.wantErr.Error()) + } else { + require.NoError(t, err) + + require.Len(t, *gs.forcedChanges, tt.expectedForcedChangesLen) + + for _, forcedChange := range *gs.forcedChanges { + isDescendant, err := gs.blockState.IsDescendantOf( + selectedFinalizedHeader.Hash(), forcedChange.announcingHeader.Hash()) + + require.NoError(t, err) + require.True(t, isDescendant) + } + } + }) + } +} + +func TestApplyScheduledChangeGetApplicableChange(t *testing.T) { + keyring, err := keystore.NewSr25519Keyring() + require.NoError(t, err) + + genesisGrandpaVoters := []types.GrandpaAuthoritiesRaw{ + {Key: keyring.KeyAlice.Public().(*sr25519.PublicKey).AsBytes()}, + {Key: keyring.KeyBob.Public().(*sr25519.PublicKey).AsBytes()}, + {Key: keyring.KeyCharlie.Public().(*sr25519.PublicKey).AsBytes()}, + } + + genesisAuths, err := types.GrandpaAuthoritiesRawToAuthorities(genesisGrandpaVoters) + require.NoError(t, err) + + const sizeOfChain = 10 + genericForks := func(t *testing.T, blockState *BlockState) [][]*types.Header { + /* + * create chainA and two forks: chainB and chainC + * + * / -> 3 -> 4 -> 5 -> 6 -> 7 -> 8 -> 9 -> 10 -> 11 -> 12 (B) + * 1 -> 2 -> 3 -> 4 -> 5 -> 6 -> 7 -> 8 -> 9 -> 10 -> 11 (A) + * \ -> 7 -> 8 -> 9 -> 10 -> 11 -> 12 -> 13 -> 14 
-> 15 -> 16 (C) + */ + chainA := issueBlocksWithBABEPrimary(t, keyring.KeyAlice, blockState, testGenesisHeader, sizeOfChain) + chainB := issueBlocksWithBABEPrimary(t, keyring.KeyBob, blockState, chainA[1], sizeOfChain) + chainC := issueBlocksWithBABEPrimary(t, keyring.KeyCharlie, blockState, chainA[5], sizeOfChain) + + return [][]*types.Header{ + chainA, chainB, chainC, + } + } + + tests := map[string]struct { + finalizedHeader [2]int + changes func(*GrandpaState, [][]*types.Header) + generateForks func(*testing.T, *BlockState) [][]*types.Header + wantErr error + expectedChange *pendingChange + expectedScheduledChangeRootsLen int + }{ + "empty_scheduled_changes": { + generateForks: genericForks, + finalizedHeader: [2]int{0, 1}, // finalized block from chainA header number 2 + }, + "scheduled_change_being_finalized_should_be_applied": { + generateForks: genericForks, + changes: func(gs *GrandpaState, headers [][]*types.Header) { + chainABlock6 := headers[0][5] + gs.addScheduledChange(chainABlock6, types.GrandpaScheduledChange{ + Delay: 0, + Auths: []types.GrandpaAuthoritiesRaw{ + {Key: keyring.KeyCharlie.Public().(*sr25519.PublicKey).AsBytes()}, + {Key: keyring.KeyIan.Public().(*sr25519.PublicKey).AsBytes()}, + {Key: keyring.KeyEve.Public().(*sr25519.PublicKey).AsBytes()}, + }, + }) + }, + expectedChange: &pendingChange{ + delay: 0, + nextAuthorities: func() []types.Authority { + auths, _ := types.GrandpaAuthoritiesRawToAuthorities( + []types.GrandpaAuthoritiesRaw{ + {Key: keyring.KeyCharlie.Public().(*sr25519.PublicKey).AsBytes()}, + {Key: keyring.KeyIan.Public().(*sr25519.PublicKey).AsBytes()}, + {Key: keyring.KeyEve.Public().(*sr25519.PublicKey).AsBytes()}, + }, + ) + return auths + }(), + }, + finalizedHeader: [2]int{0, 5}, // finalize block number 6 from chain A + }, + "apply_change_and_update_scheduled_changes_with_the_children": { + generateForks: genericForks, + changes: func(gs *GrandpaState, headers [][]*types.Header) { + chainBBlock4 := headers[1][1] // 
block number 4 from chain B + gs.addScheduledChange(chainBBlock4, types.GrandpaScheduledChange{ + Delay: 0, + Auths: []types.GrandpaAuthoritiesRaw{ + {Key: keyring.KeyBob.Public().(*sr25519.PublicKey).AsBytes()}, + {Key: keyring.KeyFerdie.Public().(*sr25519.PublicKey).AsBytes()}, + {Key: keyring.KeyGeorge.Public().(*sr25519.PublicKey).AsBytes()}, + }, + }) + + chainBBlock7 := headers[1][4] // block number 7 from chain B + gs.addScheduledChange(chainBBlock7, types.GrandpaScheduledChange{ + Delay: 0, + Auths: []types.GrandpaAuthoritiesRaw{ + {Key: keyring.KeyCharlie.Public().(*sr25519.PublicKey).AsBytes()}, + {Key: keyring.KeyAlice.Public().(*sr25519.PublicKey).AsBytes()}, + {Key: keyring.KeyIan.Public().(*sr25519.PublicKey).AsBytes()}, + }, + }) + + chainCBlock7 := headers[2][0] // block number 7 from chain C + gs.addScheduledChange(chainCBlock7, types.GrandpaScheduledChange{ + Delay: 0, + Auths: []types.GrandpaAuthoritiesRaw{ + {Key: keyring.KeyCharlie.Public().(*sr25519.PublicKey).AsBytes()}, + {Key: keyring.KeyIan.Public().(*sr25519.PublicKey).AsBytes()}, + {Key: keyring.KeyEve.Public().(*sr25519.PublicKey).AsBytes()}, + }, + }) + }, + finalizedHeader: [2]int{1, 1}, // finalize block number 6 from chain A + expectedScheduledChangeRootsLen: 1, + expectedChange: &pendingChange{ + delay: 0, + nextAuthorities: func() []types.Authority { + auths, _ := types.GrandpaAuthoritiesRawToAuthorities( + []types.GrandpaAuthoritiesRaw{ + {Key: keyring.KeyBob.Public().(*sr25519.PublicKey).AsBytes()}, + {Key: keyring.KeyFerdie.Public().(*sr25519.PublicKey).AsBytes()}, + {Key: keyring.KeyGeorge.Public().(*sr25519.PublicKey).AsBytes()}, + }, + ) + return auths + }(), + }, + }, + "finalized_header_with_no_scheduled_change_should_purge_other_pending_changes": { + generateForks: genericForks, + expectedScheduledChangeRootsLen: 1, + changes: func(gs *GrandpaState, headers [][]*types.Header) { + chainABlock8 := headers[0][7] // block 8 from chain A should keep + 
gs.addScheduledChange(chainABlock8, types.GrandpaScheduledChange{ + Delay: 0, + Auths: []types.GrandpaAuthoritiesRaw{ + {Key: keyring.KeyCharlie.Public().(*sr25519.PublicKey).AsBytes()}, + {Key: keyring.KeyIan.Public().(*sr25519.PublicKey).AsBytes()}, + {Key: keyring.KeyEve.Public().(*sr25519.PublicKey).AsBytes()}, + }, + }) + + chainBBlock9 := headers[1][6] // block 9 from chain B should be pruned + gs.addScheduledChange(chainBBlock9, types.GrandpaScheduledChange{ + Delay: 0, + Auths: []types.GrandpaAuthoritiesRaw{ + {Key: keyring.KeyCharlie.Public().(*sr25519.PublicKey).AsBytes()}, + {Key: keyring.KeyIan.Public().(*sr25519.PublicKey).AsBytes()}, + {Key: keyring.KeyEve.Public().(*sr25519.PublicKey).AsBytes()}, + }, + }) + + chainCBlock8 := headers[2][1] // block 8 from chain C should be pruned + gs.addScheduledChange(chainCBlock8, types.GrandpaScheduledChange{ + Delay: 0, + Auths: []types.GrandpaAuthoritiesRaw{ + {Key: keyring.KeyCharlie.Public().(*sr25519.PublicKey).AsBytes()}, + {Key: keyring.KeyIan.Public().(*sr25519.PublicKey).AsBytes()}, + {Key: keyring.KeyEve.Public().(*sr25519.PublicKey).AsBytes()}, + }, + }) + }, + finalizedHeader: [2]int{0, 6}, // finalize block number 7 from chain A + }, + "finalising_header_with_pending_changes_should_return_unfinalized_acestor": { + generateForks: genericForks, + changes: func(gs *GrandpaState, headers [][]*types.Header) { + chainABlock4 := headers[0][3] // block 4 from chain A + gs.addScheduledChange(chainABlock4, types.GrandpaScheduledChange{ + Delay: 0, + Auths: []types.GrandpaAuthoritiesRaw{ + {Key: keyring.KeyAlice.Public().(*sr25519.PublicKey).AsBytes()}, + {Key: keyring.KeyIan.Public().(*sr25519.PublicKey).AsBytes()}, + {Key: keyring.KeyEve.Public().(*sr25519.PublicKey).AsBytes()}, + }, + }) + + // change on block 5 from chain A should be a child + // node of scheduled change on block 4 from chain A + chainABlock5 := headers[0][5] + gs.addScheduledChange(chainABlock5, types.GrandpaScheduledChange{ + Delay: 0, + 
Auths: []types.GrandpaAuthoritiesRaw{ + {Key: keyring.KeyBob.Public().(*sr25519.PublicKey).AsBytes()}, + {Key: keyring.KeyIan.Public().(*sr25519.PublicKey).AsBytes()}, + {Key: keyring.KeyEve.Public().(*sr25519.PublicKey).AsBytes()}, + }, + }) + }, + finalizedHeader: [2]int{0, 6}, // finalize block number 7 from chain A + wantErr: fmt.Errorf("failed while applying condition: %w", errUnfinalizedAncestor), + }, + } + + for tname, tt := range tests { + tt := tt + t.Run(tname, func(t *testing.T) { + t.Parallel() + + db := NewInMemoryDB(t) + blockState := testBlockState(t, db) + + voters := types.NewGrandpaVotersFromAuthorities(genesisAuths) + gs, err := NewGrandpaStateFromGenesis(db, blockState, voters) + require.NoError(t, err) + + forks := tt.generateForks(t, gs.blockState) + + if tt.changes != nil { + tt.changes(gs, forks) + } + + // saving the current state of scheduled changes to compare + // with the next state in the case of an error (should keep the same) + previousScheduledChanges := gs.scheduledChangeRoots + + selectedChain := forks[tt.finalizedHeader[0]] + selectedHeader := selectedChain[tt.finalizedHeader[1]] + + changeNode, err := gs.scheduledChangeRoots.findApplicable(selectedHeader.Hash(), + selectedHeader.Number, gs.blockState.IsDescendantOf) + if tt.wantErr != nil { + require.EqualError(t, err, tt.wantErr.Error()) + require.Equal(t, previousScheduledChanges, gs.scheduledChangeRoots) + return + } + + if tt.expectedChange != nil { + require.NoError(t, err) + require.Equal(t, tt.expectedChange.delay, changeNode.change.delay) + require.Equal(t, tt.expectedChange.nextAuthorities, changeNode.change.nextAuthorities) + } else { + require.Nil(t, changeNode) + } + + require.Len(t, *gs.scheduledChangeRoots, tt.expectedScheduledChangeRootsLen) + // make sure all the next scheduled changes are descendant of the finalized hash + assertDescendantChildren(t, + selectedHeader.Hash(), gs.blockState.IsDescendantOf, *gs.scheduledChangeRoots) + }) + } +} + +func 
TestApplyScheduledChange(t *testing.T) { + keyring, err := keystore.NewSr25519Keyring() + require.NoError(t, err) + + genesisGrandpaVoters := []types.GrandpaAuthoritiesRaw{ + {Key: keyring.KeyAlice.Public().(*sr25519.PublicKey).AsBytes()}, + {Key: keyring.KeyBob.Public().(*sr25519.PublicKey).AsBytes()}, + {Key: keyring.KeyCharlie.Public().(*sr25519.PublicKey).AsBytes()}, + } + + const sizeOfChain = 10 + genericForks := func(t *testing.T, blockState *BlockState) [][]*types.Header { + /* + * create chainA and two forks: chainB and chainC + * + * / -> 3 -> 4 -> 5 -> 6 -> 7 -> 8 -> 9 -> 10 -> 11 -> 12 (B) + * 1 -> 2 -> 3 -> 4 -> 5 -> 6 -> 7 -> 8 -> 9 -> 10 -> 11 (A) + * \ -> 7 -> 8 -> 9 -> 10 -> 11 -> 12 -> 13 -> 14 -> 15 -> 16 (C) + */ + chainA := issueBlocksWithBABEPrimary(t, keyring.KeyAlice, blockState, testGenesisHeader, sizeOfChain) + chainB := issueBlocksWithBABEPrimary(t, keyring.KeyBob, blockState, chainA[1], sizeOfChain) + chainC := issueBlocksWithBABEPrimary(t, keyring.KeyCharlie, blockState, chainA[5], sizeOfChain) + + return [][]*types.Header{ + chainA, chainB, chainC, + } + } + + tests := map[string]struct { + finalizedHeader [2]int // 2 index array where the 0 index describes the fork and the 1 index describes the header + + generateForks func(*testing.T, *BlockState) [][]*types.Header + changes func(*GrandpaState, [][]*types.Header) + + wantErr error + expectedScheduledChangeRootsLen int + expectedForcedChangesLen int + expectedSetID uint64 + expectedAuthoritySet []types.GrandpaVoter + changeSetIDAt uint + }{ + "empty_scheduled_changes_only_update_the_forced_changes": { + generateForks: genericForks, + changes: func(gs *GrandpaState, headers [][]*types.Header) { + chainABlock6 := headers[0][5] // block number 6 from chain A + gs.addForcedChange(chainABlock6, types.GrandpaForcedChange{ + Delay: 1, + BestFinalizedBlock: 3, + Auths: []types.GrandpaAuthoritiesRaw{ + {Key: keyring.KeyCharlie.Public().(*sr25519.PublicKey).AsBytes()}, + {Key: 
keyring.KeyBob.Public().(*sr25519.PublicKey).AsBytes()}, + {Key: keyring.KeyDave.Public().(*sr25519.PublicKey).AsBytes()}, + }, + }) + + chainBBlock6 := headers[1][3] // block number 6 from chain B + gs.addForcedChange(chainBBlock6, types.GrandpaForcedChange{ + Delay: 2, + BestFinalizedBlock: 3, + Auths: []types.GrandpaAuthoritiesRaw{ + {Key: keyring.KeyCharlie.Public().(*sr25519.PublicKey).AsBytes()}, + {Key: keyring.KeyBob.Public().(*sr25519.PublicKey).AsBytes()}, + {Key: keyring.KeyDave.Public().(*sr25519.PublicKey).AsBytes()}, + }, + }) + }, + finalizedHeader: [2]int{0, 3}, + expectedForcedChangesLen: 1, + expectedAuthoritySet: func() []types.GrandpaVoter { + auths, _ := types.GrandpaAuthoritiesRawToAuthorities(genesisGrandpaVoters) + return types.NewGrandpaVotersFromAuthorities(auths) + }(), + }, + "pending_scheduled_changes_should_return_error": { + generateForks: genericForks, + changes: func(gs *GrandpaState, headers [][]*types.Header) { + chainABlock4 := headers[0][3] // block 4 from chain A + gs.addScheduledChange(chainABlock4, types.GrandpaScheduledChange{ + Delay: 0, + Auths: []types.GrandpaAuthoritiesRaw{ + {Key: keyring.KeyAlice.Public().(*sr25519.PublicKey).AsBytes()}, + {Key: keyring.KeyIan.Public().(*sr25519.PublicKey).AsBytes()}, + {Key: keyring.KeyEve.Public().(*sr25519.PublicKey).AsBytes()}, + }, + }) + + // change on block 5 from chain A should be a child + // node of scheduled change on block 4 from chain A + chainABlock5 := headers[0][5] + gs.addScheduledChange(chainABlock5, types.GrandpaScheduledChange{ + Delay: 0, + Auths: []types.GrandpaAuthoritiesRaw{ + {Key: keyring.KeyBob.Public().(*sr25519.PublicKey).AsBytes()}, + {Key: keyring.KeyIan.Public().(*sr25519.PublicKey).AsBytes()}, + {Key: keyring.KeyEve.Public().(*sr25519.PublicKey).AsBytes()}, + }, + }) + }, + finalizedHeader: [2]int{0, 6}, // finalize block number 7 from chain A + wantErr: fmt.Errorf( + "cannot get applicable scheduled change: failed while applying condition: %w", 
errUnfinalizedAncestor), + expectedScheduledChangeRootsLen: 1, // expected one root len as the second change is a child + expectedAuthoritySet: func() []types.GrandpaVoter { + auths, _ := types.GrandpaAuthoritiesRawToAuthorities(genesisGrandpaVoters) + return types.NewGrandpaVotersFromAuthorities(auths) + }(), + }, + "no_changes_to_apply_should_only_update_the_scheduled_roots": { + generateForks: genericForks, + changes: func(gs *GrandpaState, headers [][]*types.Header) { + chainBBlock6 := headers[1][3] // block 6 from chain B + gs.addScheduledChange(chainBBlock6, types.GrandpaScheduledChange{ + Delay: 0, + Auths: []types.GrandpaAuthoritiesRaw{ + {Key: keyring.KeyAlice.Public().(*sr25519.PublicKey).AsBytes()}, + {Key: keyring.KeyIan.Public().(*sr25519.PublicKey).AsBytes()}, + {Key: keyring.KeyEve.Public().(*sr25519.PublicKey).AsBytes()}, + }, + }) + + chainBBlock8 := headers[1][5] // block number 8 from chain B + gs.addScheduledChange(chainBBlock8, types.GrandpaScheduledChange{ + Delay: 0, + Auths: []types.GrandpaAuthoritiesRaw{ + {Key: keyring.KeyBob.Public().(*sr25519.PublicKey).AsBytes()}, + {Key: keyring.KeyIan.Public().(*sr25519.PublicKey).AsBytes()}, + {Key: keyring.KeyEve.Public().(*sr25519.PublicKey).AsBytes()}, + }, + }) + }, + finalizedHeader: [2]int{2, 1}, // finalize block number 8 from chain C + expectedScheduledChangeRootsLen: 0, + expectedAuthoritySet: func() []types.GrandpaVoter { + auths, _ := types.GrandpaAuthoritiesRawToAuthorities(genesisGrandpaVoters) + return types.NewGrandpaVotersFromAuthorities(auths) + }(), + }, + "apply_scheduled_change_should_change_voters_and_set_id": { + generateForks: genericForks, + changes: func(gs *GrandpaState, headers [][]*types.Header) { + chainBBlock6 := headers[1][3] // block 6 from chain B + gs.addScheduledChange(chainBBlock6, types.GrandpaScheduledChange{ + Delay: 0, + Auths: []types.GrandpaAuthoritiesRaw{ + {Key: keyring.KeyAlice.Public().(*sr25519.PublicKey).AsBytes()}, + {Key: 
keyring.KeyIan.Public().(*sr25519.PublicKey).AsBytes()}, + {Key: keyring.KeyEve.Public().(*sr25519.PublicKey).AsBytes()}, + }, + }) + + chainBBlock8 := headers[1][5] // block number 8 from chain B + err = gs.addScheduledChange(chainBBlock8, types.GrandpaScheduledChange{ + Delay: 0, + Auths: []types.GrandpaAuthoritiesRaw{ + {Key: keyring.KeyBob.Public().(*sr25519.PublicKey).AsBytes()}, + {Key: keyring.KeyIan.Public().(*sr25519.PublicKey).AsBytes()}, + {Key: keyring.KeyEve.Public().(*sr25519.PublicKey).AsBytes()}, + }, + }) + }, + finalizedHeader: [2]int{1, 3}, // finalize block number 6 from chain B + // the child (block number 8 from chain B) should be the next scheduled change root + expectedScheduledChangeRootsLen: 1, + expectedSetID: 1, + changeSetIDAt: 6, + expectedAuthoritySet: func() []types.GrandpaVoter { + auths, _ := types.GrandpaAuthoritiesRawToAuthorities([]types.GrandpaAuthoritiesRaw{ + {Key: keyring.KeyAlice.Public().(*sr25519.PublicKey).AsBytes()}, + {Key: keyring.KeyIan.Public().(*sr25519.PublicKey).AsBytes()}, + {Key: keyring.KeyEve.Public().(*sr25519.PublicKey).AsBytes()}, + }) + return types.NewGrandpaVotersFromAuthorities(auths) + }(), + }, + } + + for tname, tt := range tests { + tt := tt + t.Run(tname, func(t *testing.T) { + t.Parallel() + + db := NewInMemoryDB(t) + blockState := testBlockState(t, db) + + genesisAuths, err := types.GrandpaAuthoritiesRawToAuthorities(genesisGrandpaVoters) + require.NoError(t, err) + + voters := types.NewGrandpaVotersFromAuthorities(genesisAuths) + gs, err := NewGrandpaStateFromGenesis(db, blockState, voters) + require.NoError(t, err) + + forks := tt.generateForks(t, gs.blockState) + + if tt.changes != nil { + tt.changes(gs, forks) + } + + selectedFork := forks[tt.finalizedHeader[0]] + selectedFinalizedHeader := selectedFork[tt.finalizedHeader[1]] + + err = gs.ApplyScheduledChanges(selectedFinalizedHeader) + if tt.wantErr != nil { + require.EqualError(t, err, tt.wantErr.Error()) + } else { + require.NoError(t, 
err) + + // ensure the forced changes and scheduled changes + // are descendant of the latest finalized header + forcedChangeSlice := *gs.forcedChanges + for _, forcedChange := range forcedChangeSlice { + isDescendant, err := gs.blockState.IsDescendantOf( + selectedFinalizedHeader.Hash(), forcedChange.announcingHeader.Hash()) + + require.NoError(t, err) + require.True(t, isDescendant) + } + + assertDescendantChildren(t, + selectedFinalizedHeader.Hash(), gs.blockState.IsDescendantOf, *gs.scheduledChangeRoots) + } + + require.Len(t, *gs.forcedChanges, tt.expectedForcedChangesLen) + require.Len(t, *gs.scheduledChangeRoots, tt.expectedScheduledChangeRootsLen) + + currentSetID, err := gs.GetCurrentSetID() + require.NoError(t, err) + require.Equal(t, tt.expectedSetID, currentSetID) + + currentVoters, err := gs.GetAuthorities(currentSetID) + require.NoError(t, err) + require.Equal(t, tt.expectedAuthoritySet, currentVoters) + + blockNumber, err := gs.GetSetIDChange(currentSetID) + require.NoError(t, err) + require.Equal(t, tt.changeSetIDAt, blockNumber) + }) + } +} diff --git a/dot/state/initialize.go b/dot/state/initialize.go index 3769303423..cd820a71a9 100644 --- a/dot/state/initialize.go +++ b/dot/state/initialize.go @@ -89,7 +89,7 @@ func (s *Service) Initialise(gen *genesis.Genesis, header *types.Header, t *trie return fmt.Errorf("failed to load grandpa authorities: %w", err) } - grandpaState, err := NewGrandpaStateFromGenesis(db, grandpaAuths) + grandpaState, err := NewGrandpaStateFromGenesis(db, blockState, grandpaAuths) if err != nil { return fmt.Errorf("failed to create grandpa state: %s", err) } diff --git a/dot/state/service.go b/dot/state/service.go index b425fa163b..6794fee251 100644 --- a/dot/state/service.go +++ b/dot/state/service.go @@ -159,11 +159,7 @@ func (s *Service) Start() error { return fmt.Errorf("failed to create epoch state: %w", err) } - s.Grandpa, err = NewGrandpaState(s.db) - if err != nil { - return fmt.Errorf("failed to create grandpa 
state: %w", err) - } - + s.Grandpa = NewGrandpaState(s.db, s.Block) num, _ := s.Block.BestBlockNumber() logger.Infof( "created state service with head %s, highest number %d and genesis hash %s", diff --git a/dot/state/service_test.go b/dot/state/service_test.go index 7782a88047..d45ae15fe9 100644 --- a/dot/state/service_test.go +++ b/dot/state/service_test.go @@ -316,13 +316,13 @@ func TestService_Rewind(t *testing.T) { err = serv.Grandpa.setCurrentSetID(3) require.NoError(t, err) - err = serv.Grandpa.setSetIDChangeAtBlock(1, 5) + err = serv.Grandpa.setChangeSetIDAtBlock(1, 5) require.NoError(t, err) - err = serv.Grandpa.setSetIDChangeAtBlock(2, 8) + err = serv.Grandpa.setChangeSetIDAtBlock(2, 8) require.NoError(t, err) - err = serv.Grandpa.setSetIDChangeAtBlock(3, 10) + err = serv.Grandpa.setChangeSetIDAtBlock(3, 10) require.NoError(t, err) AddBlocksToState(t, serv.Block, 12, false) diff --git a/dot/state/tries_test.go b/dot/state/tries_test.go index 388a689106..50aa601633 100644 --- a/dot/state/tries_test.go +++ b/dot/state/tries_test.go @@ -169,13 +169,15 @@ func Test_Tries_get(t *testing.T) { tries: &Tries{ rootToTrie: map[common.Hash]*trie.Trie{ {1, 2, 3}: trie.NewTrie(&node.Node{ - Key: []byte{1, 2, 3}, + Key: []byte{1, 2, 3}, + Value: []byte{1}, }), }, }, root: common.Hash{1, 2, 3}, trie: trie.NewTrie(&node.Node{ - Key: []byte{1, 2, 3}, + Key: []byte{1, 2, 3}, + Value: []byte{1}, }), }, "not found in map": { diff --git a/dot/types/consensus_digest.go b/dot/types/consensus_digest.go index 0d704e964d..b33e884555 100644 --- a/dot/types/consensus_digest.go +++ b/dot/types/consensus_digest.go @@ -31,8 +31,12 @@ func (sc GrandpaScheduledChange) Index() uint { return 1 } // GrandpaForcedChange represents a GRANDPA forced authority change type GrandpaForcedChange struct { - Auths []GrandpaAuthoritiesRaw - Delay uint32 + // BestFinalizedBlock is specified by the governance mechanism, defines + // the starting block at which Delay is applied. 
+ // https://github.com/w3f/polkadot-spec/pull/506#issuecomment-1128849492 + BestFinalizedBlock uint32 + Auths []GrandpaAuthoritiesRaw + Delay uint32 } // Index Returns VDT index diff --git a/dot/types/digest.go b/dot/types/digest.go index e0796dcca1..4624d25837 100644 --- a/dot/types/digest.go +++ b/dot/types/digest.go @@ -29,6 +29,10 @@ func (h ConsensusEngineID) ToBytes() []byte { return b[:] } +func (h ConsensusEngineID) String() string { + return fmt.Sprintf("0x%x", h.ToBytes()) +} + // BabeEngineID is the hard-coded babe ID var BabeEngineID = ConsensusEngineID{'B', 'A', 'B', 'E'} diff --git a/go.mod b/go.mod index c3d0c66a56..a674ad08ec 100644 --- a/go.mod +++ b/go.mod @@ -12,7 +12,7 @@ require ( github.com/dgraph-io/ristretto v0.0.3-0.20200630154024-f66de99634de github.com/disiqueira/gotree v1.0.0 github.com/docker/docker v20.10.17+incompatible - github.com/ethereum/go-ethereum v1.10.19 + github.com/ethereum/go-ethereum v1.10.20 github.com/fatih/color v1.13.0 github.com/go-playground/validator/v10 v10.11.0 github.com/golang/mock v1.6.0 @@ -27,7 +27,7 @@ require ( github.com/ipfs/go-ds-badger2 v0.1.1 github.com/ipfs/go-ipns v0.1.2 //indirect github.com/jpillora/ipfilter v1.2.6 - github.com/klauspost/compress v1.15.6 + github.com/klauspost/compress v1.15.7 github.com/libp2p/go-libp2p v0.15.1 github.com/libp2p/go-libp2p-core v0.9.0 github.com/libp2p/go-libp2p-discovery v0.5.1 @@ -40,11 +40,11 @@ require ( github.com/prometheus/client_golang v1.12.2 github.com/prometheus/client_model v0.2.0 github.com/qdm12/gotree v0.2.0 - github.com/stretchr/testify v1.7.2 + github.com/stretchr/testify v1.8.0 github.com/urfave/cli v1.22.9 github.com/wasmerio/go-ext-wasm v0.3.2-0.20200326095750-0a32be6068ec golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3 - golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1 + golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 golang.org/x/text v0.3.7 google.golang.org/protobuf v1.28.0 ) @@ -55,9 +55,10 @@ require ( 
github.com/beorn7/perks v1.0.1 // indirect github.com/btcsuite/btcd v0.22.0-beta // indirect github.com/btcsuite/btcd/btcec/v2 v2.2.0 // indirect + github.com/centrifuge/go-substrate-rpc-client/v4 v4.0.0 github.com/cespare/xxhash v1.1.0 // indirect github.com/cespare/xxhash/v2 v2.1.2 // indirect - github.com/cpuguy83/go-md2man/v2 v2.0.0 // indirect + github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c // indirect github.com/deckarep/golang-set v1.8.0 // indirect @@ -158,12 +159,11 @@ require ( github.com/polydawn/refmt v0.0.0-20190807091052-3d65705ee9f1 // indirect github.com/prometheus/common v0.32.1 // indirect github.com/prometheus/procfs v0.7.3 // indirect - github.com/rs/cors v1.7.0 // indirect - github.com/russross/blackfriday/v2 v2.0.1 // indirect + github.com/rs/cors v1.8.0 // indirect + github.com/russross/blackfriday/v2 v2.1.0 // indirect github.com/shirou/gopsutil v3.21.4-0.20210419000835-c7a38de76ee5+incompatible // indirect - github.com/shurcooL/sanitized_anchor_name v1.0.0 // indirect github.com/spacemonkeygo/spacelog v0.0.0-20180420211403-2296661a0572 // indirect - github.com/stretchr/objx v0.1.1 // indirect + github.com/stretchr/objx v0.4.0 // indirect github.com/tklauser/go-sysconf v0.3.5 // indirect github.com/tklauser/numcpus v0.2.2 // indirect github.com/tomasen/realip v0.0.0-20180522021738-f0c99a92ddce // indirect @@ -176,9 +176,9 @@ require ( go.uber.org/atomic v1.9.0 // indirect go.uber.org/multierr v1.7.0 // indirect go.uber.org/zap v1.19.0 // indirect - golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2 // indirect + golang.org/x/net v0.0.0-20220607020251-c690dde0001d // indirect golang.org/x/sync v0.0.0-20210220032951-036812b2e83c // indirect - golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 // indirect + golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a // indirect google.golang.org/appengine v1.6.6 // indirect 
gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce // indirect gopkg.in/yaml.v3 v3.0.1 // indirect diff --git a/go.sum b/go.sum index a975634576..da6ad2aeb4 100644 --- a/go.sum +++ b/go.sum @@ -155,6 +155,8 @@ github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QH github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/centrifuge/go-substrate-rpc-client/v3 v3.0.2 h1:SQNaOeTmW2y2fmJgR5a7KIozjaOYi34GxafQ4efGc5U= github.com/centrifuge/go-substrate-rpc-client/v3 v3.0.2/go.mod h1:ZYSX8OuIJgZ9aVdKLhIi1G4Rj42Ys4nZNsWW70yfCJc= +github.com/centrifuge/go-substrate-rpc-client/v4 v4.0.0 h1:/t8Aw7d3rCu1uqYFFG2JIoYK/W6/Af5C1+WNF6XyYL8= +github.com/centrifuge/go-substrate-rpc-client/v4 v4.0.0/go.mod h1:MDzvG8lkzMGRaO4qzvxdfJtlDtukRPqNVWG9HJybVt0= github.com/cespare/cp v0.1.0/go.mod h1:SOGHArjBr4JWaSDEVpWpo/hNg6RoKrls6Oh40hiwW+s= github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= @@ -192,8 +194,8 @@ github.com/cosmos/go-bip39 v1.0.0 h1:pcomnQdrdH22njcAatO0yWojsUnCO3y2tNoV1cb6hHY github.com/cosmos/go-bip39 v1.0.0/go.mod h1:RNJv0H/pOIVgxw6KS7QeX2a0Uo0aKUlfhZ4xuwvCdJw= github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= -github.com/cpuguy83/go-md2man/v2 v2.0.0 h1:EoUDS0afbrsXAZ9YQ9jdu/mZ2sXgT1/2yyNng4PGlyM= -github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= +github.com/cpuguy83/go-md2man/v2 v2.0.2 h1:p1EgwI/C7NhT0JmVkwCD2ZBK8j4aeHQX2pMHHBfMQ6w= +github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= github.com/creack/pty v1.1.9/go.mod 
h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/dave/jennifer v1.2.0/go.mod h1:fIb+770HOpJ2fmN9EPPKOqm1vMGhB+TwXKMZhrIygKg= @@ -262,8 +264,8 @@ github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.m github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/ethereum/go-ethereum v1.9.25/go.mod h1:vMkFiYLHI4tgPw4k2j4MHKoovchFE8plZ0M9VMk4/oM= github.com/ethereum/go-ethereum v1.10.4/go.mod h1:nEE0TP5MtxGzOMd7egIrbPJMQBnhVU3ELNxhBglIzhg= -github.com/ethereum/go-ethereum v1.10.19 h1:EOR5JbL4MD5yeOqv8W2iC1s4NximrTjqFccUz8lyBRA= -github.com/ethereum/go-ethereum v1.10.19/go.mod h1:IJBNMtzKcNHPtllYihy6BL2IgK1u+32JriaTbdt4v+w= +github.com/ethereum/go-ethereum v1.10.20 h1:75IW830ClSS40yrQC1ZCMZCt5I+zU16oqId2SiQwdQ4= +github.com/ethereum/go-ethereum v1.10.20/go.mod h1:LWUN82TCHGpxB3En5HVmLLzPD7YSrEUFmFfN1nKkVN0= github.com/fatih/color v1.3.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= github.com/fatih/color v1.13.0 h1:8LOYc1KYPPmyKMuN8QV2DNRWNbLo6LZ0iLs8+mlH53w= @@ -285,6 +287,8 @@ github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWo github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff/go.mod h1:x7DCsMOv1taUwEWCzT4cmDeAkigA5/QCwUodaVOe8Ww= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI= +github.com/gin-gonic/gin v1.5.0/go.mod h1:Nd6IXA8m5kNZdNEHMBd93KT+mdY3+bewLgRvmCsR2Do= github.com/gliderlabs/ssh v0.1.1/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0= github.com/glycerine/go-unsnap-stream v0.0.0-20180323001048-9f0cb55181dd/go.mod h1:/20jfyN9Y5QPEAprSgKAUr+glWDY39ZiUEAYOEv5dsE= github.com/glycerine/goconvey 
v0.0.0-20190410193231-58a59202ab31/go.mod h1:Ogl1Tioa0aV7gstGFO7KhffUsb9M4ydbEbbxpcEDc24= @@ -306,8 +310,10 @@ github.com/go-ole/go-ole v1.2.1 h1:2lOsA72HgjxAuMlKpFiCbHTvu44PIVkZ5hqm3RSdI/E= github.com/go-ole/go-ole v1.2.1/go.mod h1:7FAglXiTm7HKlQRDeOQ6ZNUHidzCWXuZWq/1dTyBNF8= github.com/go-playground/assert/v2 v2.0.1 h1:MsBgLAaY856+nPRTKrp3/OZK38U/wa0CcBYNjji3q3A= github.com/go-playground/assert/v2 v2.0.1/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4= +github.com/go-playground/locales v0.12.1/go.mod h1:IUMDtCfWo/w/mtMfIE/IG2K+Ey3ygWanZIBtBW0W2TM= github.com/go-playground/locales v0.14.0 h1:u50s323jtVGugKlcYeyzC0etD1HifMjqmJqb8WugfUU= github.com/go-playground/locales v0.14.0/go.mod h1:sawfccIbzZTqEDETgFXqTho0QybSa7l++s0DH+LDiLs= +github.com/go-playground/universal-translator v0.16.0/go.mod h1:1AnU7NaIRDWWzGEKwgtJRd2xk99HeFyHw3yid4rvQIY= github.com/go-playground/universal-translator v0.18.0 h1:82dyy6p4OuJq4/CByFNOn/jYrnRPArHwAcmLoJZxyho= github.com/go-playground/universal-translator v0.18.0/go.mod h1:UvRDBj+xPUEGrFYl+lu/H90nyDXpg0fqeB/AQUGNTVA= github.com/go-playground/validator/v10 v10.11.0 h1:0W+xRM511GY47Yy3bZUbJVitCNg2BOGlCyvTqsp/xIw= @@ -605,8 +611,8 @@ github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6 github.com/klauspost/compress v1.4.0/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= github.com/klauspost/compress v1.11.7/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= github.com/klauspost/compress v1.12.3/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg= -github.com/klauspost/compress v1.15.6 h1:6D9PcO8QWu0JyaQ2zUMmu16T1T+zjjEpP91guRsvDfY= -github.com/klauspost/compress v1.15.6/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU= +github.com/klauspost/compress v1.15.7 h1:7cgTQxJCU/vy+oP/E3B9RGbQTgbiVzIJWIKOLoAsPok= +github.com/klauspost/compress v1.15.7/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU= github.com/klauspost/cpuid v0.0.0-20170728055534-ae7887de9fa5/go.mod 
h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= github.com/klauspost/cpuid/v2 v2.0.4/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= github.com/klauspost/cpuid/v2 v2.0.9 h1:lgaqFMSdTdQYdZ04uHyN2d/eKdOMyi2YLSvlQIBFYa4= @@ -632,6 +638,7 @@ github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= github.com/leanovate/gopter v0.2.9/go.mod h1:U2L/78B+KVFIx2VmW6onHJQzXtFb+p5y3y2Sh+Jxxv8= +github.com/leodido/go-urn v1.1.0/go.mod h1:+cyI34gQWZcE1eQU7NVgKkkzdXDQHr1dBMtdAPozLkw= github.com/leodido/go-urn v1.2.1 h1:BqpAaACuzVSgi/VLzGZIobT2z4v53pjosyNd9Yv6n/w= github.com/leodido/go-urn v1.2.1/go.mod h1:zt4jvISO2HfUBqxjfIshjdMTYS56ZS/qv49ictyFfxY= github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= @@ -890,6 +897,7 @@ github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNx github.com/mattn/go-isatty v0.0.5-0.20180830101745-3fb116b82035/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= +github.com/mattn/go-isatty v0.0.9/go.mod h1:YNRxwqDuOph6SZLI9vUUz6OYw3QyUt7WiY2yME+cCiQ= github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= github.com/mattn/go-isatty v0.0.13/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= github.com/mattn/go-isatty v0.0.14 h1:yVuAays6BHfxijgZPzw+3Zlu5yQgKGP2/hcQbHb7S9Y= @@ -1149,12 +1157,14 @@ github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTE github.com/rogpeppe/go-internal v1.8.0 h1:FCbCCtXNOY3UtUuHUYaghJg4y7Fd14rXifAYUAtL9R8= github.com/rogpeppe/go-internal v1.8.0/go.mod h1:WmiCO8CzOY8rg0OYDC4/i/2WRWAB6poM+XZ2dLUbcbE= github.com/rs/cors 
v0.0.0-20160617231935-a62a804a8a00/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU= -github.com/rs/cors v1.7.0 h1:+88SsELBHx5r+hZ8TCkggzSstaWNbDvThkVK8H6f9ik= github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU= +github.com/rs/cors v1.8.0 h1:P2KMzcFwrPoSjkF1WLRPsp3UMLyql8L4v9hQpVeK5so= +github.com/rs/cors v1.8.0/go.mod h1:EBwu+T5AvHOcXwvZIkQFjUN6s8Czyqw12GL/Y0tUyRM= github.com/rs/xhandler v0.0.0-20160618193221-ed27b6fd6521/go.mod h1:RvLn4FgxWubrpZHtQLnOf6EwhN2hEMusxZOhcW9H3UQ= github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= -github.com/russross/blackfriday/v2 v2.0.1 h1:lPqVAte+HuHNfhJ/0LC98ESWRz8afy9tM/0RK8m9o+Q= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= +github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= @@ -1184,7 +1194,6 @@ github.com/shurcooL/notifications v0.0.0-20181007000457-627ab5aea122/go.mod h1:b github.com/shurcooL/octicon v0.0.0-20181028054416-fa4f57f9efb2/go.mod h1:eWdoE5JD4R5UVWDucdOPg1g2fqQRq78IQa9zlOV1vpQ= github.com/shurcooL/reactions v0.0.0-20181006231557-f2e0b4ca5b82/go.mod h1:TCR1lToEk4d2s07G3XGfz2QrgHXg4RJBvjrOozvoWfk= github.com/shurcooL/sanitized_anchor_name v0.0.0-20170918181015-86672fcb3f95/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= -github.com/shurcooL/sanitized_anchor_name v1.0.0 h1:PdmoCO6wvbs+7yrJyMORt4/BmY5IYyJwS/kOiWx8mHo= github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod 
h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= github.com/shurcooL/users v0.0.0-20180125191416-49c67e49c537/go.mod h1:QJTqeLYEDaXHZDBsXlPCDqdhQuJkuw4NOtaxYe3xii4= github.com/shurcooL/webdavfs v0.0.0-20170829043945-18c3829fa133/go.mod h1:hKmq5kWdCj2z2KEozexVbfEZIWiTjhE0+UjmZgPqehw= @@ -1222,8 +1231,9 @@ github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94/go.mod h1:AZpEONHx3 github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= github.com/streadway/handy v0.0.0-20190108123426-d5acb3125c2a/go.mod h1:qNTQ5P5JnDBl6z3cMAg/SywNDC5ABu5ApDIw6lUbRmI= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.1.1 h1:2vfRuCMp5sSVIDSqO8oNnWJq7mPa6KVP3iPIwFBuy8A= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.4.0 h1:M2gUjqZET1qApGOWNSnZ49BAIMX4F/1plDv3+l31EJ4= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/testify v1.2.0/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= @@ -1232,12 +1242,12 @@ github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5 github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.7.2 h1:4jaiDzPyXQvSd7D0EjG45355tLlV3VOECpq10pLC+8s= -github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals= +github.com/stretchr/testify v1.8.0 h1:pSgiaMZlXftHpm5L7V1+rVB+AZJydKsMxsQBIJw4PKk= +github.com/stretchr/testify v1.8.0/go.mod 
h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/syndtr/goleveldb v1.0.0/go.mod h1:ZVVdQEZoIme9iO1Ch2Jdy24qqXrMMOU6lpPAyBWyWuQ= github.com/syndtr/goleveldb v1.0.1-0.20200815110645-5c35d600f0ca/go.mod h1:u2MKkTVTVJWe5D1rCvame8WqhBd88EuIwODJZ1VHCPM= github.com/syndtr/goleveldb v1.0.1-0.20210305035536-64b5b1c73954/go.mod h1:u2MKkTVTVJWe5D1rCvame8WqhBd88EuIwODJZ1VHCPM= -github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 h1:epCh84lMvA70Z7CTTCmYQn2CKbY8j86K7/FAIr141uY= +github.com/syndtr/goleveldb v1.0.1-0.20220614013038-64ee5596c38a h1:1ur3QoCqvE5fl+nylMaIr9PVV1w343YRDtsy+Rwu7XI= github.com/tarm/serial v0.0.0-20180830185346-98f6abe2eb07/go.mod h1:kDXzergiv9cbyO7IOYJZWg1U88JhDg3PB6klq9Hg2pA= github.com/tinylib/msgp v1.0.2/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE= github.com/tklauser/go-sysconf v0.3.5 h1:uu3Xl4nkLzQfXNsWn15rPc/HQCJKObbt1dKJeWp3vU4= @@ -1250,7 +1260,9 @@ github.com/tomasen/realip v0.0.0-20180522021738-f0c99a92ddce/go.mod h1:o8v6yHRoi github.com/twitchyliquid64/golang-asm v0.0.0-20190126203739-365674df15fc h1:RTUQlKzoZZVG3umWNzOYeFecQLIh+dbxXvJp1zPQJTI= github.com/twitchyliquid64/golang-asm v0.0.0-20190126203739-365674df15fc/go.mod h1:NoCfSFWosfqMqmmD7hApkirIK9ozpHjxRnRxs1l413A= github.com/tyler-smith/go-bip39 v1.0.1-0.20181017060643-dbb3b84ba2ef/go.mod h1:sJ5fKU0s6JVwZjjcUEX2zFOnvq0ASQ2K9Zr6cf67kNs= +github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVMw= github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= +github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY= github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= github.com/urfave/cli v1.22.9 h1:cv3/KhXGBGjEXLC4bH0sLuJ9BewaAbpk5oyMOveu4pw= @@ -1456,8 +1468,9 @@ golang.org/x/net v0.0.0-20210423184538-5f58ad60dda6/go.mod 
h1:OJAsFXCWl8Ukc7SiCT golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2 h1:CIJ76btIcR3eFI5EgSo6k1qKw9KJexJuRLI9G7Hp5wE= golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20220607020251-c690dde0001d h1:4SFsTMi4UahlKoloni7L4eYzhFRifURQLw+yv0QDCx8= +golang.org/x/net v0.0.0-20220607020251-c690dde0001d/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20181017192945-9dcd33a902f4/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= @@ -1506,6 +1519,7 @@ golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1563,11 +1577,13 @@ golang.org/x/sys 
v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210816183151-1e6c022a8912/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 h1:XfKQ4OlFl8okEOr5UvAqFRVj8pY/4yfcXrddB8qAbU0= golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a h1:dGzPydgVsqGcTRVwiLJ1jVbufYwmzD3LfVPLKsKg+0k= +golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= -golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1 h1:v+OssWQX+hTHEmOBgwxdZxK4zHq3yOs8F9J7mk0PY8E= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 h1:JGgROgKl9N8DuW20oFS5gxc+lE67/N3FcwmBPMe7ArY= +golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -1772,6 +1788,8 @@ gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qS gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= gopkg.in/gcfg.v1 v1.2.3/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o= 
+gopkg.in/go-playground/assert.v1 v1.2.1/go.mod h1:9RXL0bg/zibRAgZUYszZSwO/z8Y/a8bDuhia5mkpMnE= +gopkg.in/go-playground/validator.v9 v9.29.1/go.mod h1:+c9/zcJMFNgbLvly1L1V+PpxWdVbfP1avr/N00E2vyQ= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce h1:+JknDZhAj8YMt7GC73Ei8pv4MzjDUNPHgQWJdtMAaDU= gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce/go.mod h1:5AcXVHNjg+BDxry382+8OKon8SEWiKktQR07RKPsv1c= diff --git a/internal/trie/node/README.md b/internal/trie/node/README.md new file mode 100644 index 0000000000..cae00b0ff0 --- /dev/null +++ b/internal/trie/node/README.md @@ -0,0 +1,31 @@ +# Trie node + +Package node defines the `Node` structure with methods to be used in the modified Merkle-Patricia Radix-16 trie. + +## Codec + +The following sub-sections precise the encoding of a node. +This encoding is formally described in [the Polkadot specification](https://spec.polkadot.network/#sect-state-storage). + +### Header + +Each node encoding has a header of one or more bytes. +The first byte contains the node variant and some or all of the partial key length of the node. +If the partial key length cannot fit in the first byte, additional bytes are added to the header to represent the total partial key length. + +### Partial key + +The header is then concatenated with the partial key of the node, encoded as Little Endian bytes. + +### Remaining bytes + +The remaining bytes appended depend on the node variant. + +- For leaves, the SCALE-encoded leaf value is appended. +- For branches, the following elements are concatenated in this order and appended to the previous header+partial key: + - Children bitmap (2 bytes) + - SCALE-encoded node value + - Hash(Encoding(Child[0])) + - Hash(Encoding(Child[1])) + - ... 
+ - Hash(Encoding(Child[15])) diff --git a/internal/trie/node/branch_encode_test.go b/internal/trie/node/branch_encode_test.go index 88efb2e752..c0028cc897 100644 --- a/internal/trie/node/branch_encode_test.go +++ b/internal/trie/node/branch_encode_test.go @@ -218,11 +218,11 @@ func Test_encodeChildrenOpportunisticParallel(t *testing.T) { "no children": {}, "first child not nil": { children: []*Node{ - {Key: []byte{1}}, + {Key: []byte{1}, Value: []byte{2}}, }, writes: []writeCall{ { - written: []byte{12, 65, 1, 0}, + written: []byte{16, 65, 1, 4, 2}, }, }, }, @@ -231,25 +231,25 @@ func Test_encodeChildrenOpportunisticParallel(t *testing.T) { nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, - {Key: []byte{1}}, + {Key: []byte{1}, Value: []byte{2}}, }, writes: []writeCall{ { - written: []byte{12, 65, 1, 0}, + written: []byte{16, 65, 1, 4, 2}, }, }, }, "first two children not nil": { children: []*Node{ - {Key: []byte{1}}, - {Key: []byte{2}}, + {Key: []byte{1}, Value: []byte{2}}, + {Key: []byte{3}, Value: []byte{4}}, }, writes: []writeCall{ { - written: []byte{12, 65, 1, 0}, + written: []byte{16, 65, 1, 4, 2}, }, { - written: []byte{12, 65, 2, 0}, + written: []byte{16, 65, 3, 4, 4}, }, }, }, @@ -258,12 +258,12 @@ func Test_encodeChildrenOpportunisticParallel(t *testing.T) { nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, - {Key: []byte{1}}, + {Key: []byte{1}, Value: []byte{2}}, nil, nil, nil, nil, }, writes: []writeCall{ { - written: []byte{12, 65, 1, 0}, + written: []byte{16, 65, 1, 4, 2}, err: errTest, }, }, @@ -278,13 +278,13 @@ func Test_encodeChildrenOpportunisticParallel(t *testing.T) { { Key: []byte{1}, Children: []*Node{ - {Key: []byte{1}}, + {Key: []byte{1}, Value: []byte{2}}, }, }, }, writes: []writeCall{ { - written: []byte{32, 129, 1, 1, 0, 12, 65, 1, 0}, + written: []byte{36, 129, 1, 1, 0, 16, 65, 1, 4, 2}, }, }, }, @@ -360,11 +360,11 @@ func Test_encodeChildrenSequentially(t *testing.T) { "no children": {}, "first child not 
nil": { children: []*Node{ - {Key: []byte{1}}, + {Key: []byte{1}, Value: []byte{2}}, }, writes: []writeCall{ { - written: []byte{12, 65, 1, 0}, + written: []byte{16, 65, 1, 4, 2}, }, }, }, @@ -373,25 +373,25 @@ func Test_encodeChildrenSequentially(t *testing.T) { nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, - {Key: []byte{1}}, + {Key: []byte{1}, Value: []byte{2}}, }, writes: []writeCall{ { - written: []byte{12, 65, 1, 0}, + written: []byte{16, 65, 1, 4, 2}, }, }, }, "first two children not nil": { children: []*Node{ - {Key: []byte{1}}, - {Key: []byte{2}}, + {Key: []byte{1}, Value: []byte{2}}, + {Key: []byte{3}, Value: []byte{4}}, }, writes: []writeCall{ { - written: []byte{12, 65, 1, 0}, + written: []byte{16, 65, 1, 4, 2}, }, { - written: []byte{12, 65, 2, 0}, + written: []byte{16, 65, 3, 4, 4}, }, }, }, @@ -400,12 +400,12 @@ func Test_encodeChildrenSequentially(t *testing.T) { nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, - {Key: []byte{1}}, + {Key: []byte{1}, Value: []byte{2}}, nil, nil, nil, nil, }, writes: []writeCall{ { - written: []byte{12, 65, 1, 0}, + written: []byte{16, 65, 1, 4, 2}, err: errTest, }, }, @@ -457,13 +457,6 @@ func Test_encodeChild(t *testing.T) { errMessage string }{ "nil node": {}, - "empty leaf child": { - child: &Node{}, - writeCall: true, - write: writeCall{ - written: []byte{8, 64, 0}, - }, - }, "empty branch child": { child: &Node{ Children: make([]*Node, ChildrenCapacity), @@ -547,25 +540,15 @@ func Test_scaleEncodeHash(t *testing.T) { wrappedErr error errMessage string }{ - "empty leaf": { - node: &Node{}, - encoding: []byte{0x8, 0x40, 0}, - }, - "empty branch": { - node: &Node{ - Children: make([]*Node, ChildrenCapacity), - }, - encoding: []byte{0xc, 0x80, 0x0, 0x0}, - }, - "non empty branch": { + "branch": { node: &Node{ Key: []byte{1, 2}, Value: []byte{3, 4}, Children: []*Node{ - nil, nil, {Key: []byte{9}}, + nil, nil, {Key: []byte{9}, Value: []byte{1}}, }, }, - encoding: []byte{0x2c, 0xc2, 
0x12, 0x4, 0x0, 0x8, 0x3, 0x4, 0xc, 0x41, 0x9, 0x0}, + encoding: []byte{0x30, 0xc2, 0x12, 0x4, 0x0, 0x8, 0x3, 0x4, 0x10, 0x41, 0x9, 0x4, 0x1}, }, } diff --git a/internal/trie/node/copy_test.go b/internal/trie/node/copy_test.go index ad0f2f0a2a..8816ee4604 100644 --- a/internal/trie/node/copy_test.go +++ b/internal/trie/node/copy_test.go @@ -42,7 +42,8 @@ func Test_Node_Copy(t *testing.T) { Value: []byte{3, 4}, Children: padRightChildren([]*Node{ nil, nil, { - Key: []byte{9}, + Key: []byte{9}, + Value: []byte{1}, }, }), Dirty: true, @@ -55,7 +56,8 @@ func Test_Node_Copy(t *testing.T) { Value: []byte{3, 4}, Children: padRightChildren([]*Node{ nil, nil, { - Key: []byte{9}, + Key: []byte{9}, + Value: []byte{1}, }, }), Dirty: true, @@ -65,7 +67,8 @@ func Test_Node_Copy(t *testing.T) { node: &Node{ Children: padRightChildren([]*Node{ nil, nil, { - Key: []byte{9}, + Key: []byte{9}, + Value: []byte{1}, }, }), }, @@ -75,7 +78,8 @@ func Test_Node_Copy(t *testing.T) { expectedNode: &Node{ Children: padRightChildren([]*Node{ nil, nil, { - Key: []byte{9}, + Key: []byte{9}, + Value: []byte{1}, }, }), }, @@ -86,7 +90,8 @@ func Test_Node_Copy(t *testing.T) { Value: []byte{3, 4}, Children: padRightChildren([]*Node{ nil, nil, { - Key: []byte{9}, + Key: []byte{9}, + Value: []byte{1}, }, }), Dirty: true, @@ -99,7 +104,8 @@ func Test_Node_Copy(t *testing.T) { Value: []byte{3, 4}, Children: padRightChildren([]*Node{ nil, nil, { - Key: []byte{9}, + Key: []byte{9}, + Value: []byte{1}, }, }), Dirty: true, @@ -107,11 +113,6 @@ func Test_Node_Copy(t *testing.T) { Encoding: []byte{6}, }, }, - "empty leaf": { - node: &Node{}, - settings: DefaultCopySettings, - expectedNode: &Node{}, - }, "non empty leaf": { node: &Node{ Key: []byte{1, 2}, diff --git a/internal/trie/node/decode.go b/internal/trie/node/decode.go index cb6930bbee..a74994b0da 100644 --- a/internal/trie/node/decode.go +++ b/internal/trie/node/decode.go @@ -9,63 +9,68 @@ import ( "fmt" "io" - 
"github.com/ChainSafe/gossamer/internal/trie/pools" "github.com/ChainSafe/gossamer/pkg/scale" ) var ( - ErrReadHeaderByte = errors.New("cannot read header byte") - ErrUnknownNodeType = errors.New("unknown node type") + // ErrDecodeValue is defined since no sentinel error is defined + // in the scale package. + // TODO remove once the following issue is done: + // https://github.com/ChainSafe/gossamer/issues/2631 . ErrDecodeValue = errors.New("cannot decode value") ErrReadChildrenBitmap = errors.New("cannot read children bitmap") - ErrDecodeChildHash = errors.New("cannot decode child hash") + // ErrDecodeChildHash is defined since no sentinel error is defined + // in the scale package. + // TODO remove once the following issue is done: + // https://github.com/ChainSafe/gossamer/issues/2631 . + ErrDecodeChildHash = errors.New("cannot decode child hash") ) // Decode decodes a node from a reader. +// The encoding format is documented in the README.md +// of this package, and specified in the Polkadot spec at +// https://spec.polkadot.network/#sect-state-storage // For branch decoding, see the comments on decodeBranch. // For leaf decoding, see the comments on decodeLeaf. 
func Decode(reader io.Reader) (n *Node, err error) { - buffer := pools.SingleByteBuffers.Get().(*bytes.Buffer) - defer pools.SingleByteBuffers.Put(buffer) - oneByteBuf := buffer.Bytes() - _, err = reader.Read(oneByteBuf) + variant, partialKeyLength, err := decodeHeader(reader) if err != nil { - return nil, fmt.Errorf("%w: %s", ErrReadHeaderByte, err) + return nil, fmt.Errorf("decoding header: %w", err) } - header := oneByteBuf[0] - nodeTypeHeaderByte := header >> 6 - switch nodeTypeHeaderByte { - case leafHeader: - n, err = decodeLeaf(reader, header) + switch variant { + case leafVariant.bits: + n, err = decodeLeaf(reader, partialKeyLength) if err != nil { return nil, fmt.Errorf("cannot decode leaf: %w", err) } return n, nil - case branchHeader, branchWithValueHeader: - n, err = decodeBranch(reader, header) + case branchVariant.bits, branchWithValueVariant.bits: + n, err = decodeBranch(reader, variant, partialKeyLength) if err != nil { return nil, fmt.Errorf("cannot decode branch: %w", err) } return n, nil default: - return nil, fmt.Errorf("%w: %d", ErrUnknownNodeType, nodeTypeHeaderByte) + // this is a programming error, an unknown node variant + // should be caught by decodeHeader. + panic(fmt.Sprintf("not implemented for node variant %08b", variant)) } } -// decodeBranch reads and decodes from a reader with the encoding specified in internal/trie/node/encode_doc.go. +// decodeBranch reads from a reader and decodes to a node branch. // Note that since the encoded branch stores the hash of the children nodes, we are not // reconstructing the child nodes from the encoding. This function instead stubs where the // children are known to be with an empty leaf. The children nodes hashes are then used to // find other values using the persistent database. 
-func decodeBranch(reader io.Reader, header byte) (node *Node, err error) { +func decodeBranch(reader io.Reader, variant byte, partialKeyLength uint16) ( + node *Node, err error) { node = &Node{ Dirty: true, Children: make([]*Node, ChildrenCapacity), } - keyLen := header & keyLenOffset - node.Key, err = decodeKey(reader, keyLen) + node.Key, err = decodeKey(reader, partialKeyLength) if err != nil { return nil, fmt.Errorf("cannot decode key: %w", err) } @@ -78,18 +83,14 @@ func decodeBranch(reader io.Reader, header byte) (node *Node, err error) { sd := scale.NewDecoder(reader) - nodeType := header >> 6 - if nodeType == branchWithValueHeader { - var value []byte - // branch w/ value - err := sd.Decode(&value) + if variant == branchWithValueVariant.bits { + err := sd.Decode(&node.Value) if err != nil { return nil, fmt.Errorf("%w: %s", ErrDecodeValue, err) } - node.Value = value } - for i := 0; i < 16; i++ { + for i := 0; i < ChildrenCapacity; i++ { if (childrenBitmap[i/8]>>(i%8))&1 != 1 { continue } @@ -101,37 +102,35 @@ func decodeBranch(reader io.Reader, header byte) (node *Node, err error) { ErrDecodeChildHash, i, err) } - // Handle inlined leaf nodes. 
const hashLength = 32 - nodeTypeHeaderByte := hash[0] >> 6 - if nodeTypeHeaderByte == leafHeader && len(hash) < hashLength { - leaf, err := decodeLeaf(bytes.NewReader(hash[1:]), hash[0]) + childNode := &Node{ + HashDigest: hash, + Dirty: true, + } + if len(hash) < hashLength { + // Handle inlined nodes + reader = bytes.NewReader(hash) + childNode, err = Decode(reader) if err != nil { - return nil, fmt.Errorf("%w: at index %d: %s", - ErrDecodeValue, i, err) + return nil, fmt.Errorf("decoding inlined child at index %d: %w", i, err) } - node.Descendants++ - node.Children[i] = leaf - continue + node.Descendants += childNode.Descendants } node.Descendants++ - node.Children[i] = &Node{ - HashDigest: hash, - } + node.Children[i] = childNode } return node, nil } -// decodeLeaf reads and decodes from a reader with the encoding specified in lib/trie/node/encode_doc.go. -func decodeLeaf(reader io.Reader, header byte) (node *Node, err error) { +// decodeLeaf reads from a reader and decodes to a leaf node. 
+func decodeLeaf(reader io.Reader, partialKeyLength uint16) (node *Node, err error) { node = &Node{ Dirty: true, } - keyLen := header & keyLenOffset - node.Key, err = decodeKey(reader, keyLen) + node.Key, err = decodeKey(reader, partialKeyLength) if err != nil { return nil, fmt.Errorf("cannot decode key: %w", err) } diff --git a/internal/trie/node/decode_test.go b/internal/trie/node/decode_test.go index 6a0a916b81..9cf9979dda 100644 --- a/internal/trie/node/decode_test.go +++ b/internal/trie/node/decode_test.go @@ -14,6 +14,10 @@ import ( ) func scaleEncodeBytes(t *testing.T, b ...byte) (encoded []byte) { + return scaleEncodeByteSlice(t, b) +} + +func scaleEncodeByteSlice(t *testing.T, b []byte) (encoded []byte) { encoded, err := scale.Marshal(b) require.NoError(t, err) return encoded @@ -42,28 +46,29 @@ func Test_Decode(t *testing.T) { }{ "no data": { reader: bytes.NewReader(nil), - errWrapped: ErrReadHeaderByte, - errMessage: "cannot read header byte: EOF", + errWrapped: io.EOF, + errMessage: "decoding header: reading header byte: EOF", }, - "unknown node type": { + "unknown node variant": { reader: bytes.NewReader([]byte{0}), - errWrapped: ErrUnknownNodeType, - errMessage: "unknown node type: 0", + errWrapped: ErrVariantUnknown, + errMessage: "decoding header: decoding header byte: node variant is unknown: for header byte 00000000", }, "leaf decoding error": { reader: bytes.NewReader([]byte{ - 65, // node type 1 (leaf) and key length 1 + leafVariant.bits | 1, // key length 1 // missing key data byte }), - errWrapped: ErrReadKeyData, - errMessage: "cannot decode leaf: cannot decode key: cannot read key data: EOF", + errWrapped: io.EOF, + errMessage: "cannot decode leaf: cannot decode key: " + + "reading from reader: EOF", }, "leaf success": { reader: bytes.NewReader( append( []byte{ - 65, // node type 1 (leaf) and key length 1 - 9, // key data + leafVariant.bits | 1, // key length 1 + 9, // key data }, scaleEncodeBytes(t, 1, 2, 3)..., ), @@ -76,18 +81,19 @@ func 
Test_Decode(t *testing.T) { }, "branch decoding error": { reader: bytes.NewReader([]byte{ - 129, // node type 2 (branch without value) and key length 1 + branchVariant.bits | 1, // key length 1 // missing key data byte }), - errWrapped: ErrReadKeyData, - errMessage: "cannot decode branch: cannot decode key: cannot read key data: EOF", + errWrapped: io.EOF, + errMessage: "cannot decode branch: cannot decode key: " + + "reading from reader: EOF", }, "branch success": { reader: bytes.NewReader( []byte{ - 129, // node type 2 (branch without value) and key length 1 - 9, // key data - 0, 0, // no children bitmap + branchVariant.bits | 1, // key length 1 + 9, // key data + 0, 0, // no children bitmap }, ), n: &Node{ @@ -96,66 +102,6 @@ func Test_Decode(t *testing.T) { Dirty: true, }, }, - "branch with two inlined children": { - reader: bytes.NewReader( - []byte{ - 158, // node type 2 (branch w/o value) and key length 30 - // Key data start - 195, 101, 195, 207, 89, 214, - 113, 235, 114, 218, 14, 122, - 65, 19, 196, 16, 2, 80, 95, - 14, 123, 144, 18, 9, 107, - 65, 196, 235, 58, 175, - // Key data end - 148, 127, 110, 164, 41, 8, 0, 0, 104, 95, 15, 31, 5, - 21, 244, 98, 205, 207, 132, 224, 241, 214, 4, 93, 252, - 187, 32, 134, 92, 74, 43, 127, 1, 0, 0, - }, - ), - n: &Node{ - Key: []byte{ - 12, 3, 6, 5, 12, 3, - 12, 15, 5, 9, 13, 6, - 7, 1, 14, 11, 7, 2, - 13, 10, 0, 14, 7, 10, - 4, 1, 1, 3, 12, 4, - }, - Descendants: 2, - Children: []*Node{ - nil, nil, nil, nil, - { - Key: []byte{ - 14, 7, 11, 9, 0, 1, - 2, 0, 9, 6, 11, 4, - 1, 12, 4, 14, 11, - 3, 10, 10, 15, 9, - 4, 7, 15, 6, 14, - 10, 4, 2, 9, - }, - Value: []byte{0, 0}, - Dirty: true, - }, - nil, nil, nil, nil, - { - Key: []byte{ - 15, 1, 15, 0, 5, 1, - 5, 15, 4, 6, 2, 12, - 13, 12, 15, 8, 4, - 14, 0, 15, 1, 13, - 6, 0, 4, 5, 13, - 15, 12, 11, 11, - }, - Value: []byte{ - 134, 92, 74, 43, - 127, 1, 0, 0, - }, - Dirty: true, - }, - nil, nil, nil, nil, nil, nil, - }, - Dirty: true, - }, - }, } for name, testCase := range 
testCases { @@ -177,29 +123,39 @@ func Test_Decode(t *testing.T) { func Test_decodeBranch(t *testing.T) { t.Parallel() + const childHashLength = 32 + childHash := make([]byte, childHashLength) + for i := range childHash { + childHash[i] = byte(i) + } + scaleEncodedChildHash := scaleEncodeByteSlice(t, childHash) + testCases := map[string]struct { - reader io.Reader - header byte - branch *Node - errWrapped error - errMessage string + reader io.Reader + variant byte + partialKeyLength uint16 + branch *Node + errWrapped error + errMessage string }{ "key decoding error": { reader: bytes.NewBuffer([]byte{ // missing key data byte }), - header: 129, // node type 2 (branch without value) and key length 1 - errWrapped: ErrReadKeyData, - errMessage: "cannot decode key: cannot read key data: EOF", + variant: branchVariant.bits, + partialKeyLength: 1, + errWrapped: io.EOF, + errMessage: "cannot decode key: reading from reader: EOF", }, "children bitmap read error": { reader: bytes.NewBuffer([]byte{ 9, // key data // missing children bitmap 2 bytes }), - header: 129, // node type 2 (branch without value) and key length 1 - errWrapped: ErrReadChildrenBitmap, - errMessage: "cannot read children bitmap: EOF", + variant: branchVariant.bits, + partialKeyLength: 1, + errWrapped: ErrReadChildrenBitmap, + errMessage: "cannot read children bitmap: EOF", }, "children decoding error": { reader: bytes.NewBuffer([]byte{ @@ -207,35 +163,36 @@ func Test_decodeBranch(t *testing.T) { 0, 4, // children bitmap // missing children scale encoded data }), - header: 129, // node type 2 (branch without value) and key length 1 - errWrapped: ErrDecodeChildHash, - errMessage: "cannot decode child hash: at index 10: EOF", + variant: branchVariant.bits, + partialKeyLength: 1, + errWrapped: ErrDecodeChildHash, + errMessage: "cannot decode child hash: at index 10: EOF", }, - "success node type 2": { + "success for branch variant": { reader: bytes.NewBuffer( concatByteSlices([][]byte{ - { - 9, // key data - 
0, 4, // children bitmap - }, - scaleEncodeBytes(t, 1, 2, 3, 4, 5), // child hash + {9}, // key data + {0, 4}, // children bitmap + scaleEncodedChildHash, }), ), - header: 129, // node type 2 (branch without value) and key length 1 + variant: branchVariant.bits, + partialKeyLength: 1, branch: &Node{ Key: []byte{9}, Children: padRightChildren([]*Node{ nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, { - HashDigest: []byte{1, 2, 3, 4, 5}, + HashDigest: childHash, + Dirty: true, }, }), Dirty: true, Descendants: 1, }, }, - "value decoding error for node type 3": { + "value decoding error for branch with value variant": { reader: bytes.NewBuffer( concatByteSlices([][]byte{ {9}, // key data @@ -243,20 +200,20 @@ func Test_decodeBranch(t *testing.T) { // missing encoded branch value }), ), - header: 193, // node type 3 (branch with value) and key length 1 - errWrapped: ErrDecodeValue, - errMessage: "cannot decode value: EOF", + variant: branchWithValueVariant.bits, + partialKeyLength: 1, + errWrapped: ErrDecodeValue, + errMessage: "cannot decode value: EOF", }, - "success node type 3": { - reader: bytes.NewBuffer( - concatByteSlices([][]byte{ - {9}, // key data - {0, 4}, // children bitmap - scaleEncodeBytes(t, 7, 8, 9), // branch value - scaleEncodeBytes(t, 1, 2, 3, 4, 5), // child hash - }), - ), - header: 193, // node type 3 (branch with value) and key length 1 + "success for branch with value": { + reader: bytes.NewBuffer(concatByteSlices([][]byte{ + {9}, // key data + {0, 4}, // children bitmap + scaleEncodeBytes(t, 7, 8, 9), // branch value + scaleEncodedChildHash, + })), + variant: branchWithValueVariant.bits, + partialKeyLength: 1, branch: &Node{ Key: []byte{9}, Value: []byte{7, 8, 9}, @@ -264,13 +221,71 @@ func Test_decodeBranch(t *testing.T) { nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, { - HashDigest: []byte{1, 2, 3, 4, 5}, + HashDigest: childHash, + Dirty: true, }, }), Dirty: true, Descendants: 1, }, }, + "branch with inlined node decoding error": { + 
reader: bytes.NewBuffer(concatByteSlices([][]byte{ + {1}, // key data + {0b0000_0001, 0b0000_0000}, // children bitmap + scaleEncodeBytes(t, 1), // branch value + {0}, // garbage inlined node + })), + variant: branchWithValueVariant.bits, + partialKeyLength: 1, + errWrapped: io.EOF, + errMessage: "decoding inlined child at index 0: " + + "decoding header: reading header byte: EOF", + }, + "branch with inlined branch and leaf": { + reader: bytes.NewBuffer(concatByteSlices([][]byte{ + {1}, // key data + {0b0000_0011, 0b0000_0000}, // children bitmap + // top level inlined leaf less than 32 bytes + scaleEncodeByteSlice(t, concatByteSlices([][]byte{ + {leafVariant.bits | 1}, // partial key length of 1 + {2}, // key data + scaleEncodeBytes(t, 2), // value data + })), + // top level inlined branch less than 32 bytes + scaleEncodeByteSlice(t, concatByteSlices([][]byte{ + {branchWithValueVariant.bits | 1}, // partial key length of 1 + {3}, // key data + {0b0000_0001, 0b0000_0000}, // children bitmap + scaleEncodeBytes(t, 3), // branch value + // bottom level leaf + scaleEncodeByteSlice(t, concatByteSlices([][]byte{ + {leafVariant.bits | 1}, // partial key length of 1 + {4}, // key data + scaleEncodeBytes(t, 4), // value data + })), + })), + })), + variant: branchVariant.bits, + partialKeyLength: 1, + branch: &Node{ + Key: []byte{1}, + Descendants: 3, + Children: padRightChildren([]*Node{ + {Key: []byte{2}, Value: []byte{2}, Dirty: true}, + { + Key: []byte{3}, + Value: []byte{3}, + Dirty: true, + Descendants: 1, + Children: padRightChildren([]*Node{ + {Key: []byte{4}, Value: []byte{4}, Dirty: true}, + }), + }, + }), + Dirty: true, + }, + }, } for name, testCase := range testCases { @@ -278,7 +293,8 @@ func Test_decodeBranch(t *testing.T) { t.Run(name, func(t *testing.T) { t.Parallel() - branch, err := decodeBranch(testCase.reader, testCase.header) + branch, err := decodeBranch(testCase.reader, + testCase.variant, testCase.partialKeyLength) assert.ErrorIs(t, err, 
testCase.errWrapped) if err != nil { @@ -293,35 +309,39 @@ func Test_decodeLeaf(t *testing.T) { t.Parallel() testCases := map[string]struct { - reader io.Reader - header byte - leaf *Node - errWrapped error - errMessage string + reader io.Reader + variant byte + partialKeyLength uint16 + leaf *Node + errWrapped error + errMessage string }{ "key decoding error": { reader: bytes.NewBuffer([]byte{ // missing key data byte }), - header: 65, // node type 1 (leaf) and key length 1 - errWrapped: ErrReadKeyData, - errMessage: "cannot decode key: cannot read key data: EOF", + variant: leafVariant.bits, + partialKeyLength: 1, + errWrapped: io.EOF, + errMessage: "cannot decode key: reading from reader: EOF", }, "value decoding error": { reader: bytes.NewBuffer([]byte{ 9, // key data 255, 255, // bad value data }), - header: 65, // node type 1 (leaf) and key length 1 - errWrapped: ErrDecodeValue, - errMessage: "cannot decode value: could not decode invalid integer", + variant: leafVariant.bits, + partialKeyLength: 1, + errWrapped: ErrDecodeValue, + errMessage: "cannot decode value: could not decode invalid integer", }, "zero value": { reader: bytes.NewBuffer([]byte{ 9, // key data // missing value data }), - header: 65, // node type 1 (leaf) and key length 1 + variant: leafVariant.bits, + partialKeyLength: 1, leaf: &Node{ Key: []byte{9}, Dirty: true, @@ -334,7 +354,8 @@ func Test_decodeLeaf(t *testing.T) { scaleEncodeBytes(t, 1, 2, 3, 4, 5), // value data }), ), - header: 65, // node type 1 (leaf) and key length 1 + variant: leafVariant.bits, + partialKeyLength: 1, leaf: &Node{ Key: []byte{9}, Value: []byte{1, 2, 3, 4, 5}, @@ -348,7 +369,8 @@ func Test_decodeLeaf(t *testing.T) { t.Run(name, func(t *testing.T) { t.Parallel() - leaf, err := decodeLeaf(testCase.reader, testCase.header) + leaf, err := decodeLeaf(testCase.reader, + testCase.partialKeyLength) assert.ErrorIs(t, err, testCase.errWrapped) if err != nil { diff --git a/internal/trie/node/encode.go 
b/internal/trie/node/encode.go index c7890e16a8..c605da7302 100644 --- a/internal/trie/node/encode.go +++ b/internal/trie/node/encode.go @@ -12,7 +12,9 @@ import ( ) // Encode encodes the node to the buffer given. -// The encoding format is documented in encode_doc.go. +// The encoding format is documented in the README.md +// of this package, and specified in the Polkadot spec at +// https://spec.polkadot.network/#sect-state-storage func (n *Node) Encode(buffer Buffer) (err error) { if !n.Dirty && n.Encoding != nil { _, err = buffer.Write(n.Encoding) @@ -43,8 +45,7 @@ func (n *Node) Encode(buffer Buffer) (err error) { // check value is not nil for branch nodes, even though // leaf nodes always have a non-nil value. - if n.Type() == Leaf || n.Value != nil { - // TODO remove `n.Type() == Leaf` and update tests + if n.Value != nil { encodedValue, err := scale.Marshal(n.Value) // TODO scale encoder to write to buffer if err != nil { return fmt.Errorf("cannot scale encode value: %w", err) diff --git a/internal/trie/node/encode_decode_test.go b/internal/trie/node/encode_decode_test.go index 8c6757b4ef..c92a1a2751 100644 --- a/internal/trie/node/encode_decode_test.go +++ b/internal/trie/node/encode_decode_test.go @@ -93,6 +93,7 @@ func Test_Branch_Encode_Decode(t *testing.T) { 14, 15, 16, 17, 10, 11, 12, 13, }, + Dirty: true, }, }), }, @@ -109,6 +110,7 @@ func Test_Branch_Encode_Decode(t *testing.T) { 21, 186, 226, 204, 145, 132, 5, 39, 204, }, + Dirty: true, }, }), Dirty: true, @@ -127,12 +129,10 @@ func Test_Branch_Encode_Decode(t *testing.T) { err := testCase.branchToEncode.Encode(buffer) require.NoError(t, err) - oneBuffer := make([]byte, 1) - _, err = buffer.Read(oneBuffer) + variant, partialKeyLength, err := decodeHeader(buffer) require.NoError(t, err) - header := oneBuffer[0] - resultBranch, err := decodeBranch(buffer, header) + resultBranch, err := decodeBranch(buffer, variant, partialKeyLength) require.NoError(t, err) assert.Equal(t, testCase.branchDecoded, 
resultBranch) diff --git a/internal/trie/node/encode_doc.go b/internal/trie/node/encode_doc.go deleted file mode 100644 index 1a8b6a1c0a..0000000000 --- a/internal/trie/node/encode_doc.go +++ /dev/null @@ -1,28 +0,0 @@ -// Copyright 2021 ChainSafe Systems (ON) -// SPDX-License-Identifier: LGPL-3.0-only - -package node - -//nolint:lll -// Modified Merkle-Patricia Trie -// See https://github.com/w3f/polkadot-spec/blob/master/runtime-environment-spec/polkadot_re_spec.pdf for the full specification. -// -// Note that for the following definitions, `|` denotes concatenation -// -// Branch encoding: -// NodeHeader | Extra partial key length | Partial Key | Value -// `NodeHeader` is a byte such that: -// most significant two bits of `NodeHeader`: 10 if branch w/o value, 11 if branch w/ value -// least significant six bits of `NodeHeader`: if len(key) > 62, 0x3f, otherwise len(key) -// `Extra partial key length` is included if len(key) > 63 and consists of the remaining key length -// `Partial Key` is the branch's key -// `Value` is: Children Bitmap | SCALE Branch node Value | Hash(Enc(Child[i_1])) | Hash(Enc(Child[i_2])) | ... 
| Hash(Enc(Child[i_n])) -// -// Leaf encoding: -// NodeHeader | Extra partial key length | Partial Key | Value -// `NodeHeader` is a byte such that: -// most significant two bits of `NodeHeader`: 01 -// least significant six bits of `NodeHeader`: if len(key) > 62, 0x3f, otherwise len(key) -// `Extra partial key length` is included if len(key) > 63 and consists of the remaining key length -// `Partial Key` is the leaf's key -// `Value` is the leaf's SCALE encoded value diff --git a/internal/trie/node/encode_test.go b/internal/trie/node/encode_test.go index e57c13902b..2fb208cf00 100644 --- a/internal/trie/node/encode_test.go +++ b/internal/trie/node/encode_test.go @@ -59,26 +59,28 @@ func Test_Node_Encode(t *testing.T) { }, "leaf header encoding error": { node: &Node{ - Key: make([]byte, 63+(1<<16)), + Key: make([]byte, 1), }, writes: []writeCall{ { - written: []byte{127}, + written: []byte{leafVariant.bits | 1}, + err: errTest, }, }, - wrappedErr: ErrPartialKeyTooBig, - errMessage: "cannot encode header: partial key length cannot be larger than or equal to 2^16: 65536", + wrappedErr: errTest, + errMessage: "cannot encode header: test error", }, "leaf buffer write error for encoded key": { node: &Node{ - Key: []byte{1, 2, 3}, + Key: []byte{1, 2, 3}, + Value: []byte{1}, }, writes: []writeCall{ { - written: []byte{67}, + written: []byte{leafVariant.bits | 3}, // partial key length 3 }, { - written: []byte{1, 35}, + written: []byte{0x01, 0x23}, err: errTest, }, }, @@ -92,10 +94,10 @@ func Test_Node_Encode(t *testing.T) { }, writes: []writeCall{ { - written: []byte{67}, + written: []byte{leafVariant.bits | 3}, // partial key length 3 }, { - written: []byte{1, 35}, + written: []byte{0x01, 0x23}, }, { written: []byte{12, 4, 5, 6}, @@ -112,10 +114,10 @@ func Test_Node_Encode(t *testing.T) { }, writes: []writeCall{ { - written: []byte{67}, + written: []byte{leafVariant.bits | 3}, // partial key length 3 }, { - written: []byte{1, 35}, + written: []byte{0x01, 0x23}, }, { 
written: []byte{12, 4, 5, 6}, @@ -153,15 +155,16 @@ func Test_Node_Encode(t *testing.T) { "branch header encoding error": { node: &Node{ Children: make([]*Node, ChildrenCapacity), - Key: make([]byte, 63+(1<<16)), + Key: make([]byte, 1), }, writes: []writeCall{ { // header - written: []byte{191}, + written: []byte{branchVariant.bits | 1}, // partial key length 1 + err: errTest, }, }, - wrappedErr: ErrPartialKeyTooBig, - errMessage: "cannot encode header: partial key length cannot be larger than or equal to 2^16: 65536", + wrappedErr: errTest, + errMessage: "cannot encode header: test error", }, "buffer write error for encoded key": { node: &Node{ @@ -171,10 +174,10 @@ func Test_Node_Encode(t *testing.T) { }, writes: []writeCall{ { // header - written: []byte{195}, + written: []byte{branchWithValueVariant.bits | 3}, // partial key length 3 }, { // key LE - written: []byte{1, 35}, + written: []byte{0x01, 0x23}, err: errTest, }, }, @@ -186,16 +189,16 @@ func Test_Node_Encode(t *testing.T) { Key: []byte{1, 2, 3}, Value: []byte{100}, Children: []*Node{ - nil, nil, nil, {Key: []byte{9}}, - nil, nil, nil, {Key: []byte{11}}, + nil, nil, nil, {Key: []byte{9}, Value: []byte{1}}, + nil, nil, nil, {Key: []byte{11}, Value: []byte{1}}, }, }, writes: []writeCall{ { // header - written: []byte{195}, + written: []byte{branchWithValueVariant.bits | 3}, // partial key length 3 }, { // key LE - written: []byte{1, 35}, + written: []byte{0x01, 0x23}, }, { // children bitmap written: []byte{136, 0}, @@ -210,16 +213,16 @@ func Test_Node_Encode(t *testing.T) { Key: []byte{1, 2, 3}, Value: []byte{100}, Children: []*Node{ - nil, nil, nil, {Key: []byte{9}}, - nil, nil, nil, {Key: []byte{11}}, + nil, nil, nil, {Key: []byte{9}, Value: []byte{1}}, + nil, nil, nil, {Key: []byte{11}, Value: []byte{1}}, }, }, writes: []writeCall{ { // header - written: []byte{195}, + written: []byte{branchWithValueVariant.bits | 3}, // partial key length 3 }, { // key LE - written: []byte{1, 35}, + written: 
[]byte{0x01, 0x23}, }, { // children bitmap written: []byte{136, 0}, @@ -237,16 +240,16 @@ func Test_Node_Encode(t *testing.T) { Key: []byte{1, 2, 3}, Value: []byte{100}, Children: []*Node{ - nil, nil, nil, {Key: []byte{9}}, - nil, nil, nil, {Key: []byte{11}}, + nil, nil, nil, {Key: []byte{9}, Value: []byte{1}}, + nil, nil, nil, {Key: []byte{11}, Value: []byte{1}}, }, }, writes: []writeCall{ { // header - written: []byte{195}, + written: []byte{branchWithValueVariant.bits | 3}, // partial key length 3 }, { // key LE - written: []byte{1, 35}, + written: []byte{0x01, 0x23}, }, { // children bitmap written: []byte{136, 0}, @@ -255,7 +258,7 @@ func Test_Node_Encode(t *testing.T) { written: []byte{4, 100}, }, { // children - written: []byte{12, 65, 9, 0}, + written: []byte{16, 65, 9, 4, 1}, err: errTest, }, }, @@ -269,16 +272,16 @@ func Test_Node_Encode(t *testing.T) { Key: []byte{1, 2, 3}, Value: []byte{100}, Children: []*Node{ - nil, nil, nil, {Key: []byte{9}}, - nil, nil, nil, {Key: []byte{11}}, + nil, nil, nil, {Key: []byte{9}, Value: []byte{1}}, + nil, nil, nil, {Key: []byte{11}, Value: []byte{1}}, }, }, writes: []writeCall{ { // header - written: []byte{195}, + written: []byte{branchWithValueVariant.bits | 3}, // partial key length 3 }, { // key LE - written: []byte{1, 35}, + written: []byte{0x01, 0x23}, }, { // children bitmap written: []byte{136, 0}, @@ -287,10 +290,10 @@ func Test_Node_Encode(t *testing.T) { written: []byte{4, 100}, }, { // first children - written: []byte{12, 65, 9, 0}, + written: []byte{16, 65, 9, 4, 1}, }, { // second children - written: []byte{12, 65, 11, 0}, + written: []byte{16, 65, 11, 4, 1}, }, }, }, diff --git a/internal/trie/node/hash_test.go b/internal/trie/node/hash_test.go index b2d785342b..703845d514 100644 --- a/internal/trie/node/hash_test.go +++ b/internal/trie/node/hash_test.go @@ -21,16 +21,6 @@ func Test_Node_EncodeAndHash(t *testing.T) { errWrapped error errMessage string }{ - "empty leaf": { - node: Node{}, - expectedNode: 
Node{ - Encoding: []byte{0x40, 0x0}, - HashDigest: []byte{0x40, 0x0}, - }, - encoding: []byte{0x40, 0x0}, - hash: []byte{0x40, 0x0}, - isRoot: false, - }, "small leaf encoding": { node: Node{ Key: []byte{1}, @@ -93,14 +83,15 @@ func Test_Node_EncodeAndHash(t *testing.T) { }, "large leaf encoding": { node: Node{ - Key: repeatBytes(65, 7), + Key: repeatBytes(65, 7), + Value: []byte{0x01}, }, expectedNode: Node{ - Encoding: []byte{0x7f, 0x2, 0x7, 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, 0x0}, //nolint:lll - HashDigest: []byte{0xfb, 0xae, 0x31, 0x4b, 0xef, 0x31, 0x9, 0xc7, 0x62, 0x99, 0x9d, 0x40, 0x9b, 0xd4, 0xdc, 0x64, 0xe7, 0x39, 0x46, 0x8b, 0xd3, 0xaf, 0xe8, 0x63, 0x9d, 0xf9, 0x41, 0x40, 0x76, 0x40, 0x10, 0xa3}, //nolint:lll + Encoding: []byte{0x7f, 0x2, 0x7, 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, 0x4, 0x1}, //nolint:lll + HashDigest: []byte{0xd2, 0x1d, 0x43, 0x7, 0x18, 0x17, 0x1b, 0xf1, 0x45, 0x9c, 0xe5, 0x8f, 0xd7, 0x79, 0x82, 0xb, 0xc8, 0x5c, 0x8, 0x47, 0xfe, 0x6c, 0x99, 0xc5, 0xe9, 0x57, 0x87, 0x7, 0x1d, 0x2e, 0x24, 0x5d}, //nolint:lll }, - encoding: []byte{0x7f, 0x2, 0x7, 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, 0x0}, //nolint:lll - hash: []byte{0xfb, 0xae, 0x31, 0x4b, 0xef, 0x31, 0x9, 0xc7, 0x62, 0x99, 0x9d, 0x40, 0x9b, 0xd4, 0xdc, 0x64, 0xe7, 0x39, 0x46, 0x8b, 0xd3, 0xaf, 0xe8, 0x63, 0x9d, 0xf9, 0x41, 0x40, 0x76, 0x40, 0x10, 0xa3}, //nolint:lll + encoding: []byte{0x7f, 0x2, 0x7, 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, 
0x77, 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, 0x4, 0x1}, //nolint:lll + hash: []byte{0xd2, 0x1d, 0x43, 0x7, 0x18, 0x17, 0x1b, 0xf1, 0x45, 0x9c, 0xe5, 0x8f, 0xd7, 0x79, 0x82, 0xb, 0xc8, 0x5c, 0x8, 0x47, 0xfe, 0x6c, 0x99, 0xc5, 0xe9, 0x57, 0x87, 0x7, 0x1d, 0x2e, 0x24, 0x5d}, //nolint:lll isRoot: false, }, "empty branch": { diff --git a/internal/trie/node/header.go b/internal/trie/node/header.go index 5177b6f10c..033c5e84e7 100644 --- a/internal/trie/node/header.go +++ b/internal/trie/node/header.go @@ -4,44 +4,151 @@ package node import ( + "errors" + "fmt" "io" ) -const ( - leafHeader byte = 1 // 01 - branchHeader byte = 2 // 10 - branchWithValueHeader byte = 3 // 11 -) - -const ( - keyLenOffset = 0x3f - nodeHeaderShift = 6 -) - // encodeHeader writes the encoded header for the node. func encodeHeader(node *Node, writer io.Writer) (err error) { - var header byte + partialKeyLength := len(node.Key) + if partialKeyLength > int(maxPartialKeyLength) { + panic(fmt.Sprintf("partial key length is too big: %d", partialKeyLength)) + } + + // Merge variant byte and partial key length together + var variant variant if node.Type() == Leaf { - header = leafHeader + variant = leafVariant } else if node.Value == nil { - header = branchHeader + variant = branchVariant } else { - header = branchWithValueHeader + variant = branchWithValueVariant } - header <<= nodeHeaderShift - if len(node.Key) < keyLenOffset { - header |= byte(len(node.Key)) - _, err = writer.Write([]byte{header}) + buffer := make([]byte, 1) + buffer[0] = variant.bits + partialKeyLengthMask := ^variant.mask + + if partialKeyLength < int(partialKeyLengthMask) { + // Partial key length fits in header byte + buffer[0] |= byte(partialKeyLength) + _, err = writer.Write(buffer) return err } - header = header | keyLenOffset - _, err = writer.Write([]byte{header}) + // Partial key length does not fit in header byte only + buffer[0] |= partialKeyLengthMask + partialKeyLength -= int(partialKeyLengthMask) + _, 
err = writer.Write(buffer) if err != nil { return err } - err = encodeKeyLength(len(node.Key), writer) - return err + for { + buffer[0] = 255 + if partialKeyLength < 255 { + buffer[0] = byte(partialKeyLength) + } + + _, err = writer.Write(buffer) + if err != nil { + return err + } + + partialKeyLength -= int(buffer[0]) + + if buffer[0] < 255 { + break + } + } + + return nil +} + +var ( + ErrPartialKeyTooBig = errors.New("partial key length cannot be larger than 2^16") +) + +func decodeHeader(reader io.Reader) (variant byte, + partialKeyLength uint16, err error) { + buffer := make([]byte, 1) + _, err = reader.Read(buffer) + if err != nil { + return 0, 0, fmt.Errorf("reading header byte: %w", err) + } + + variant, partialKeyLengthHeader, partialKeyLengthHeaderMask, + err := decodeHeaderByte(buffer[0]) + if err != nil { + return 0, 0, fmt.Errorf("decoding header byte: %w", err) + } + + partialKeyLength = uint16(partialKeyLengthHeader) + if partialKeyLengthHeader < partialKeyLengthHeaderMask { + // partial key length is contained in the first byte. + return variant, partialKeyLength, nil + } + + // the partial key length header byte is equal to its maximum + // possible value; this means the partial key length is greater + // than this (0 to 2^6 - 1 = 63) maximum value, and we need to + // accumulate the next bytes from the reader to get the full + // partial key length. + // Specification: https://spec.polkadot.network/#defn-node-header + var previousKeyLength uint16 // used to track an eventual overflow + for { + _, err = reader.Read(buffer) + if err != nil { + return 0, 0, fmt.Errorf("reading key length: %w", err) + } + + previousKeyLength = partialKeyLength + partialKeyLength += uint16(buffer[0]) + + if partialKeyLength < previousKeyLength { + // the partial key can have a length up to 65535 which is the + // maximum uint16 value; therefore if we overflowed, we went over + // this maximum. 
+ overflowed := maxPartialKeyLength - previousKeyLength + partialKeyLength + return 0, 0, fmt.Errorf("%w: overflowed by %d", ErrPartialKeyTooBig, overflowed) + } + + if buffer[0] < 255 { + // the end of the partial key length has been reached. + return variant, partialKeyLength, nil + } + } +} + +var ErrVariantUnknown = errors.New("node variant is unknown") + +func decodeHeaderByte(header byte) (variantBits, + partialKeyLengthHeader, partialKeyLengthHeaderMask byte, err error) { + // variants is a slice of all variants sorted in ascending + // order by the number of bits each variant mask occupy + // in the header byte. + // See https://spec.polkadot.network/#defn-node-header + // Performance note: see `Benchmark_decodeHeaderByte`; + // running with a locally scoped slice is as fast as having + // it at global scope. + variants := []variant{ + leafVariant, // mask 1100_0000 + branchVariant, // mask 1100_0000 + branchWithValueVariant, // mask 1100_0000 + } + + for i := len(variants) - 1; i >= 0; i-- { + variantBits = header & variants[i].mask + if variantBits != variants[i].bits { + continue + } + + partialKeyLengthHeaderMask = ^variants[i].mask + partialKeyLengthHeader = header & partialKeyLengthHeaderMask + return variantBits, partialKeyLengthHeader, + partialKeyLengthHeaderMask, nil + } + + return 0, 0, 0, fmt.Errorf("%w: for header byte %08b", ErrVariantUnknown, header) } diff --git a/internal/trie/node/header_test.go b/internal/trie/node/header_test.go index 1ed826483a..bc77715d1c 100644 --- a/internal/trie/node/header_test.go +++ b/internal/trie/node/header_test.go @@ -4,10 +4,14 @@ package node import ( + "bytes" + "io" + "math" "testing" "github.com/golang/mock/gomock" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) func Test_encodeHeader(t *testing.T) { @@ -22,7 +26,7 @@ func Test_encodeHeader(t *testing.T) { Children: make([]*Node, ChildrenCapacity), }, writes: []writeCall{ - {written: []byte{0x80}}, + {written: 
[]byte{branchVariant.bits}}, }, }, "branch with value": { @@ -31,7 +35,7 @@ func Test_encodeHeader(t *testing.T) { Children: make([]*Node, ChildrenCapacity), }, writes: []writeCall{ - {written: []byte{0xc0}}, + {written: []byte{branchWithValueVariant.bits}}, }, }, "branch with key of length 30": { @@ -40,7 +44,7 @@ func Test_encodeHeader(t *testing.T) { Children: make([]*Node, ChildrenCapacity), }, writes: []writeCall{ - {written: []byte{0x9e}}, + {written: []byte{branchVariant.bits | 30}}, }, }, "branch with key of length 62": { @@ -49,7 +53,7 @@ func Test_encodeHeader(t *testing.T) { Children: make([]*Node, ChildrenCapacity), }, writes: []writeCall{ - {written: []byte{0xbe}}, + {written: []byte{branchVariant.bits | 62}}, }, }, "branch with key of length 63": { @@ -58,8 +62,9 @@ func Test_encodeHeader(t *testing.T) { Children: make([]*Node, ChildrenCapacity), }, writes: []writeCall{ - {written: []byte{0xbf}}, - {written: []byte{0x0}}, + {written: []byte{branchVariant.bits | 63}}, + {written: []byte{0x00}}, // trailing 0 to indicate the partial + // key length is done here. 
}, }, "branch with key of length 64": { @@ -68,28 +73,17 @@ func Test_encodeHeader(t *testing.T) { Children: make([]*Node, ChildrenCapacity), }, writes: []writeCall{ - {written: []byte{0xbf}}, - {written: []byte{0x1}}, + {written: []byte{branchVariant.bits | 63}}, + {written: []byte{0x01}}, }, }, - "branch with key too big": { - node: &Node{ - Key: make([]byte, 65535+63), - Children: make([]*Node, ChildrenCapacity), - }, - writes: []writeCall{ - {written: []byte{0xbf}}, - }, - errWrapped: ErrPartialKeyTooBig, - errMessage: "partial key length cannot be larger than or equal to 2^16: 65535", - }, "branch with small key length write error": { node: &Node{ Children: make([]*Node, ChildrenCapacity), }, writes: []writeCall{ { - written: []byte{0x80}, + written: []byte{branchVariant.bits}, err: errTest, }, }, @@ -98,12 +92,15 @@ func Test_encodeHeader(t *testing.T) { }, "branch with long key length write error": { node: &Node{ - Key: make([]byte, 64), + Key: make([]byte, int(^branchVariant.mask)+1), Children: make([]*Node, ChildrenCapacity), }, writes: []writeCall{ { - written: []byte{0xbf}, + written: []byte{branchVariant.bits | ^branchVariant.mask}, + }, + { + written: []byte{0x01}, err: errTest, }, }, @@ -111,9 +108,9 @@ func Test_encodeHeader(t *testing.T) { errMessage: "test error", }, "leaf with no key": { - node: &Node{}, + node: &Node{Value: []byte{1}}, writes: []writeCall{ - {written: []byte{0x40}}, + {written: []byte{leafVariant.bits}}, }, }, "leaf with key of length 30": { @@ -121,7 +118,7 @@ func Test_encodeHeader(t *testing.T) { Key: make([]byte, 30), }, writes: []writeCall{ - {written: []byte{0x5e}}, + {written: []byte{leafVariant.bits | 30}}, }, }, "leaf with short key write error": { @@ -130,19 +127,19 @@ func Test_encodeHeader(t *testing.T) { }, writes: []writeCall{ { - written: []byte{0x5e}, + written: []byte{leafVariant.bits | 30}, err: errTest, }, }, errWrapped: errTest, - errMessage: errTest.Error(), + errMessage: "test error", }, "leaf with key of 
length 62": { node: &Node{ Key: make([]byte, 62), }, writes: []writeCall{ - {written: []byte{0x7e}}, + {written: []byte{leafVariant.bits | 62}}, }, }, "leaf with key of length 63": { @@ -150,7 +147,7 @@ func Test_encodeHeader(t *testing.T) { Key: make([]byte, 63), }, writes: []writeCall{ - {written: []byte{0x7f}}, + {written: []byte{leafVariant.bits | 63}}, {written: []byte{0x0}}, }, }, @@ -159,7 +156,7 @@ func Test_encodeHeader(t *testing.T) { Key: make([]byte, 64), }, writes: []writeCall{ - {written: []byte{0x7f}}, + {written: []byte{leafVariant.bits | 63}}, {written: []byte{0x1}}, }, }, @@ -169,22 +166,32 @@ func Test_encodeHeader(t *testing.T) { }, writes: []writeCall{ { - written: []byte{0x7f}, + written: []byte{leafVariant.bits | 63}, err: errTest, }, }, errWrapped: errTest, - errMessage: errTest.Error(), + errMessage: "test error", }, - "leaf with key too big": { + "leaf with key length over 3 bytes": { node: &Node{ - Key: make([]byte, 65535+63), + Key: make([]byte, int(^leafVariant.mask)+0b1111_1111+0b0000_0001), }, writes: []writeCall{ - {written: []byte{0x7f}}, + {written: []byte{leafVariant.bits | ^leafVariant.mask}}, + {written: []byte{0b1111_1111}}, + {written: []byte{0b0000_0001}}, + }, + }, + "leaf with key length over 3 bytes and last byte zero": { + node: &Node{ + Key: make([]byte, int(^leafVariant.mask)+0b1111_1111), + }, + writes: []writeCall{ + {written: []byte{leafVariant.bits | ^leafVariant.mask}}, + {written: []byte{0b1111_1111}}, + {written: []byte{0x00}}, }, - errWrapped: ErrPartialKeyTooBig, - errMessage: "partial key length cannot be larger than or equal to 2^16: 65535", }, } @@ -215,4 +222,211 @@ func Test_encodeHeader(t *testing.T) { } }) } + + t.Run("partial key length is too big", func(t *testing.T) { + t.Parallel() + + const keyLength = uint(maxPartialKeyLength) + 1 + node := &Node{ + Key: make([]byte, keyLength), + } + + assert.PanicsWithValue(t, "partial key length is too big: 65536", func() { + _ = encodeHeader(node, io.Discard) + 
}) + }) +} + +func Test_encodeHeader_At_Maximum(t *testing.T) { + t.Parallel() + + // Note: this test case cannot run with the + // mock writer since it's too slow, so we use + // an actual buffer. + + variant := leafVariant.bits + const partialKeyLengthHeaderMask = 0b0011_1111 + const keyLength = uint(maxPartialKeyLength) + extraKeyBytesNeeded := math.Ceil(float64(maxPartialKeyLength-partialKeyLengthHeaderMask) / 255.0) + expectedEncodingLength := 1 + int(extraKeyBytesNeeded) + + lengthLeft := maxPartialKeyLength + expectedBytes := make([]byte, expectedEncodingLength) + expectedBytes[0] = variant | partialKeyLengthHeaderMask + lengthLeft -= partialKeyLengthHeaderMask + for i := 1; i < len(expectedBytes)-1; i++ { + expectedBytes[i] = 255 + lengthLeft -= 255 + } + expectedBytes[len(expectedBytes)-1] = byte(lengthLeft) + + buffer := bytes.NewBuffer(nil) + buffer.Grow(expectedEncodingLength) + + node := &Node{ + Key: make([]byte, keyLength), + } + + err := encodeHeader(node, buffer) + + require.NoError(t, err) + assert.Equal(t, expectedBytes, buffer.Bytes()) +} + +func Test_decodeHeader(t *testing.T) { + testCases := map[string]struct { + reads []readCall + variant byte + partialKeyLength uint16 + errWrapped error + errMessage string + }{ + "first byte read error": { + reads: []readCall{ + {buffArgCap: 1, err: errTest}, + }, + errWrapped: errTest, + errMessage: "reading header byte: test error", + }, + "header byte decoding error": { + reads: []readCall{ + {buffArgCap: 1, read: []byte{0b0011_1110}}, + }, + errWrapped: ErrVariantUnknown, + errMessage: "decoding header byte: node variant is unknown: for header byte 00111110", + }, + "partial key length contained in first byte": { + reads: []readCall{ + {buffArgCap: 1, read: []byte{leafVariant.bits | 0b0011_1110}}, + }, + variant: leafVariant.bits, + partialKeyLength: uint16(0b0011_1110), + }, + "long partial key length and second byte read error": { + reads: []readCall{ + {buffArgCap: 1, read: []byte{leafVariant.bits | 
0b0011_1111}}, + {buffArgCap: 1, err: errTest}, + }, + errWrapped: errTest, + errMessage: "reading key length: test error", + }, + "partial key length spread on multiple bytes": { + reads: []readCall{ + {buffArgCap: 1, read: []byte{leafVariant.bits | 0b0011_1111}}, + {buffArgCap: 1, read: []byte{0b1111_1111}}, + {buffArgCap: 1, read: []byte{0b1111_0000}}, + }, + variant: leafVariant.bits, + partialKeyLength: uint16(0b0011_1111 + 0b1111_1111 + 0b1111_0000), + }, + "partial key length too long": { + reads: repeatReadCall(readCall{ + buffArgCap: 1, + read: []byte{0b1111_1111}, + }, 258), + errWrapped: ErrPartialKeyTooBig, + errMessage: "partial key length cannot be larger than 2^16: overflowed by 254", + }, + } + + for name, testCase := range testCases { + testCase := testCase + t.Run(name, func(t *testing.T) { + t.Parallel() + + ctrl := gomock.NewController(t) + + reader := NewMockReader(ctrl) + var previousCall *gomock.Call + for _, readCall := range testCase.reads { + readCall := readCall // required variable pinning + byteSliceCapMatcher := newByteSliceCapMatcher(readCall.buffArgCap) + call := reader.EXPECT().Read(byteSliceCapMatcher). 
+ DoAndReturn(func(b []byte) (n int, err error) { + copy(b, readCall.read) + return readCall.n, readCall.err + }) + if previousCall != nil { + call.After(previousCall) + } + previousCall = call + } + + variant, partialKeyLength, err := decodeHeader(reader) + + assert.Equal(t, testCase.variant, variant) + assert.Equal(t, int(testCase.partialKeyLength), int(partialKeyLength)) + assert.ErrorIs(t, err, testCase.errWrapped) + if testCase.errWrapped != nil { + assert.EqualError(t, err, testCase.errMessage) + } + }) + } +} + +func Test_decodeHeaderByte(t *testing.T) { + t.Parallel() + + testCases := map[string]struct { + header byte + variantBits byte + partialKeyLengthHeader byte + partialKeyLengthHeaderMask byte + errWrapped error + errMessage string + }{ + "branch with value header": { + header: 0b1110_1001, + variantBits: 0b1100_0000, + partialKeyLengthHeader: 0b0010_1001, + partialKeyLengthHeaderMask: 0b0011_1111, + }, + "branch header": { + header: 0b1010_1001, + variantBits: 0b1000_0000, + partialKeyLengthHeader: 0b0010_1001, + partialKeyLengthHeaderMask: 0b0011_1111, + }, + "leaf header": { + header: 0b0110_1001, + variantBits: 0b0100_0000, + partialKeyLengthHeader: 0b0010_1001, + partialKeyLengthHeaderMask: 0b0011_1111, + }, + "unknown variant header": { + header: 0b0000_0000, + errWrapped: ErrVariantUnknown, + errMessage: "node variant is unknown: for header byte 00000000", + }, + } + + for name, testCase := range testCases { + testCase := testCase + t.Run(name, func(t *testing.T) { + t.Parallel() + + variantBits, partialKeyLengthHeader, + partialKeyLengthHeaderMask, err := decodeHeaderByte(testCase.header) + + assert.Equal(t, testCase.variantBits, variantBits) + assert.Equal(t, testCase.partialKeyLengthHeader, partialKeyLengthHeader) + assert.Equal(t, testCase.partialKeyLengthHeaderMask, partialKeyLengthHeaderMask) + assert.ErrorIs(t, err, testCase.errWrapped) + if testCase.errWrapped != nil { + assert.EqualError(t, err, testCase.errMessage) + } + }) + } +} + 
+func Benchmark_decodeHeaderByte(b *testing.B) { + // With global scoped variants slice: + // 3.453 ns/op 0 B/op 0 allocs/op + // With locally scoped variants slice: + // 3.441 ns/op 0 B/op 0 allocs/op + header := leafVariant.bits | 0b0000_0001 + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, _, _, _ = decodeHeaderByte(header) + } } diff --git a/internal/trie/node/key.go b/internal/trie/node/key.go index 3b450513bb..343a5d747d 100644 --- a/internal/trie/node/key.go +++ b/internal/trie/node/key.go @@ -4,92 +4,31 @@ package node import ( - "bytes" "errors" "fmt" "io" "github.com/ChainSafe/gossamer/internal/trie/codec" - "github.com/ChainSafe/gossamer/internal/trie/pools" ) -const maxPartialKeySize = ^uint16(0) +const maxPartialKeyLength = ^uint16(0) -var ( - ErrPartialKeyTooBig = errors.New("partial key length cannot be larger than or equal to 2^16") - ErrReadKeyLength = errors.New("cannot read key length") - ErrReadKeyData = errors.New("cannot read key data") -) - -// encodeKeyLength encodes the key length. -func encodeKeyLength(keyLength int, writer io.Writer) (err error) { - keyLength -= 63 - - if keyLength >= int(maxPartialKeySize) { - return fmt.Errorf("%w: %d", - ErrPartialKeyTooBig, keyLength) - } - - for i := uint16(0); i < maxPartialKeySize; i++ { - if keyLength < 255 { - _, err = writer.Write([]byte{byte(keyLength)}) - if err != nil { - return err - } - break - } - _, err = writer.Write([]byte{255}) - if err != nil { - return err - } - - keyLength -= 255 - } - - return nil -} +var ErrReaderMismatchCount = errors.New("read unexpected number of bytes from reader") // decodeKey decodes a key from a reader. 
-func decodeKey(reader io.Reader, keyLengthByte byte) (b []byte, err error) { - keyLength := int(keyLengthByte) - - if keyLengthByte == keyLenOffset { - // partial key longer than 63, read next bytes for rest of pk len - buffer := pools.SingleByteBuffers.Get().(*bytes.Buffer) - defer pools.SingleByteBuffers.Put(buffer) - oneByteBuf := buffer.Bytes() - for { - _, err = reader.Read(oneByteBuf) - if err != nil { - return nil, fmt.Errorf("%w: %s", ErrReadKeyLength, err) - } - nextKeyLen := oneByteBuf[0] - - keyLength += int(nextKeyLen) - - if nextKeyLen < 0xff { - break - } - - if keyLength >= int(maxPartialKeySize) { - return nil, fmt.Errorf("%w: %d", - ErrPartialKeyTooBig, keyLength) - } - } - } - - if keyLength == 0 { +func decodeKey(reader io.Reader, partialKeyLength uint16) (b []byte, err error) { + if partialKeyLength == 0 { return []byte{}, nil } - key := make([]byte, keyLength/2+keyLength%2) + key := make([]byte, partialKeyLength/2+partialKeyLength%2) n, err := reader.Read(key) if err != nil { - return nil, fmt.Errorf("%w: %s", ErrReadKeyData, err) + return nil, fmt.Errorf("reading from reader: %w", err) } else if n != len(key) { - return nil, fmt.Errorf("%w: read %d bytes instead of %d", - ErrReadKeyData, n, len(key)) + return nil, fmt.Errorf("%w: read %d bytes instead of expected %d bytes", + ErrReaderMismatchCount, n, len(key)) } - return codec.KeyLEToNibbles(key)[keyLength%2:], nil + return codec.KeyLEToNibbles(key)[partialKeyLength%2:], nil } diff --git a/internal/trie/node/key_test.go b/internal/trie/node/key_test.go index 2e21825cce..930a97c656 100644 --- a/internal/trie/node/key_test.go +++ b/internal/trie/node/key_test.go @@ -4,13 +4,11 @@ package node import ( - "bytes" "fmt" "testing" "github.com/golang/mock/gomock" "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" ) func repeatBytes(n int, b byte) (slice []byte) { @@ -21,129 +19,6 @@ func repeatBytes(n int, b byte) (slice []byte) { return slice } -func 
Test_encodeKeyLength(t *testing.T) { - t.Parallel() - - testCases := map[string]struct { - keyLength int - writes []writeCall - errWrapped error - errMessage string - }{ - "length equal to maximum": { - keyLength: int(maxPartialKeySize) + 63, - errWrapped: ErrPartialKeyTooBig, - errMessage: "partial key length cannot be " + - "larger than or equal to 2^16: 65535", - }, - "zero length": { - writes: []writeCall{ - { - written: []byte{0xc1}, - }, - }, - }, - "one length": { - keyLength: 1, - writes: []writeCall{ - { - written: []byte{0xc2}, - }, - }, - }, - "error at single byte write": { - keyLength: 1, - writes: []writeCall{ - { - written: []byte{0xc2}, - err: errTest, - }, - }, - errWrapped: errTest, - errMessage: errTest.Error(), - }, - "error at first byte write": { - keyLength: 255 + 100 + 63, - writes: []writeCall{ - { - written: []byte{255}, - err: errTest, - }, - }, - errWrapped: errTest, - errMessage: errTest.Error(), - }, - "error at last byte write": { - keyLength: 255 + 100 + 63, - writes: []writeCall{ - { - written: []byte{255}, - }, - { - written: []byte{100}, - err: errTest, - }, - }, - errWrapped: errTest, - errMessage: errTest.Error(), - }, - } - - for name, testCase := range testCases { - testCase := testCase - t.Run(name, func(t *testing.T) { - t.Parallel() - ctrl := gomock.NewController(t) - - writer := NewMockWriter(ctrl) - var previousCall *gomock.Call - for _, write := range testCase.writes { - call := writer.EXPECT(). - Write(write.written). 
- Return(write.n, write.err) - - if write.err != nil { - break - } else if previousCall != nil { - call.After(previousCall) - } - previousCall = call - } - - err := encodeKeyLength(testCase.keyLength, writer) - - assert.ErrorIs(t, err, testCase.errWrapped) - if testCase.errWrapped != nil { - assert.EqualError(t, err, testCase.errMessage) - } - }) - } - - t.Run("length at maximum", func(t *testing.T) { - t.Parallel() - - // Note: this test case cannot run with the - // mock writer since it's too slow, so we use - // an actual buffer. - - const keyLength = int(maxPartialKeySize) + 62 - const expectedEncodingLength = 257 - expectedBytes := make([]byte, expectedEncodingLength) - for i := 0; i < len(expectedBytes)-1; i++ { - expectedBytes[i] = 255 - } - expectedBytes[len(expectedBytes)-1] = 254 - - buffer := bytes.NewBuffer(nil) - buffer.Grow(expectedEncodingLength) - - err := encodeKeyLength(keyLength, buffer) - - require.NoError(t, err) - assert.Equal(t, expectedBytes, buffer.Bytes()) - }) -} - //go:generate mockgen -destination=reader_mock_test.go -package $GOPACKAGE io Reader type readCall struct { @@ -153,20 +28,12 @@ type readCall struct { err error } -func repeatReadCalls(rc readCall, length int) (readCalls []readCall) { - readCalls = make([]readCall, length) - for i := range readCalls { - readCalls[i] = readCall{ - buffArgCap: rc.buffArgCap, - n: rc.n, - err: rc.err, - } - if rc.read != nil { - readCalls[i].read = make([]byte, len(rc.read)) - copy(readCalls[i].read, rc.read) - } +func repeatReadCall(base readCall, n int) (calls []readCall) { + calls = make([]readCall, n) + for i := range calls { + calls[i] = base } - return readCalls + return calls } var _ gomock.Matcher = (*byteSliceCapMatcher)(nil) @@ -184,7 +51,7 @@ func (b *byteSliceCapMatcher) Matches(x interface{}) bool { } func (b *byteSliceCapMatcher) String() string { - return fmt.Sprintf("capacity of slice is not the expected capacity %d", b.capacity) + return fmt.Sprintf("slice with capacity %d", 
b.capacity) } func newByteSliceCapMatcher(capacity int) *byteSliceCapMatcher { @@ -197,45 +64,45 @@ func Test_decodeKey(t *testing.T) { t.Parallel() testCases := map[string]struct { - reads []readCall - keyLength byte - b []byte - errWrapped error - errMessage string + reads []readCall + partialKeyLength uint16 + b []byte + errWrapped error + errMessage string }{ "zero key length": { - b: []byte{}, + partialKeyLength: 0, + b: []byte{}, }, "short key length": { reads: []readCall{ {buffArgCap: 3, read: []byte{1, 2, 3}, n: 3}, }, - keyLength: 5, - b: []byte{0x1, 0x0, 0x2, 0x0, 0x3}, + partialKeyLength: 5, + b: []byte{0x1, 0x0, 0x2, 0x0, 0x3}, }, "key read error": { reads: []readCall{ {buffArgCap: 3, err: errTest}, }, - keyLength: 5, - errWrapped: ErrReadKeyData, - errMessage: "cannot read key data: test error", + partialKeyLength: 5, + errWrapped: errTest, + errMessage: "reading from reader: test error", }, "key read bytes count mismatch": { reads: []readCall{ {buffArgCap: 3, n: 2}, }, - keyLength: 5, - errWrapped: ErrReadKeyData, - errMessage: "cannot read key data: read 2 bytes instead of 3", + partialKeyLength: 5, + errWrapped: ErrReaderMismatchCount, + errMessage: "read unexpected number of bytes from reader: read 2 bytes instead of expected 3 bytes", }, "long key length": { reads: []readCall{ - {buffArgCap: 1, read: []byte{6}, n: 1}, // key length {buffArgCap: 35, read: repeatBytes(35, 7), n: 35}, // key data }, - keyLength: 0x3f, + partialKeyLength: 70, b: []byte{ 0x0, 0x7, 0x0, 0x7, 0x0, 0x7, 0x0, 0x7, 0x0, 0x7, 0x0, 0x7, 0x0, 0x7, 0x0, 0x7, 0x0, 0x7, 0x0, 0x7, @@ -245,20 +112,6 @@ func Test_decodeKey(t *testing.T) { 0x0, 0x7, 0x0, 0x7, 0x0, 0x7, 0x0, 0x7, 0x0, 0x7, 0x0, 0x7, 0x0, 0x7, 0x0, 0x7, 0x0, 0x7, 0x0, 0x7}, }, - "key length read error": { - reads: []readCall{ - {buffArgCap: 1, err: errTest}, - }, - keyLength: 0x3f, - errWrapped: ErrReadKeyLength, - errMessage: "cannot read key length: test error", - }, - "key length too big": { - reads: 
repeatReadCalls(readCall{buffArgCap: 1, read: []byte{0xff}, n: 1}, 257), - keyLength: 0x3f, - errWrapped: ErrPartialKeyTooBig, - errMessage: "partial key length cannot be larger than or equal to 2^16: 65598", - }, } for name, testCase := range testCases { @@ -270,6 +123,7 @@ func Test_decodeKey(t *testing.T) { reader := NewMockReader(ctrl) var previousCall *gomock.Call for _, readCall := range testCase.reads { + readCall := readCall // required variable pinning byteSliceCapMatcher := newByteSliceCapMatcher(readCall.buffArgCap) call := reader.EXPECT().Read(byteSliceCapMatcher). DoAndReturn(func(b []byte) (n int, err error) { @@ -282,7 +136,7 @@ func Test_decodeKey(t *testing.T) { previousCall = call } - b, err := decodeKey(reader, testCase.keyLength) + b, err := decodeKey(reader, testCase.partialKeyLength) assert.ErrorIs(t, err, testCase.errWrapped) if err != nil { diff --git a/internal/trie/node/node.go b/internal/trie/node/node.go index 493ca1de91..a40cf31fd7 100644 --- a/internal/trie/node/node.go +++ b/internal/trie/node/node.go @@ -1,6 +1,8 @@ // Copyright 2021 ChainSafe Systems (ON) // SPDX-License-Identifier: LGPL-3.0-only +// Package node defines the `Node` structure with methods +// to be used in the modified Merkle-Patricia Radix-16 trie. 
package node import ( diff --git a/internal/trie/node/node_test.go b/internal/trie/node/node_test.go index a16f3c8912..5fb99c189b 100644 --- a/internal/trie/node/node_test.go +++ b/internal/trie/node/node_test.go @@ -16,16 +16,6 @@ func Test_Node_String(t *testing.T) { node *Node s string }{ - "empty leaf": { - node: &Node{}, - s: `Leaf -├── Generation: 0 -├── Dirty: false -├── Key: nil -├── Value: nil -├── Calculated encoding: nil -└── Calculated digest: nil`, - }, "leaf with value smaller than 1024": { node: &Node{ Key: []byte{1, 2}, diff --git a/internal/trie/node/variants.go b/internal/trie/node/variants.go new file mode 100644 index 0000000000..2c75c44904 --- /dev/null +++ b/internal/trie/node/variants.go @@ -0,0 +1,26 @@ +// Copyright 2022 ChainSafe Systems (ON) +// SPDX-License-Identifier: LGPL-3.0-only + +package node + +type variant struct { + bits byte + mask byte +} + +// Node variants +// See https://spec.polkadot.network/#defn-node-header +var ( + leafVariant = variant{ // leaf 01 + bits: 0b0100_0000, + mask: 0b1100_0000, + } + branchVariant = variant{ // branch 10 + bits: 0b1000_0000, + mask: 0b1100_0000, + } + branchWithValueVariant = variant{ // branch 11 + bits: 0b1100_0000, + mask: 0b1100_0000, + } +) diff --git a/internal/trie/pools/pools.go b/internal/trie/pools/pools.go index 855232ef44..1bfe8f5a83 100644 --- a/internal/trie/pools/pools.go +++ b/internal/trie/pools/pools.go @@ -10,15 +10,6 @@ import ( "golang.org/x/crypto/blake2b" ) -// SingleByteBuffers is a sync pool of buffers of capacity 1. -var SingleByteBuffers = &sync.Pool{ - New: func() interface{} { - const bufferLength = 1 - b := make([]byte, bufferLength) - return bytes.NewBuffer(b) - }, -} - // DigestBuffers is a sync pool of buffers of capacity 32. 
var DigestBuffers = &sync.Pool{ New: func() interface{} { diff --git a/lib/common/hash.go b/lib/common/hash.go index 6ca442e829..0c33b93699 100644 --- a/lib/common/hash.go +++ b/lib/common/hash.go @@ -64,6 +64,12 @@ func (h Hash) String() string { return fmt.Sprintf("0x%x", h[:]) } +// Short returns the first 4 bytes and the last 4 bytes of the hex string for the hash +func (h Hash) Short() string { + const nBytes = 4 + return fmt.Sprintf("0x%x...%x", h[:nBytes], h[len(h)-nBytes:]) +} + // SetBytes sets the hash to the value of b. // If b is larger than len(h), b will be cropped from the left. func (h *Hash) SetBytes(b []byte) { diff --git a/lib/grandpa/errors.go b/lib/grandpa/errors.go index 1908f785cf..c2d2435c29 100644 --- a/lib/grandpa/errors.go +++ b/lib/grandpa/errors.go @@ -61,7 +61,8 @@ var ( ErrNotCommitMessage = errors.New("cannot get finalised hash from VoteMessage") // ErrNoJustification is returned when no justification can be found for a block, ie. it has not been finalised - ErrNoJustification = errors.New("no justification found for block") + ErrNoJustification = errors.New("no justification found for block") + ErrJustificationMismatch = errors.New("justification does not correspond to given block hash") ErrBlockHashMismatch = errors.New("block hash does not correspond to given block number") diff --git a/lib/grandpa/grandpa.go b/lib/grandpa/grandpa.go index ffc2a01d9c..058ce2117b 100644 --- a/lib/grandpa/grandpa.go +++ b/lib/grandpa/grandpa.go @@ -13,6 +13,7 @@ import ( "sync/atomic" "time" + "github.com/ChainSafe/gossamer/dot/state" "github.com/ChainSafe/gossamer/dot/telemetry" "github.com/ChainSafe/gossamer/dot/types" "github.com/ChainSafe/gossamer/internal/log" @@ -46,7 +47,6 @@ type Service struct { cancel context.CancelFunc blockState BlockState grandpaState GrandpaState - digestHandler DigestHandler keypair *ed25519.Keypair // TODO: change to grandpa keystore (#1870) mapLock sync.Mutex chanLock sync.Mutex @@ -83,16 +83,15 @@ type Service 
struct { // Config represents a GRANDPA service configuration type Config struct { - LogLvl log.Level - BlockState BlockState - GrandpaState GrandpaState - DigestHandler DigestHandler - Network Network - Voters []Voter - Keypair *ed25519.Keypair - Authority bool - Interval time.Duration - Telemetry telemetry.Client + LogLvl log.Level + BlockState BlockState + GrandpaState GrandpaState + Network Network + Voters []Voter + Keypair *ed25519.Keypair + Authority bool + Interval time.Duration + Telemetry telemetry.Client } // NewService returns a new GRANDPA Service instance. @@ -105,10 +104,6 @@ func NewService(cfg *Config) (*Service, error) { return nil, ErrNilGrandpaState } - if cfg.DigestHandler == nil { - return nil, ErrNilDigestHandler - } - if cfg.Keypair == nil && cfg.Authority { return nil, ErrNilKeypair } @@ -157,7 +152,6 @@ func NewService(cfg *Config) (*Service, error) { state: NewState(cfg.Voters, setID, round), blockState: cfg.BlockState, grandpaState: cfg.GrandpaState, - digestHandler: cfg.DigestHandler, keypair: cfg.Keypair, authority: cfg.Authority, prevotes: new(sync.Map), @@ -244,7 +238,7 @@ func (s *Service) authorities() []*types.Authority { func (s *Service) updateAuthorities() error { currSetID, err := s.grandpaState.GetCurrentSetID() if err != nil { - return err + return fmt.Errorf("cannot get current set id: %w", err) } // set ID hasn't changed, do nothing @@ -254,7 +248,7 @@ func (s *Service) updateAuthorities() error { nextAuthorities, err := s.grandpaState.GetAuthorities(currSetID) if err != nil { - return err + return fmt.Errorf("cannot get authorities for set id %d: %w", currSetID, err) } s.state.voters = nextAuthorities @@ -300,12 +294,12 @@ func (s *Service) initiateRound() error { // if there is an authority change, execute it err := s.updateAuthorities() if err != nil { - return err + return fmt.Errorf("cannot update authorities while initiating the round: %w", err) } round, setID, err := s.blockState.GetHighestRoundAndSetID() if err != 
nil { - return err + return fmt.Errorf("cannot get highest round and set id: %w", err) } if round > s.state.round && setID == s.state.setID { @@ -518,7 +512,7 @@ func (s *Service) playGrandpaRound() error { go s.sendVoteMessage(prevote, vm, roundComplete) logger.Debug("receiving pre-commit messages...") - // through goroutine s.receiveMessages(ctx) + // through goroutine s.receiveVoteMessages(ctx) time.Sleep(s.interval) if s.paused.Load().(bool) { @@ -689,6 +683,11 @@ func (s *Service) deleteVote(key ed25519.PublicKeyBytes, stage Subround) { func (s *Service) determinePreVote() (*Vote, error) { var vote *Vote + bestBlockHeader, err := s.blockState.BestBlockHeader() + if err != nil { + return nil, fmt.Errorf("cannot get best block header: %w", err) + } + // if we receive a vote message from the primary with a // block that's greater than or equal to the current pre-voted block // and greater than the best final candidate from the last round, we choose that. @@ -698,15 +697,16 @@ func (s *Service) determinePreVote() (*Vote, error) { if has && prm.Vote.Number >= uint32(s.head.Number) { vote = &prm.Vote } else { - header, err := s.blockState.BestBlockHeader() - if err != nil { - return nil, err - } + vote = NewVoteFromHeader(bestBlockHeader) + } - vote = NewVoteFromHeader(header) + nextChange, err := s.grandpaState.NextGrandpaAuthorityChange(bestBlockHeader.Hash(), bestBlockHeader.Number) + if errors.Is(err, state.ErrNoNextAuthorityChange) { + return vote, nil + } else if err != nil { + return nil, fmt.Errorf("cannot get next grandpa authority change: %w", err) } - nextChange := s.digestHandler.NextGrandpaAuthorityChange() if uint(vote.Number) > nextChange { header, err := s.blockState.GetHeaderByNumber(nextChange) if err != nil { @@ -730,7 +730,18 @@ func (s *Service) determinePreCommit() (*Vote, error) { s.preVotedBlock[s.state.round] = &pvb s.mapLock.Unlock() - nextChange := s.digestHandler.NextGrandpaAuthorityChange() + bestBlockHeader, err := 
s.blockState.BestBlockHeader() + if err != nil { + return nil, fmt.Errorf("cannot retrieve best block header: %w", err) + } + + nextChange, err := s.grandpaState.NextGrandpaAuthorityChange(bestBlockHeader.Hash(), bestBlockHeader.Number) + if errors.Is(err, state.ErrNoNextAuthorityChange) { + return &pvb, nil + } else if err != nil { + return nil, fmt.Errorf("cannot get next grandpa authority change: %w", err) + } + if uint(pvb.Number) > nextChange { header, err := s.blockState.GetHeaderByNumber(nextChange) if err != nil { diff --git a/lib/grandpa/grandpa_test.go b/lib/grandpa/grandpa_test.go index 4aaefed652..6905755406 100644 --- a/lib/grandpa/grandpa_test.go +++ b/lib/grandpa/grandpa_test.go @@ -22,8 +22,6 @@ import ( "github.com/golang/mock/gomock" "github.com/stretchr/testify/require" - - "github.com/ChainSafe/gossamer/lib/grandpa/mocks" ) // testGenesisHeader is a test block header @@ -38,12 +36,6 @@ var ( voters = newTestVoters() ) -func NewMockDigestHandler() *mocks.DigestHandler { - m := new(mocks.DigestHandler) - m.On("NextGrandpaAuthorityChange").Return(uint(2 ^ 64 - 1)) - return m -} - //go:generate mockgen -destination=mock_telemetry_test.go -package $GOPACKAGE github.com/ChainSafe/gossamer/dot/telemetry Client func newTestState(t *testing.T) *state.Service { @@ -73,7 +65,7 @@ func newTestState(t *testing.T) *state.Service { require.NoError(t, err) block.StoreRuntime(block.BestBlockHash(), rt) - grandpa, err := state.NewGrandpaStateFromGenesis(db, voters) + grandpa, err := state.NewGrandpaStateFromGenesis(db, nil, voters) require.NoError(t, err) return &state.Service{ @@ -104,15 +96,14 @@ func newTestService(t *testing.T) (*Service, *state.Service) { telemetryMock.EXPECT().SendMessage(gomock.Any()).AnyTimes() cfg := &Config{ - BlockState: st.Block, - GrandpaState: st.Grandpa, - DigestHandler: NewMockDigestHandler(), - Voters: voters, - Keypair: kr.Alice().(*ed25519.Keypair), - Authority: true, - Network: net, - Interval: time.Second, - Telemetry: 
telemetryMock, + BlockState: st.Block, + GrandpaState: st.Grandpa, + Voters: voters, + Keypair: kr.Alice().(*ed25519.Keypair), + Authority: true, + Network: net, + Interval: time.Second, + Telemetry: telemetryMock, } gs, err := NewService(cfg) diff --git a/lib/grandpa/message_handler.go b/lib/grandpa/message_handler.go index 766ca26b29..79dc9b101c 100644 --- a/lib/grandpa/message_handler.go +++ b/lib/grandpa/message_handler.go @@ -562,6 +562,11 @@ func (s *Service) VerifyBlockJustification(hash common.Hash, justification []byt return nil, err } + if !hash.Equal(fj.Commit.Hash) { + return nil, fmt.Errorf("%w: justification %s and block hash %s", + ErrJustificationMismatch, fj.Commit.Hash.Short(), hash.Short()) + } + setID, err := s.grandpaState.GetSetIDByBlockNumber(uint(fj.Commit.Number)) if err != nil { return nil, fmt.Errorf("cannot get set ID from block number: %w", err) diff --git a/lib/grandpa/message_handler_test.go b/lib/grandpa/message_handler_test.go index 2f43675ed0..0f378b7817 100644 --- a/lib/grandpa/message_handler_test.go +++ b/lib/grandpa/message_handler_test.go @@ -5,6 +5,7 @@ package grandpa import ( "errors" + "fmt" "testing" "time" @@ -789,6 +790,20 @@ func TestMessageHandler_VerifyBlockJustification_invalid(t *testing.T) { returnedJust, err = gs.VerifyBlockJustification(testHash, data) require.Equal(t, ErrMinVotesNotMet, err) require.Nil(t, returnedJust) + + // mismatch justification header and block header + precommits = buildTestJustification(t, 1, round+1, setID, kr, precommit) + just = newJustification(round+1, testHash, number, precommits) + data, err = scale.Marshal(*just) + require.NoError(t, err) + otherHeader := types.NewEmptyHeader() + _, err = gs.VerifyBlockJustification(otherHeader.Hash(), data) + require.ErrorIs(t, err, ErrJustificationMismatch) + + expectedErr := fmt.Sprintf("%s: justification %s and block hash %s", ErrJustificationMismatch, + testHash.Short(), otherHeader.Hash().Short()) + assert.ErrorIs(t, err, 
ErrJustificationMismatch) + require.EqualError(t, err, expectedErr) } func Test_getEquivocatoryVoters(t *testing.T) { diff --git a/lib/grandpa/mocks_test.go b/lib/grandpa/mocks_test.go index a91d5301de..97ba682105 100644 --- a/lib/grandpa/mocks_test.go +++ b/lib/grandpa/mocks_test.go @@ -495,6 +495,21 @@ func (mr *MockGrandpaStateMockRecorder) GetSetIDByBlockNumber(arg0 interface{}) return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetSetIDByBlockNumber", reflect.TypeOf((*MockGrandpaState)(nil).GetSetIDByBlockNumber), arg0) } +// NextGrandpaAuthorityChange mocks base method. +func (m *MockGrandpaState) NextGrandpaAuthorityChange(arg0 common.Hash, arg1 uint) (uint, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "NextGrandpaAuthorityChange", arg0, arg1) + ret0, _ := ret[0].(uint) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// NextGrandpaAuthorityChange indicates an expected call of NextGrandpaAuthorityChange. +func (mr *MockGrandpaStateMockRecorder) NextGrandpaAuthorityChange(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NextGrandpaAuthorityChange", reflect.TypeOf((*MockGrandpaState)(nil).NextGrandpaAuthorityChange), arg0, arg1) +} + // SetLatestRound mocks base method. 
func (m *MockGrandpaState) SetLatestRound(arg0 uint64) error { m.ctrl.T.Helper() diff --git a/lib/grandpa/round_test.go b/lib/grandpa/round_test.go index d05508b5d6..2ed107a4cb 100644 --- a/lib/grandpa/round_test.go +++ b/lib/grandpa/round_test.go @@ -100,16 +100,15 @@ func setupGrandpa(t *testing.T, kp *ed25519.Keypair) ( SendMessage(gomock.Any()).AnyTimes() cfg := &Config{ - BlockState: st.Block, - GrandpaState: st.Grandpa, - DigestHandler: NewMockDigestHandler(), - Voters: voters, - Keypair: kp, - LogLvl: log.Info, - Authority: true, - Network: net, - Interval: time.Second, - Telemetry: telemetryMock, + BlockState: st.Block, + GrandpaState: st.Grandpa, + Voters: voters, + Keypair: kp, + LogLvl: log.Info, + Authority: true, + Network: net, + Interval: time.Second, + Telemetry: telemetryMock, } gs, err := NewService(cfg) diff --git a/lib/grandpa/state.go b/lib/grandpa/state.go index c6b546316b..15f85f7722 100644 --- a/lib/grandpa/state.go +++ b/lib/grandpa/state.go @@ -51,11 +51,7 @@ type GrandpaState interface { //nolint:revive SetPrecommits(round, setID uint64, data []SignedVote) error GetPrevotes(round, setID uint64) ([]SignedVote, error) GetPrecommits(round, setID uint64) ([]SignedVote, error) -} - -// DigestHandler is the interface required by GRANDPA for the digest handler -type DigestHandler interface { // TODO: use GrandpaState instead (#1871) - NextGrandpaAuthorityChange() uint + NextGrandpaAuthorityChange(bestBlockHash common.Hash, bestBlockNumber uint) (blockHeight uint, err error) } //go:generate mockery --name Network --structname Network --case underscore --keeptree diff --git a/lib/grandpa/vote_message_test.go b/lib/grandpa/vote_message_test.go index e1773613d7..4365e7a685 100644 --- a/lib/grandpa/vote_message_test.go +++ b/lib/grandpa/vote_message_test.go @@ -22,13 +22,12 @@ func TestCheckForEquivocation_NoEquivocation(t *testing.T) { require.NoError(t, err) cfg := &Config{ - BlockState: st.Block, - GrandpaState: st.Grandpa, - DigestHandler: 
NewMockDigestHandler(), - Voters: voters, - Keypair: kr.Bob().(*ed25519.Keypair), - Network: net, - Interval: time.Second, + BlockState: st.Block, + GrandpaState: st.Grandpa, + Voters: voters, + Keypair: kr.Bob().(*ed25519.Keypair), + Network: net, + Interval: time.Second, } gs, err := NewService(cfg) @@ -57,13 +56,12 @@ func TestCheckForEquivocation_WithEquivocation(t *testing.T) { require.NoError(t, err) cfg := &Config{ - BlockState: st.Block, - GrandpaState: st.Grandpa, - DigestHandler: NewMockDigestHandler(), - Voters: voters, - Keypair: kr.Bob().(*ed25519.Keypair), - Network: net, - Interval: time.Second, + BlockState: st.Block, + GrandpaState: st.Grandpa, + Voters: voters, + Keypair: kr.Bob().(*ed25519.Keypair), + Network: net, + Interval: time.Second, } gs, err := NewService(cfg) @@ -103,13 +101,12 @@ func TestCheckForEquivocation_WithExistingEquivocation(t *testing.T) { require.NoError(t, err) cfg := &Config{ - BlockState: st.Block, - GrandpaState: st.Grandpa, - DigestHandler: NewMockDigestHandler(), - Voters: voters, - Keypair: kr.Bob().(*ed25519.Keypair), - Network: net, - Interval: time.Second, + BlockState: st.Block, + GrandpaState: st.Grandpa, + Voters: voters, + Keypair: kr.Bob().(*ed25519.Keypair), + Network: net, + Interval: time.Second, } gs, err := NewService(cfg) @@ -159,13 +156,12 @@ func TestValidateMessage_Valid(t *testing.T) { require.NoError(t, err) cfg := &Config{ - BlockState: st.Block, - GrandpaState: st.Grandpa, - DigestHandler: NewMockDigestHandler(), - Voters: voters, - Keypair: kr.Bob().(*ed25519.Keypair), - Network: net, - Interval: time.Second, + BlockState: st.Block, + GrandpaState: st.Grandpa, + Voters: voters, + Keypair: kr.Bob().(*ed25519.Keypair), + Network: net, + Interval: time.Second, } gs, err := NewService(cfg) @@ -193,13 +189,12 @@ func TestValidateMessage_InvalidSignature(t *testing.T) { require.NoError(t, err) cfg := &Config{ - BlockState: st.Block, - GrandpaState: st.Grandpa, - DigestHandler: NewMockDigestHandler(), - 
Voters: voters, - Keypair: kr.Bob().(*ed25519.Keypair), - Network: net, - Interval: time.Second, + BlockState: st.Block, + GrandpaState: st.Grandpa, + Voters: voters, + Keypair: kr.Bob().(*ed25519.Keypair), + Network: net, + Interval: time.Second, } gs, err := NewService(cfg) @@ -228,12 +223,11 @@ func TestValidateMessage_SetIDMismatch(t *testing.T) { require.NoError(t, err) cfg := &Config{ - BlockState: st.Block, - GrandpaState: st.Grandpa, - DigestHandler: NewMockDigestHandler(), - Keypair: kr.Bob().(*ed25519.Keypair), - Network: net, - Interval: time.Second, + BlockState: st.Block, + GrandpaState: st.Grandpa, + Keypair: kr.Bob().(*ed25519.Keypair), + Network: net, + Interval: time.Second, } gs, err := NewService(cfg) @@ -262,13 +256,12 @@ func TestValidateMessage_Equivocation(t *testing.T) { require.NoError(t, err) cfg := &Config{ - BlockState: st.Block, - GrandpaState: st.Grandpa, - DigestHandler: NewMockDigestHandler(), - Voters: voters, - Keypair: kr.Bob().(*ed25519.Keypair), - Network: net, - Interval: time.Second, + BlockState: st.Block, + GrandpaState: st.Grandpa, + Voters: voters, + Keypair: kr.Bob().(*ed25519.Keypair), + Network: net, + Interval: time.Second, } gs, err := NewService(cfg) @@ -306,13 +299,12 @@ func TestValidateMessage_BlockDoesNotExist(t *testing.T) { require.NoError(t, err) cfg := &Config{ - BlockState: st.Block, - GrandpaState: st.Grandpa, - DigestHandler: NewMockDigestHandler(), - Voters: voters, - Keypair: kr.Bob().(*ed25519.Keypair), - Network: net, - Interval: time.Second, + BlockState: st.Block, + GrandpaState: st.Grandpa, + Voters: voters, + Keypair: kr.Bob().(*ed25519.Keypair), + Network: net, + Interval: time.Second, } gs, err := NewService(cfg) @@ -341,13 +333,12 @@ func TestValidateMessage_IsNotDescendant(t *testing.T) { require.NoError(t, err) cfg := &Config{ - BlockState: st.Block, - GrandpaState: st.Grandpa, - DigestHandler: NewMockDigestHandler(), - Voters: voters, - Keypair: kr.Bob().(*ed25519.Keypair), - Network: net, - 
Interval: time.Second, + BlockState: st.Block, + GrandpaState: st.Grandpa, + Voters: voters, + Keypair: kr.Bob().(*ed25519.Keypair), + Network: net, + Interval: time.Second, } gs, err := NewService(cfg) diff --git a/lib/trie/database.go b/lib/trie/database.go index 6c300a8b04..2a676882b4 100644 --- a/lib/trie/database.go +++ b/lib/trie/database.go @@ -182,7 +182,7 @@ func (t *Trie) load(db chaindb.Database, n *Node) error { hash := child.HashDigest - if len(hash) == 0 && child.Type() == node.Leaf { + if len(hash) == 0 { // node has already been loaded inline // just set encoding + hash digest _, _, err := child.EncodeAndHash(false) @@ -448,8 +448,6 @@ func (t *Trie) GetInsertedNodeHashes() (hashesSet map[common.Hash]struct{}, err } func (t *Trie) getInsertedNodeHashes(n *Node, hashes map[common.Hash]struct{}) (err error) { - // TODO pass map of hashes or slice as argument to avoid copying - // and using more memory. if n == nil || !n.Dirty { return nil } diff --git a/lib/trie/trie_test.go b/lib/trie/trie_test.go index 6069af82f1..b27b68cdd3 100644 --- a/lib/trie/trie_test.go +++ b/lib/trie/trie_test.go @@ -47,18 +47,18 @@ func Test_Trie_Snapshot(t *testing.T) { trie := &Trie{ generation: 8, - root: &Node{Key: []byte{8}}, + root: &Node{Key: []byte{8}, Value: []byte{1}}, childTries: map[common.Hash]*Trie{ {1}: { generation: 1, - root: &Node{Key: []byte{1}}, + root: &Node{Key: []byte{1}, Value: []byte{1}}, deletedKeys: map[common.Hash]struct{}{ {1}: {}, }, }, {2}: { generation: 2, - root: &Node{Key: []byte{2}}, + root: &Node{Key: []byte{2}, Value: []byte{1}}, deletedKeys: map[common.Hash]struct{}{ {2}: {}, }, @@ -72,16 +72,16 @@ func Test_Trie_Snapshot(t *testing.T) { expectedTrie := &Trie{ generation: 9, - root: &Node{Key: []byte{8}}, + root: &Node{Key: []byte{8}, Value: []byte{1}}, childTries: map[common.Hash]*Trie{ {1}: { generation: 2, - root: &Node{Key: []byte{1}}, + root: &Node{Key: []byte{1}, Value: []byte{1}}, deletedKeys: map[common.Hash]struct{}{}, }, {2}: { 
generation: 3, - root: &Node{Key: []byte{2}}, + root: &Node{Key: []byte{2}, Value: []byte{1}}, deletedKeys: map[common.Hash]struct{}{}, }, }, @@ -221,11 +221,11 @@ func Test_Trie_DeepCopy(t *testing.T) { "filled trie": { trieOriginal: &Trie{ generation: 1, - root: &Node{Key: []byte{1, 2}}, + root: &Node{Key: []byte{1, 2}, Value: []byte{1}}, childTries: map[common.Hash]*Trie{ {1, 2, 3}: { generation: 2, - root: &Node{Key: []byte{1}}, + root: &Node{Key: []byte{1}, Value: []byte{1}}, deletedKeys: map[common.Hash]struct{}{ {1, 2, 3}: {}, {3, 4, 5}: {}, @@ -239,11 +239,11 @@ func Test_Trie_DeepCopy(t *testing.T) { }, trieCopy: &Trie{ generation: 1, - root: &Node{Key: []byte{1, 2}}, + root: &Node{Key: []byte{1, 2}, Value: []byte{1}}, childTries: map[common.Hash]*Trie{ {1, 2, 3}: { generation: 2, - root: &Node{Key: []byte{1}}, + root: &Node{Key: []byte{1}, Value: []byte{1}}, deletedKeys: map[common.Hash]struct{}{ {1, 2, 3}: {}, {3, 4, 5}: {}, @@ -277,11 +277,13 @@ func Test_Trie_RootNode(t *testing.T) { trie := Trie{ root: &Node{ - Key: []byte{1, 2, 3}, + Key: []byte{1, 2, 3}, + Value: []byte{1}, }, } expectedRoot := &Node{ - Key: []byte{1, 2, 3}, + Key: []byte{1, 2, 3}, + Value: []byte{1}, } root := trie.RootNode() @@ -330,7 +332,8 @@ func Test_encodeRoot(t *testing.T) { }, "root encoding error": { root: &Node{ - Key: []byte{1, 2}, + Key: []byte{1, 2}, + Value: []byte{1}, }, bufferCalls: bufferCalls{ writeCalls: []writeCall{ @@ -343,27 +346,30 @@ func Test_encodeRoot(t *testing.T) { errWrapped: errTest, errMessage: "cannot encode header: test error", expectedRoot: &Node{ - Key: []byte{1, 2}, + Key: []byte{1, 2}, + Value: []byte{1}, }, }, "root encoding success": { root: &Node{ - Key: []byte{1, 2}, + Key: []byte{1, 2}, + Value: []byte{1}, }, bufferCalls: bufferCalls{ writeCalls: []writeCall{ {written: []byte{66}}, {written: []byte{18}}, - {written: []byte{0}}, + {written: []byte{4, 1}}, }, lenCall: true, lenReturn: 3, bytesCall: true, - bytesReturn: []byte{66, 18, 0}, + 
bytesReturn: []byte{66, 18, 4, 1}, }, expectedRoot: &Node{ Key: []byte{1, 2}, - Encoding: []byte{66, 18, 0}, + Value: []byte{1}, + Encoding: []byte{66, 18, 4}, }, }, } @@ -446,18 +452,20 @@ func Test_Trie_Hash(t *testing.T) { "leaf root": { trie: Trie{ root: &Node{ - Key: []byte{1, 2, 3}, + Key: []byte{1, 2, 3}, + Value: []byte{1}, }, }, hash: common.Hash{ - 0x84, 0x7c, 0x95, 0x42, 0x8d, 0x9c, 0xcf, 0xce, - 0xa7, 0x27, 0x15, 0x33, 0x48, 0x74, 0x99, 0x11, - 0x83, 0xb8, 0xe8, 0xc4, 0x80, 0x88, 0xea, 0x4d, - 0x9f, 0x57, 0x82, 0x94, 0xc9, 0x76, 0xf4, 0x6f}, + 0xa8, 0x13, 0x7c, 0xee, 0xb4, 0xad, 0xea, 0xac, + 0x9e, 0x5b, 0x37, 0xe2, 0x8e, 0x7d, 0x64, 0x78, + 0xac, 0xba, 0xb0, 0x6e, 0x90, 0x76, 0xe4, 0x67, + 0xa1, 0xd8, 0xa2, 0x29, 0x4e, 0x4a, 0xd9, 0xa3}, expectedTrie: Trie{ root: &Node{ Key: []byte{1, 2, 3}, - Encoding: []byte{67, 1, 35, 0}, + Value: []byte{1}, + Encoding: []byte{0x43, 0x01, 0x23, 0x04, 0x01}, }, }, }, @@ -468,15 +476,15 @@ func Test_Trie_Hash(t *testing.T) { Value: []byte("branch"), Descendants: 1, Children: padRightChildren([]*Node{ - {Key: []byte{9}}, + {Key: []byte{9}, Value: []byte{1}}, }), }, }, hash: common.Hash{ - 0xbc, 0x4b, 0x90, 0x4c, 0x65, 0xb1, 0x3b, 0x9b, - 0xcf, 0xe2, 0x32, 0xe3, 0xe6, 0x50, 0x20, 0xd8, - 0x21, 0x96, 0xce, 0xbf, 0x4c, 0xa4, 0xd, 0xaa, - 0xbe, 0x27, 0xab, 0x13, 0xcb, 0xf0, 0xfd, 0xd7}, + 0xaa, 0x7e, 0x57, 0x48, 0xb0, 0x27, 0x4d, 0x18, + 0xf5, 0x1c, 0xfd, 0x36, 0x4c, 0x4b, 0x56, 0x4a, + 0xf5, 0x37, 0x9d, 0xd7, 0xcb, 0xf5, 0x80, 0x15, + 0xf0, 0xe, 0xd3, 0x39, 0x48, 0x21, 0xe3, 0xdd}, expectedTrie: Trie{ root: &Node{ Key: []byte{1, 2, 3}, @@ -485,7 +493,8 @@ func Test_Trie_Hash(t *testing.T) { Children: padRightChildren([]*Node{ { Key: []byte{9}, - Encoding: []byte{0x41, 0x09, 0x00}, + Value: []byte{1}, + Encoding: []byte{0x41, 0x09, 0x04, 0x01}, }, }), }, @@ -671,7 +680,8 @@ func Test_Trie_NextKey(t *testing.T) { "nil key returns root leaf": { trie: Trie{ root: &Node{ - Key: []byte{2}, + Key: []byte{2}, + Value: []byte{1}, 
}, }, nextKey: []byte{2}, @@ -679,7 +689,8 @@ func Test_Trie_NextKey(t *testing.T) { "key smaller than root leaf full key": { trie: Trie{ root: &Node{ - Key: []byte{2}, + Key: []byte{2}, + Value: []byte{1}, }, }, key: []byte{0x10}, // 10 => [1, 0] in nibbles @@ -717,7 +728,8 @@ func Test_nextKey(t *testing.T) { "nil key returns root leaf": { trie: Trie{ root: &Node{ - Key: []byte{2}, + Key: []byte{2}, + Value: []byte{1}, }, }, nextKey: []byte{2}, @@ -725,7 +737,8 @@ func Test_nextKey(t *testing.T) { "key smaller than root leaf full key": { trie: Trie{ root: &Node{ - Key: []byte{2}, + Key: []byte{2}, + Value: []byte{1}, }, }, key: []byte{1}, @@ -734,7 +747,8 @@ func Test_nextKey(t *testing.T) { "key equal to root leaf full key": { trie: Trie{ root: &Node{ - Key: []byte{2}, + Key: []byte{2}, + Value: []byte{1}, }, }, key: []byte{2}, @@ -742,7 +756,8 @@ func Test_nextKey(t *testing.T) { "key greater than root leaf full key": { trie: Trie{ root: &Node{ - Key: []byte{2}, + Key: []byte{2}, + Value: []byte{1}, }, }, key: []byte{3}, @@ -755,7 +770,8 @@ func Test_nextKey(t *testing.T) { Descendants: 1, Children: padRightChildren([]*Node{ { - Key: []byte{1}, + Key: []byte{1}, + Value: []byte{1}, }, }), }, @@ -771,7 +787,8 @@ func Test_nextKey(t *testing.T) { Descendants: 1, Children: padRightChildren([]*Node{ { - Key: []byte{1}, + Key: []byte{1}, + Value: []byte{1}, }, }), }, @@ -788,7 +805,8 @@ func Test_nextKey(t *testing.T) { nil, nil, { // full key [1, 2, 3] - Key: []byte{3}, + Key: []byte{3}, + Value: []byte{1}, }, }), }, @@ -806,7 +824,8 @@ func Test_nextKey(t *testing.T) { nil, nil, { // full key [1, 2, 3] - Key: []byte{3}, + Key: []byte{3}, + Value: []byte{1}, }, }), }, @@ -823,7 +842,8 @@ func Test_nextKey(t *testing.T) { nil, nil, { // full key [1, 2, 3] - Key: []byte{3}, + Key: []byte{3}, + Value: []byte{1}, }, }), }, @@ -945,7 +965,7 @@ func Test_nextKey(t *testing.T) { Value: []byte("branch"), Descendants: 1, Children: padRightChildren([]*Node{ - {Key: 
[]byte{1}}, + {Key: []byte{1}, Value: []byte{1}}, }), }, }, @@ -1170,7 +1190,7 @@ func Test_Trie_insert(t *testing.T) { Descendants: 1, Children: padRightChildren([]*Node{ nil, - {Key: []byte{2}}, + {Key: []byte{2}, Value: []byte{1}}, }), }, key: []byte{1, 0}, @@ -1188,7 +1208,7 @@ func Test_Trie_insert(t *testing.T) { Generation: 1, Dirty: true, }, - {Key: []byte{2}}, + {Key: []byte{2}, Value: []byte{1}}, }), }, nodesCreated: 1, @@ -1290,7 +1310,8 @@ func Test_Trie_insert(t *testing.T) { generation: 1, }, parent: &Node{ - Key: []byte{1}, + Key: []byte{1}, + Value: []byte{1}, }, key: []byte{1}, value: []byte("leaf"), @@ -1306,7 +1327,8 @@ func Test_Trie_insert(t *testing.T) { generation: 1, }, parent: &Node{ - Key: []byte{1, 2}, + Key: []byte{1, 2}, + Value: []byte{1}, }, key: []byte{1}, value: []byte("leaf"), @@ -1320,6 +1342,7 @@ func Test_Trie_insert(t *testing.T) { nil, nil, { Key: []byte{}, + Value: []byte{1}, Dirty: true, Generation: 1, }, @@ -1362,7 +1385,7 @@ func Test_Trie_insertInBranch(t *testing.T) { Value: []byte("old"), Descendants: 1, Children: padRightChildren([]*Node{ - {Key: []byte{1}}, + {Key: []byte{1}, Value: []byte{1}}, }), }, key: []byte{2}, @@ -1373,7 +1396,7 @@ func Test_Trie_insertInBranch(t *testing.T) { Dirty: true, Descendants: 1, Children: padRightChildren([]*Node{ - {Key: []byte{1}}, + {Key: []byte{1}, Value: []byte{1}}, }), }, }, @@ -1383,7 +1406,7 @@ func Test_Trie_insertInBranch(t *testing.T) { Value: []byte("old"), Descendants: 1, Children: padRightChildren([]*Node{ - {Key: []byte{1}}, + {Key: []byte{1}, Value: []byte{1}}, }), }, key: []byte{2}, @@ -1394,7 +1417,7 @@ func Test_Trie_insertInBranch(t *testing.T) { Dirty: true, Descendants: 1, Children: padRightChildren([]*Node{ - {Key: []byte{1}}, + {Key: []byte{1}, Value: []byte{1}}, }), }, }, @@ -1404,7 +1427,7 @@ func Test_Trie_insertInBranch(t *testing.T) { Value: []byte{5}, Descendants: 1, Children: padRightChildren([]*Node{ - {Key: []byte{1}}, + {Key: []byte{1}, Value: 
[]byte{1}}, }), }, key: []byte{2, 3, 4, 5}, @@ -1415,7 +1438,7 @@ func Test_Trie_insertInBranch(t *testing.T) { Dirty: true, Descendants: 2, Children: padRightChildren([]*Node{ - {Key: []byte{1}}, + {Key: []byte{1}, Value: []byte{1}}, nil, nil, { Key: []byte{4, 5}, @@ -1437,7 +1460,7 @@ func Test_Trie_insertInBranch(t *testing.T) { Key: []byte{4}, Descendants: 1, Children: padRightChildren([]*Node{ - {Key: []byte{1}}, + {Key: []byte{1}, Value: []byte{1}}, }), }, }), @@ -1456,7 +1479,7 @@ func Test_Trie_insertInBranch(t *testing.T) { Dirty: true, Descendants: 2, Children: padRightChildren([]*Node{ - {Key: []byte{1}}, + {Key: []byte{1}, Value: []byte{1}}, nil, nil, nil, nil, { Key: []byte{6}, @@ -1475,7 +1498,7 @@ func Test_Trie_insertInBranch(t *testing.T) { Value: []byte{5}, Descendants: 1, Children: padRightChildren([]*Node{ - {Key: []byte{1}}, + {Key: []byte{1}, Value: []byte{1}}, }), }, key: []byte{2, 4, 5, 6}, @@ -1492,7 +1515,7 @@ func Test_Trie_insertInBranch(t *testing.T) { Dirty: true, Descendants: 1, Children: padRightChildren([]*Node{ - {Key: []byte{1}}, + {Key: []byte{1}, Value: []byte{1}}, }), }, { @@ -1510,7 +1533,7 @@ func Test_Trie_insertInBranch(t *testing.T) { Value: []byte{5}, Descendants: 1, Children: padRightChildren([]*Node{ - {Key: []byte{1}}, + {Key: []byte{1}, Value: []byte{1}}, }), }, key: []byte{3}, @@ -1527,7 +1550,7 @@ func Test_Trie_insertInBranch(t *testing.T) { Dirty: true, Descendants: 1, Children: padRightChildren([]*Node{ - {Key: []byte{1}}, + {Key: []byte{1}, Value: []byte{1}}, }), }, { @@ -1545,7 +1568,7 @@ func Test_Trie_insertInBranch(t *testing.T) { Value: []byte{5}, Descendants: 1, Children: padRightChildren([]*Node{ - {Key: []byte{1}}, + {Key: []byte{1}, Value: []byte{1}}, }), }, key: []byte{}, @@ -1563,7 +1586,7 @@ func Test_Trie_insertInBranch(t *testing.T) { Dirty: true, Descendants: 1, Children: padRightChildren([]*Node{ - {Key: []byte{1}}, + {Key: []byte{1}, Value: []byte{1}}, }), }, }), @@ -1732,15 +1755,18 @@ func 
Test_Trie_GetKeysWithPrefix(t *testing.T) { Descendants: 2, Children: padRightChildren([]*Node{ { // full key 0, 1, 0, 0, 4 - Key: []byte{4}, + Key: []byte{4}, + Value: []byte{1}, }, { // full key 0, 1, 0, 1, 5 - Key: []byte{5}, + Key: []byte{5}, + Value: []byte{1}, }, }), }, { // full key 0, 1, 1, 9 - Key: []byte{9}, + Key: []byte{9}, + Value: []byte{1}, }, }), }, @@ -1785,8 +1811,8 @@ func Test_getKeysWithPrefix(t *testing.T) { Key: []byte{1, 2, 3}, Descendants: 2, Children: padRightChildren([]*Node{ - {Key: []byte{4}}, - {Key: []byte{5}}, + {Key: []byte{4}, Value: []byte{1}}, + {Key: []byte{5}, Value: []byte{1}}, }), }, prefix: []byte{9, 8, 7}, @@ -1801,8 +1827,8 @@ func Test_getKeysWithPrefix(t *testing.T) { Key: []byte{1, 2, 3}, Descendants: 2, Children: padRightChildren([]*Node{ - {Key: []byte{4}}, - {Key: []byte{5}}, + {Key: []byte{4}, Value: []byte{1}}, + {Key: []byte{5}, Value: []byte{1}}, }), }, prefix: []byte{9, 8, 7}, @@ -1817,8 +1843,8 @@ func Test_getKeysWithPrefix(t *testing.T) { Key: []byte{1, 2, 3}, Descendants: 2, Children: padRightChildren([]*Node{ - {Key: []byte{4}}, - {Key: []byte{5}}, + {Key: []byte{4}, Value: []byte{1}}, + {Key: []byte{5}, Value: []byte{1}}, }), }, key: []byte{1, 3}, @@ -1830,8 +1856,8 @@ func Test_getKeysWithPrefix(t *testing.T) { Key: []byte{1, 2}, Descendants: 2, Children: padRightChildren([]*Node{ - {Key: []byte{4}}, - {Key: []byte{5}}, + {Key: []byte{4}, Value: []byte{1}}, + {Key: []byte{5}, Value: []byte{1}}, }), }, key: []byte{1, 2, 3}, @@ -1843,8 +1869,8 @@ func Test_getKeysWithPrefix(t *testing.T) { Key: []byte{1, 2, 3}, Descendants: 2, Children: padRightChildren([]*Node{ - {Key: []byte{4}}, - {Key: []byte{5}}, + {Key: []byte{4}, Value: []byte{1}}, + {Key: []byte{5}, Value: []byte{1}}, }), }, prefix: []byte{9, 8, 7}, @@ -1855,7 +1881,8 @@ func Test_getKeysWithPrefix(t *testing.T) { }, "parent leaf with search key equal to common prefix": { parent: &Node{ - Key: []byte{1, 2, 3}, + Key: []byte{1, 2, 3}, + Value: 
[]byte{1}, }, prefix: []byte{9, 8, 7}, key: []byte{1, 2, 3}, @@ -1865,7 +1892,8 @@ func Test_getKeysWithPrefix(t *testing.T) { }, "parent leaf with empty search key": { parent: &Node{ - Key: []byte{1, 2, 3}, + Key: []byte{1, 2, 3}, + Value: []byte{1}, }, prefix: []byte{9, 8, 7}, key: []byte{}, @@ -1875,7 +1903,8 @@ func Test_getKeysWithPrefix(t *testing.T) { }, "parent leaf with too deep search key": { parent: &Node{ - Key: []byte{1, 2, 3}, + Key: []byte{1, 2, 3}, + Value: []byte{1}, }, prefix: []byte{9, 8, 7}, key: []byte{1, 2, 3, 4}, @@ -1884,7 +1913,8 @@ func Test_getKeysWithPrefix(t *testing.T) { }, "parent leaf with shorter matching search key": { parent: &Node{ - Key: []byte{1, 2, 3}, + Key: []byte{1, 2, 3}, + Value: []byte{1}, }, prefix: []byte{9, 8, 7}, key: []byte{1, 2}, @@ -1894,7 +1924,8 @@ func Test_getKeysWithPrefix(t *testing.T) { }, "parent leaf with not matching search key": { parent: &Node{ - Key: []byte{1, 2, 3}, + Key: []byte{1, 2, 3}, + Value: []byte{1}, }, prefix: []byte{9, 8, 7}, key: []byte{1, 3, 3}, @@ -1931,7 +1962,8 @@ func Test_addAllKeys(t *testing.T) { }, "leaf parent": { parent: &Node{ - Key: []byte{1, 2, 3}, + Key: []byte{1, 2, 3}, + Value: []byte{1}, }, prefix: []byte{9, 8, 7}, keys: [][]byte{{1}, {2}}, @@ -1943,8 +1975,8 @@ func Test_addAllKeys(t *testing.T) { Key: []byte{1, 2, 3}, Descendants: 2, Children: padRightChildren([]*Node{ - {Key: []byte{4}}, - {Key: []byte{5}}, + {Key: []byte{4}, Value: []byte{1}}, + {Key: []byte{5}, Value: []byte{1}}, }), }, prefix: []byte{9, 8, 7}, @@ -1959,8 +1991,8 @@ func Test_addAllKeys(t *testing.T) { Value: []byte{}, Descendants: 2, Children: padRightChildren([]*Node{ - {Key: []byte{4}}, - {Key: []byte{5}}, + {Key: []byte{4}, Value: []byte{1}}, + {Key: []byte{5}, Value: []byte{1}}, }), }, prefix: []byte{9, 8, 7}, @@ -2005,7 +2037,7 @@ func Test_Trie_Get(t *testing.T) { Value: []byte{1, 2}, Descendants: 1, Children: padRightChildren([]*Node{ - {Key: []byte{1}}, + {Key: []byte{1}, Value: []byte{1}}, 
}), }, { // full key 0, 1, 1, 9 @@ -2064,7 +2096,7 @@ func Test_retrieve(t *testing.T) { Value: []byte{2}, Descendants: 1, Children: padRightChildren([]*Node{ - {Key: []byte{1}}, + {Key: []byte{1}, Value: []byte{1}}, }), }, key: []byte{1}, @@ -2076,7 +2108,7 @@ func Test_retrieve(t *testing.T) { Value: []byte{2}, Descendants: 1, Children: padRightChildren([]*Node{ - {Key: []byte{1}}, + {Key: []byte{1}, Value: []byte{1}}, }), }, value: []byte{2}, @@ -2087,7 +2119,7 @@ func Test_retrieve(t *testing.T) { Value: []byte{2}, Descendants: 1, Children: padRightChildren([]*Node{ - {Key: []byte{1}}, + {Key: []byte{1}, Value: []byte{1}}, }), }, key: []byte{1}, @@ -2159,7 +2191,8 @@ func Test_Trie_ClearPrefixLimit(t *testing.T) { Children: padRightChildren([]*Node{ nil, nil, nil, { - Key: []byte{4}, + Key: []byte{4}, + Value: []byte{1}, }, }), }, @@ -2209,7 +2242,8 @@ func Test_Trie_clearPrefixLimit(t *testing.T) { }, "leaf parent with common prefix": { parent: &Node{ - Key: []byte{1, 2}, + Key: []byte{1, 2}, + Value: []byte{1}, }, prefix: []byte{1}, limit: 1, @@ -2219,7 +2253,8 @@ func Test_Trie_clearPrefixLimit(t *testing.T) { }, "leaf parent with key equal prefix": { parent: &Node{ - Key: []byte{1}, + Key: []byte{1}, + Value: []byte{1}, }, prefix: []byte{1}, limit: 1, @@ -2232,12 +2267,14 @@ func Test_Trie_clearPrefixLimit(t *testing.T) { generation: 1, }, parent: &Node{ - Key: []byte{1, 2}, + Key: []byte{1, 2}, + Value: []byte{1}, }, prefix: []byte{1, 3}, limit: 1, newParent: &Node{ - Key: []byte{1, 2}, + Key: []byte{1, 2}, + Value: []byte{1}, }, allDeleted: true, }, @@ -2246,12 +2283,14 @@ func Test_Trie_clearPrefixLimit(t *testing.T) { generation: 1, }, parent: &Node{ - Key: []byte{1}, + Key: []byte{1}, + Value: []byte{1}, }, prefix: []byte{1, 2}, limit: 1, newParent: &Node{ - Key: []byte{1}, + Key: []byte{1}, + Value: []byte{1}, }, allDeleted: true, }, @@ -2260,8 +2299,8 @@ func Test_Trie_clearPrefixLimit(t *testing.T) { Key: []byte{1, 2}, Descendants: 2, Children: 
padRightChildren([]*Node{ - {Key: []byte{1}}, - {Key: []byte{2}}, + {Key: []byte{1}, Value: []byte{1}}, + {Key: []byte{2}, Value: []byte{1}}, }), }, prefix: []byte{1}, @@ -2275,8 +2314,8 @@ func Test_Trie_clearPrefixLimit(t *testing.T) { Key: []byte{1, 2}, Descendants: 2, Children: padRightChildren([]*Node{ - {Key: []byte{1}}, - {Key: []byte{2}}, + {Key: []byte{1}, Value: []byte{1}}, + {Key: []byte{2}, Value: []byte{1}}, }), }, prefix: []byte{1, 2}, @@ -2293,8 +2332,8 @@ func Test_Trie_clearPrefixLimit(t *testing.T) { Key: []byte{1, 2}, Descendants: 2, Children: padRightChildren([]*Node{ - {Key: []byte{1}}, - {Key: []byte{2}}, + {Key: []byte{1}, Value: []byte{1}}, + {Key: []byte{2}, Value: []byte{1}}, }), }, prefix: []byte{1, 3}, @@ -2303,8 +2342,8 @@ func Test_Trie_clearPrefixLimit(t *testing.T) { Key: []byte{1, 2}, Descendants: 2, Children: padRightChildren([]*Node{ - {Key: []byte{1}}, - {Key: []byte{2}}, + {Key: []byte{1}, Value: []byte{1}}, + {Key: []byte{2}, Value: []byte{1}}, }), }, allDeleted: true, @@ -2317,8 +2356,8 @@ func Test_Trie_clearPrefixLimit(t *testing.T) { Key: []byte{1}, Descendants: 2, Children: padRightChildren([]*Node{ - {Key: []byte{1}}, - {Key: []byte{2}}, + {Key: []byte{1}, Value: []byte{1}}, + {Key: []byte{2}, Value: []byte{1}}, }), }, prefix: []byte{1, 2, 3}, @@ -2327,8 +2366,8 @@ func Test_Trie_clearPrefixLimit(t *testing.T) { Key: []byte{1}, Descendants: 2, Children: padRightChildren([]*Node{ - {Key: []byte{1}}, - {Key: []byte{2}}, + {Key: []byte{1}, Value: []byte{1}}, + {Key: []byte{2}, Value: []byte{1}}, }), }, allDeleted: true, @@ -2341,8 +2380,8 @@ func Test_Trie_clearPrefixLimit(t *testing.T) { Key: []byte{1}, Descendants: 2, Children: padRightChildren([]*Node{ - {Key: []byte{1}}, - {Key: []byte{2}}, + {Key: []byte{1}, Value: []byte{1}}, + {Key: []byte{2}, Value: []byte{1}}, }), }, prefix: []byte{1, 2}, @@ -2351,8 +2390,8 @@ func Test_Trie_clearPrefixLimit(t *testing.T) { Key: []byte{1}, Descendants: 2, Children: 
padRightChildren([]*Node{ - {Key: []byte{1}}, - {Key: []byte{2}}, + {Key: []byte{1}, Value: []byte{1}}, + {Key: []byte{2}, Value: []byte{1}}, }), }, allDeleted: true, @@ -2363,7 +2402,7 @@ func Test_Trie_clearPrefixLimit(t *testing.T) { Value: []byte{1}, Descendants: 1, Children: padRightChildren([]*Node{ - {Key: []byte{1}}, + {Key: []byte{1}, Value: []byte{1}}, }), }, prefix: []byte{1}, @@ -2378,7 +2417,7 @@ func Test_Trie_clearPrefixLimit(t *testing.T) { Value: []byte{1}, Descendants: 1, Children: padRightChildren([]*Node{ - {Key: []byte{1}}, + {Key: []byte{1}, Value: []byte{1}}, }), }, prefix: []byte{1, 2}, @@ -2396,7 +2435,7 @@ func Test_Trie_clearPrefixLimit(t *testing.T) { Value: []byte{1}, Descendants: 1, Children: padRightChildren([]*Node{ - {Key: []byte{1}}, + {Key: []byte{1}, Value: []byte{1}}, }), }, prefix: []byte{1, 3}, @@ -2406,7 +2445,7 @@ func Test_Trie_clearPrefixLimit(t *testing.T) { Value: []byte{1}, Descendants: 1, Children: padRightChildren([]*Node{ - {Key: []byte{1}}, + {Key: []byte{1}, Value: []byte{1}}, }), }, allDeleted: true, @@ -2420,7 +2459,7 @@ func Test_Trie_clearPrefixLimit(t *testing.T) { Value: []byte{1}, Descendants: 1, Children: padRightChildren([]*Node{ - {Key: []byte{1}}, + {Key: []byte{1}, Value: []byte{1}}, }), }, prefix: []byte{1, 2, 3}, @@ -2430,7 +2469,7 @@ func Test_Trie_clearPrefixLimit(t *testing.T) { Value: []byte{1}, Descendants: 1, Children: padRightChildren([]*Node{ - {Key: []byte{1}}, + {Key: []byte{1}, Value: []byte{1}}, }), }, allDeleted: true, @@ -2444,7 +2483,7 @@ func Test_Trie_clearPrefixLimit(t *testing.T) { Value: []byte{1}, Descendants: 1, Children: padRightChildren([]*Node{ - {Key: []byte{1}}, + {Key: []byte{1}, Value: []byte{1}}, }), }, prefix: []byte{1, 2}, @@ -2454,7 +2493,7 @@ func Test_Trie_clearPrefixLimit(t *testing.T) { Value: []byte{1}, Descendants: 1, Children: padRightChildren([]*Node{ - {Key: []byte{1}}, + {Key: []byte{1}, Value: []byte{1}}, }), }, allDeleted: true, @@ -2468,8 +2507,8 @@ func 
Test_Trie_clearPrefixLimit(t *testing.T) { Value: []byte{1}, Descendants: 2, Children: padRightChildren([]*Node{ - {Key: []byte{3}}, - {Key: []byte{4}}, + {Key: []byte{3}, Value: []byte{1}}, + {Key: []byte{4}, Value: []byte{1}}, }), }, prefix: []byte{1}, @@ -2482,7 +2521,7 @@ func Test_Trie_clearPrefixLimit(t *testing.T) { Descendants: 1, Children: padRightChildren([]*Node{ nil, - {Key: []byte{4}}, + {Key: []byte{4}, Value: []byte{1}}, }), }, valuesDeleted: 1, @@ -2494,7 +2533,7 @@ func Test_Trie_clearPrefixLimit(t *testing.T) { Value: []byte{1}, Descendants: 1, Children: padRightChildren([]*Node{ - {Key: []byte{3}}, + {Key: []byte{3}, Value: []byte{1}}, }), }, prefix: []byte{1, 0}, @@ -2517,8 +2556,8 @@ func Test_Trie_clearPrefixLimit(t *testing.T) { Value: []byte{1}, Descendants: 2, Children: padRightChildren([]*Node{ - {Key: []byte{3}}, - {Key: []byte{4}}, + {Key: []byte{3}, Value: []byte{1}}, + {Key: []byte{4}, Value: []byte{1}}, }), }, prefix: []byte{1}, @@ -2537,8 +2576,8 @@ func Test_Trie_clearPrefixLimit(t *testing.T) { Key: []byte{1}, Descendants: 2, Children: padRightChildren([]*Node{ - {Key: []byte{3}}, - {Key: []byte{4}}, + {Key: []byte{3}, Value: []byte{1}}, + {Key: []byte{4}, Value: []byte{1}}, }), }, prefix: []byte{1}, @@ -2562,12 +2601,14 @@ func Test_Trie_clearPrefixLimit(t *testing.T) { Descendants: 1, Children: padRightChildren([]*Node{ { // full key 1, 0, 3, 0, 5 - Key: []byte{5}, + Key: []byte{5}, + Value: []byte{1}, }, }), }, { - Key: []byte{6}, + Key: []byte{6}, + Value: []byte{1}, }, }), }, @@ -2587,7 +2628,8 @@ func Test_Trie_clearPrefixLimit(t *testing.T) { Generation: 1, }, { - Key: []byte{6}, + Key: []byte{6}, + Value: []byte{1}, // Not modified so same generation as before }, }), @@ -2609,7 +2651,7 @@ func Test_Trie_clearPrefixLimit(t *testing.T) { Value: []byte{1}, Descendants: 1, Children: padRightChildren([]*Node{ - {Key: []byte{1}}, + {Key: []byte{1}, Value: []byte{1}}, }), }, }), @@ -2634,14 +2676,15 @@ func 
Test_Trie_clearPrefixLimit(t *testing.T) { Key: []byte{1}, Descendants: 2, Children: padRightChildren([]*Node{ - {Key: []byte{3}}, - {Key: []byte{4}}, + {Key: []byte{3}, Value: []byte{1}}, + {Key: []byte{4}, Value: []byte{1}}, }), }, prefix: []byte{1, 0, 3}, limit: 3, newParent: &Node{ Key: []byte{1, 1, 4}, + Value: []byte{1}, Dirty: true, Generation: 1, }, @@ -2657,14 +2700,15 @@ func Test_Trie_clearPrefixLimit(t *testing.T) { Key: []byte{1}, Descendants: 2, Children: padRightChildren([]*Node{ - {Key: []byte{3}}, - {Key: []byte{4}}, + {Key: []byte{3}, Value: []byte{1}}, + {Key: []byte{4}, Value: []byte{1}}, }), }, prefix: []byte{1, 0}, limit: 3, newParent: &Node{ Key: []byte{1, 1, 4}, + Value: []byte{1}, Dirty: true, Generation: 1, }, @@ -2681,7 +2725,7 @@ func Test_Trie_clearPrefixLimit(t *testing.T) { Value: []byte{1}, Descendants: 1, Children: padRightChildren([]*Node{ - {Key: []byte{3}}, + {Key: []byte{3}, Value: []byte{1}}, }), }, prefix: []byte{1, 0}, @@ -2690,7 +2734,7 @@ func Test_Trie_clearPrefixLimit(t *testing.T) { Value: []byte{1}, Descendants: 1, Children: padRightChildren([]*Node{ - {Key: []byte{3}}, + {Key: []byte{3}, Value: []byte{1}}, }), }, }, @@ -2733,17 +2777,21 @@ func Test_Trie_deleteNodesLimit(t *testing.T) { generation: 1, }, parent: &Node{ - Key: []byte{1}, + Key: []byte{1}, + Value: []byte{1}, }, newNode: &Node{ - Key: []byte{1}, + Key: []byte{1}, + Value: []byte{1}, }, }, "nil parent": { limit: 1, }, "delete leaf": { - parent: &Node{}, + parent: &Node{ + Value: []byte{1}, + }, limit: 2, valuesDeleted: 1, nodesRemoved: 1, @@ -2778,8 +2826,8 @@ func Test_Trie_deleteNodesLimit(t *testing.T) { Key: []byte{3}, Descendants: 2, Children: padRightChildren([]*Node{ - {Key: []byte{1}}, - {Key: []byte{2}}, + {Key: []byte{1}, Value: []byte{1}}, + {Key: []byte{2}, Value: []byte{1}}, }), }, limit: 10, @@ -2795,8 +2843,8 @@ func Test_Trie_deleteNodesLimit(t *testing.T) { Value: []byte{1, 2, 3}, Descendants: 2, Children: padRightChildren([]*Node{ - 
{Key: []byte{1}}, - {Key: []byte{2}}, + {Key: []byte{1}, Value: []byte{1}}, + {Key: []byte{2}, Value: []byte{1}}, }), }, limit: 1, @@ -2808,7 +2856,7 @@ func Test_Trie_deleteNodesLimit(t *testing.T) { Descendants: 1, Children: padRightChildren([]*Node{ nil, - {Key: []byte{2}}, + {Key: []byte{2}, Value: []byte{1}}, }), }, valuesDeleted: 1, @@ -2823,8 +2871,8 @@ func Test_Trie_deleteNodesLimit(t *testing.T) { Value: []byte{1, 2, 3}, Descendants: 2, Children: padRightChildren([]*Node{ - {Key: []byte{1}}, - {Key: []byte{2}}, + {Key: []byte{1}, Value: []byte{1}}, + {Key: []byte{2}, Value: []byte{1}}, }), }, limit: 2, @@ -2846,17 +2894,18 @@ func Test_Trie_deleteNodesLimit(t *testing.T) { Descendants: 3, Children: padRightChildren([]*Node{ nil, - {Key: []byte{1}}, + {Key: []byte{1}, Value: []byte{1}}, nil, - {Key: []byte{2}}, + {Key: []byte{2}, Value: []byte{1}}, nil, - {Key: []byte{3}}, + {Key: []byte{3}, Value: []byte{1}}, }), }, prefix: []byte{1, 2}, limit: 2, newNode: &Node{ Key: []byte{3, 5, 3}, + Value: []byte{1}, Generation: 1, Dirty: true, }, @@ -2894,12 +2943,12 @@ func Test_Trie_ClearPrefix(t *testing.T) { }{ "nil prefix": { trie: Trie{ - root: &Node{}, + root: &Node{Value: []byte{1}}, }, }, "empty prefix": { trie: Trie{ - root: &Node{}, + root: &Node{Value: []byte{1}}, }, prefix: []byte{}, }, @@ -2913,14 +2962,16 @@ func Test_Trie_ClearPrefix(t *testing.T) { Descendants: 3, Children: padRightChildren([]*Node{ { // full key in nibbles 1, 2, 0, 5 - Key: []byte{5}, + Key: []byte{5}, + Value: []byte{1}, }, { // full key in nibbles 1, 2, 1, 6 Key: []byte{6}, Value: []byte("bottom branch"), Children: padRightChildren([]*Node{ { // full key in nibbles 1, 2, 1, 6, 0, 7 - Key: []byte{7}, + Key: []byte{7}, + Value: []byte{1}, }, }), }, @@ -2931,6 +2982,7 @@ func Test_Trie_ClearPrefix(t *testing.T) { expectedTrie: Trie{ root: &Node{ Key: []byte{1, 2, 0, 5}, + Value: []byte{1}, Dirty: true, }, }, @@ -2975,13 +3027,14 @@ func Test_Trie_clearPrefix(t *testing.T) { Key: 
[]byte{1}, Descendants: 2, Children: padRightChildren([]*Node{ - {Key: []byte{3}}, - {Key: []byte{4}}, + {Key: []byte{3}, Value: []byte{1}}, + {Key: []byte{4}, Value: []byte{1}}, }), }, prefix: []byte{1, 0}, newParent: &Node{ Key: []byte{1, 1, 4}, + Value: []byte{1}, Dirty: true, Generation: 1, }, @@ -2990,14 +3043,16 @@ func Test_Trie_clearPrefix(t *testing.T) { "nil parent": {}, "leaf parent with common prefix": { parent: &Node{ - Key: []byte{1, 2}, + Key: []byte{1, 2}, + Value: []byte{1}, }, prefix: []byte{1}, nodesRemoved: 1, }, "leaf parent with key equal prefix": { parent: &Node{ - Key: []byte{1}, + Key: []byte{1}, + Value: []byte{1}, }, prefix: []byte{1}, nodesRemoved: 1, @@ -3007,11 +3062,13 @@ func Test_Trie_clearPrefix(t *testing.T) { generation: 1, }, parent: &Node{ - Key: []byte{1, 2}, + Key: []byte{1, 2}, + Value: []byte{1}, }, prefix: []byte{1, 3}, newParent: &Node{ - Key: []byte{1, 2}, + Key: []byte{1, 2}, + Value: []byte{1}, }, }, "leaf parent with key smaller than prefix": { @@ -3019,11 +3076,13 @@ func Test_Trie_clearPrefix(t *testing.T) { generation: 1, }, parent: &Node{ - Key: []byte{1}, + Key: []byte{1}, + Value: []byte{1}, }, prefix: []byte{1, 2}, newParent: &Node{ - Key: []byte{1}, + Key: []byte{1}, + Value: []byte{1}, }, }, "branch parent with common prefix": { @@ -3125,8 +3184,8 @@ func Test_Trie_clearPrefix(t *testing.T) { Value: []byte{1}, Descendants: 2, Children: padRightChildren([]*Node{ - {Key: []byte{3}}, - {Key: []byte{4}}, + {Key: []byte{3}, Value: []byte{1}}, + {Key: []byte{4}, Value: []byte{1}}, }), }, prefix: []byte{1, 0, 3}, @@ -3138,7 +3197,7 @@ func Test_Trie_clearPrefix(t *testing.T) { Descendants: 1, Children: padRightChildren([]*Node{ nil, - {Key: []byte{4}}, + {Key: []byte{4}, Value: []byte{1}}, }), }, nodesRemoved: 1, @@ -3152,7 +3211,7 @@ func Test_Trie_clearPrefix(t *testing.T) { Value: []byte{1}, Descendants: 1, Children: padRightChildren([]*Node{ - {Key: []byte{3}}, + {Key: []byte{3}, Value: []byte{1}}, }), }, 
prefix: []byte{1, 0}, @@ -3179,7 +3238,8 @@ func Test_Trie_clearPrefix(t *testing.T) { Descendants: 1, Children: padRightChildren([]*Node{ { // full key 1, 0, 3, 0, 5 - Key: []byte{5}, + Key: []byte{5}, + Value: []byte{1}, }, }), }, @@ -3211,13 +3271,14 @@ func Test_Trie_clearPrefix(t *testing.T) { Key: []byte{1}, Descendants: 2, Children: padRightChildren([]*Node{ - {Key: []byte{3}}, // full key 1, 0, 3 - {Key: []byte{4}}, // full key 1, 1, 4 + {Key: []byte{3}, Value: []byte{1}}, // full key 1, 0, 3 + {Key: []byte{4}, Value: []byte{1}}, // full key 1, 1, 4 }), }, prefix: []byte{1, 0, 3}, newParent: &Node{ Key: []byte{1, 1, 4}, + Value: []byte{1}, Dirty: true, Generation: 1, }, @@ -3253,12 +3314,12 @@ func Test_Trie_Delete(t *testing.T) { }{ "nil key": { trie: Trie{ - root: &Node{}, + root: &Node{Value: []byte{1}}, }, }, "empty key": { trie: Trie{ - root: &Node{}, + root: &Node{Value: []byte{1}}, }, }, "empty trie": { @@ -3350,14 +3411,16 @@ func Test_Trie_delete(t *testing.T) { }, "leaf parent and nil key": { parent: &Node{ - Key: []byte{1}, + Key: []byte{1}, + Value: []byte{1}, }, updated: true, nodesRemoved: 1, }, "leaf parent and empty key": { parent: &Node{ - Key: []byte{1}, + Key: []byte{1}, + Value: []byte{1}, }, key: []byte{}, updated: true, @@ -3365,7 +3428,8 @@ func Test_Trie_delete(t *testing.T) { }, "leaf parent matches key": { parent: &Node{ - Key: []byte{1}, + Key: []byte{1}, + Value: []byte{1}, }, key: []byte{1}, updated: true, @@ -3376,11 +3440,13 @@ func Test_Trie_delete(t *testing.T) { generation: 1, }, parent: &Node{ - Key: []byte{1}, + Key: []byte{1}, + Value: []byte{1}, }, key: []byte{2}, newParent: &Node{ - Key: []byte{1}, + Key: []byte{1}, + Value: []byte{1}, }, }, "branch parent and nil key": { @@ -3393,12 +3459,14 @@ func Test_Trie_delete(t *testing.T) { Descendants: 1, Children: padRightChildren([]*Node{ { - Key: []byte{2}, + Key: []byte{2}, + Value: []byte{1}, }, }), }, newParent: &Node{ Key: []byte{1, 0, 2}, + Value: []byte{1}, Dirty: 
true, Generation: 1, }, @@ -3414,12 +3482,13 @@ func Test_Trie_delete(t *testing.T) { Value: []byte{1}, Descendants: 1, Children: padRightChildren([]*Node{ - {Key: []byte{2}}, + {Key: []byte{2}, Value: []byte{1}}, }), }, key: []byte{}, newParent: &Node{ Key: []byte{1, 0, 2}, + Value: []byte{1}, Dirty: true, Generation: 1, }, @@ -3435,12 +3504,13 @@ func Test_Trie_delete(t *testing.T) { Value: []byte{1}, Descendants: 1, Children: padRightChildren([]*Node{ - {Key: []byte{2}}, + {Key: []byte{2}, Value: []byte{1}}, }), }, key: []byte{1}, newParent: &Node{ Key: []byte{1, 0, 2}, + Value: []byte{1}, Dirty: true, Generation: 1, }, @@ -3457,7 +3527,8 @@ func Test_Trie_delete(t *testing.T) { Descendants: 1, Children: padRightChildren([]*Node{ { // full key 1, 0, 2 - Key: []byte{2}, + Key: []byte{2}, + Value: []byte{1}, }, }), }, @@ -3503,7 +3574,8 @@ func Test_Trie_delete(t *testing.T) { Descendants: 1, Children: padRightChildren([]*Node{ { // full key 1, 0, 2 - Key: []byte{2}, + Key: []byte{2}, + Value: []byte{1}, }, }), }, @@ -3514,7 +3586,8 @@ func Test_Trie_delete(t *testing.T) { Descendants: 1, Children: padRightChildren([]*Node{ { // full key 1, 0, 2 - Key: []byte{2}, + Key: []byte{2}, + Value: []byte{1}, }, }), }, @@ -3556,8 +3629,8 @@ func Test_Trie_delete(t *testing.T) { Value: []byte{1}, Descendants: 2, Children: padRightChildren([]*Node{ - {Key: []byte{2}}, - {Key: []byte{2}}, + {Key: []byte{2}, Value: []byte{1}}, + {Key: []byte{2}, Value: []byte{1}}, }), }, key: []byte{1}, @@ -3567,8 +3640,8 @@ func Test_Trie_delete(t *testing.T) { Dirty: true, Descendants: 2, Children: padRightChildren([]*Node{ - {Key: []byte{2}}, - {Key: []byte{2}}, + {Key: []byte{2}, Value: []byte{1}}, + {Key: []byte{2}, Value: []byte{1}}, }), }, updated: true, @@ -3666,7 +3739,7 @@ func Test_handleDeletion(t *testing.T) { Generation: 1, Children: padRightChildren([]*Node{ nil, - {Key: []byte{9}}, + {Key: []byte{9}, Value: []byte{1}}, }), }, newNode: &Node{ @@ -3675,7 +3748,7 @@ func 
Test_handleDeletion(t *testing.T) { Generation: 1, Children: padRightChildren([]*Node{ nil, - {Key: []byte{9}}, + {Key: []byte{9}, Value: []byte{1}}, }), }, }, @@ -3710,9 +3783,9 @@ func Test_handleDeletion(t *testing.T) { Key: []byte{9}, Value: []byte{10}, Children: padRightChildren([]*Node{ - {Key: []byte{7}}, + {Key: []byte{7}, Value: []byte{1}}, nil, - {Key: []byte{8}}, + {Key: []byte{8}, Value: []byte{1}}, }), }, }), @@ -3723,9 +3796,9 @@ func Test_handleDeletion(t *testing.T) { Generation: 1, Dirty: true, Children: padRightChildren([]*Node{ - {Key: []byte{7}}, + {Key: []byte{7}, Value: []byte{1}}, nil, - {Key: []byte{8}}, + {Key: []byte{8}, Value: []byte{1}}, }), }, branchChildMerged: true,