diff --git a/Makefile b/Makefile index c29835f7e4..def65cc137 100644 --- a/Makefile +++ b/Makefile @@ -163,13 +163,14 @@ PB_CORE=$(shell go list -f {{.Dir}} -m github.com/tendermint/tendermint) PB_GOGO=$(shell go list -f {{.Dir}} -m github.com/gogo/protobuf) PB_CELESTIA_APP=$(shell go list -f {{.Dir}} -m github.com/celestiaorg/celestia-app) PB_NMT=$(shell go list -f {{.Dir}} -m github.com/celestiaorg/nmt) +PB_NODE=$(shell pwd) ## pb-gen: Generate protobuf code for all /pb/*.proto files in the project. pb-gen: @echo '--> Generating protobuf' @for dir in $(PB_PKGS); \ do for file in `find $$dir -type f -name "*.proto"`; \ - do protoc -I=. -I=${PB_CORE}/proto/ -I=${PB_GOGO} -I=${PB_CELESTIA_APP}/proto -I=${PB_NMT} --gogofaster_out=paths=source_relative:. $$file; \ + do protoc -I=. -I=${PB_CORE}/proto/ -I=${PB_NODE} -I=${PB_GOGO} -I=${PB_CELESTIA_APP}/proto -I=${PB_NMT} --gogofaster_out=paths=source_relative:. $$file; \ echo '-->' $$file; \ done; \ done; diff --git a/api/docgen/examples.go b/api/docgen/examples.go index f78d29543b..9b12994be9 100644 --- a/api/docgen/examples.go +++ b/api/docgen/examples.go @@ -79,7 +79,7 @@ var ExampleValues = map[reflect.Type]interface{}{ } func init() { - addToExampleValues(share.EmptyExtendedDataSquare()) + addToExampleValues(share.EmptyEDS()) addr, err := sdk.AccAddressFromBech32("celestia1377k5an3f94v6wyaceu0cf4nq6gk2jtpc46g7h") if err != nil { panic(err) diff --git a/blob/service.go b/blob/service.go index 75500ea441..22a238ec97 100644 --- a/blob/service.go +++ b/blob/service.go @@ -27,6 +27,7 @@ import ( "github.com/celestiaorg/celestia-node/header" "github.com/celestiaorg/celestia-node/libs/utils" "github.com/celestiaorg/celestia-node/share" + "github.com/celestiaorg/celestia-node/share/shwap" "github.com/celestiaorg/celestia-node/state" ) @@ -56,7 +57,7 @@ type Service struct { // accessor dials the given celestia-core endpoint to submit blobs. 
blobSubmitter Submitter // shareGetter retrieves the EDS to fetch all shares from the requested header. - shareGetter share.Getter + shareGetter shwap.Getter // headerGetter fetches header by the provided height headerGetter func(context.Context, uint64) (*header.ExtendedHeader, error) // headerSub subscribes to new headers to supply to blob subscriptions. @@ -65,7 +66,7 @@ type Service struct { func NewService( submitter Submitter, - getter share.Getter, + getter shwap.Getter, headerGetter func(context.Context, uint64) (*header.ExtendedHeader, error), headerSub func(ctx context.Context) (<-chan *header.ExtendedHeader, error), ) *Service { @@ -365,7 +366,7 @@ func (s *Service) retrieve( // collect shares for the requested namespace namespacedShares, err := s.shareGetter.GetSharesByNamespace(getCtx, header, namespace) if err != nil { - if errors.Is(err, share.ErrNotFound) { + if errors.Is(err, shwap.ErrNotFound) { err = ErrBlobNotFound } getSharesSpan.SetStatus(codes.Error, err.Error()) diff --git a/blob/service_test.go b/blob/service_test.go index defdac2312..b07bf5c092 100644 --- a/blob/service_test.go +++ b/blob/service_test.go @@ -6,6 +6,7 @@ import ( "encoding/json" "errors" "fmt" + "slices" "sort" "testing" "time" @@ -20,22 +21,25 @@ import ( "github.com/celestiaorg/celestia-app/v2/pkg/appconsts" pkgproof "github.com/celestiaorg/celestia-app/v2/pkg/proof" + "github.com/celestiaorg/celestia-app/v2/pkg/wrapper" "github.com/celestiaorg/go-header/store" "github.com/celestiaorg/go-square/blob" "github.com/celestiaorg/go-square/inclusion" squarens "github.com/celestiaorg/go-square/namespace" - "github.com/celestiaorg/go-square/shares" + appshares "github.com/celestiaorg/go-square/shares" "github.com/celestiaorg/nmt" "github.com/celestiaorg/rsmt2d" "github.com/celestiaorg/celestia-node/blob/blobtest" "github.com/celestiaorg/celestia-node/header" "github.com/celestiaorg/celestia-node/header/headertest" - shareMock 
"github.com/celestiaorg/celestia-node/nodebuilder/share/mocks" + "github.com/celestiaorg/celestia-node/libs/utils" "github.com/celestiaorg/celestia-node/share" + "github.com/celestiaorg/celestia-node/share/eds" "github.com/celestiaorg/celestia-node/share/eds/edstest" - "github.com/celestiaorg/celestia-node/share/getters" "github.com/celestiaorg/celestia-node/share/ipld" + "github.com/celestiaorg/celestia-node/share/shwap" + "github.com/celestiaorg/celestia-node/share/shwap/getters/mock" ) func TestBlobService_Get(t *testing.T) { @@ -58,7 +62,10 @@ func TestBlobService_Get(t *testing.T) { blobsWithSameNamespace, err := convertBlobs(appBlobs...) require.NoError(t, err) - service := createService(ctx, t, append(blobsWithDiffNamespaces, blobsWithSameNamespace...)) + blobs := slices.Concat(blobsWithDiffNamespaces, blobsWithSameNamespace) + shares, err := BlobsToShares(blobs...) + require.NoError(t, err) + service := createService(ctx, t, shares) test := []struct { name string doFn func() (interface{}, error) @@ -145,7 +152,7 @@ func TestBlobService_Get(t *testing.T) { require.True(t, bytes.Equal(sh, resultShares[shareOffset]), fmt.Sprintf("issue on %d attempt. ROW:%d, COL: %d, blobIndex:%d", i, row, col, blobs[i].index), ) - shareOffset += shares.SparseSharesNeeded(uint32(len(blobs[i].Data))) + shareOffset += appshares.SparseSharesNeeded(uint32(len(blobs[i].Data))) } }, }, @@ -403,21 +410,21 @@ func TestBlobService_Get(t *testing.T) { name: "internal error", doFn: func() (interface{}, error) { ctrl := gomock.NewController(t) - shareService := service.shareGetter - shareGetterMock := shareMock.NewMockModule(ctrl) - shareGetterMock.EXPECT(). + innerGetter := service.shareGetter + getterWrapper := mock.NewMockGetter(ctrl) + getterWrapper.EXPECT(). GetSharesByNamespace(gomock.Any(), gomock.Any(), gomock.Any()). 
DoAndReturn( func( ctx context.Context, h *header.ExtendedHeader, ns share.Namespace, - ) (share.NamespacedShares, error) { + ) (shwap.NamespaceData, error) { if ns.Equals(blobsWithDiffNamespaces[0].Namespace()) { return nil, errors.New("internal error") } - return shareService.GetSharesByNamespace(ctx, h, ns) + return innerGetter.GetSharesByNamespace(ctx, h, ns) }).AnyTimes() - service.shareGetter = shareGetterMock + service.shareGetter = getterWrapper return service.GetAll(ctx, 1, []share.Namespace{ blobsWithDiffNamespaces[0].Namespace(), @@ -459,9 +466,9 @@ func TestService_GetSingleBlobWithoutPadding(t *testing.T) { ns1, ns2 := blobs[0].Namespace().ToAppNamespace(), blobs[1].Namespace().ToAppNamespace() - padding0, err := shares.NamespacePaddingShare(ns1, appconsts.ShareVersionZero) + padding0, err := appshares.NamespacePaddingShare(ns1, appconsts.ShareVersionZero) require.NoError(t, err) - padding1, err := shares.NamespacePaddingShare(ns2, appconsts.ShareVersionZero) + padding1, err := appshares.NamespacePaddingShare(ns2, appconsts.ShareVersionZero) require.NoError(t, err) rawShares0, err := BlobsToShares(blobs[0]) require.NoError(t, err) @@ -471,25 +478,7 @@ func TestService_GetSingleBlobWithoutPadding(t *testing.T) { rawShares := make([][]byte, 0) rawShares = append(rawShares, append(rawShares0, padding0.ToBytes())...) rawShares = append(rawShares, append(rawShares1, padding1.ToBytes())...) 
- - bs := ipld.NewMemBlockservice() - batching := ds_sync.MutexWrap(ds.NewMapDatastore()) - headerStore, err := store.NewStore[*header.ExtendedHeader](batching) - require.NoError(t, err) - eds, err := ipld.AddShares(ctx, rawShares, bs) - require.NoError(t, err) - - h := headertest.ExtendedHeaderFromEDS(t, 1, eds) - err = headerStore.Init(ctx, h) - require.NoError(t, err) - - fn := func(ctx context.Context, height uint64) (*header.ExtendedHeader, error) { - return headerStore.GetByHeight(ctx, height) - } - fn2 := func(ctx context.Context) (<-chan *header.ExtendedHeader, error) { - return nil, fmt.Errorf("not implemented") - } - service := NewService(nil, getters.NewIPLDGetter(bs), fn, fn2) + service := createService(ctx, t, rawShares) newBlob, err := service.Get(ctx, 1, blobs[1].Namespace(), blobs[1].Commitment) require.NoError(t, err) @@ -497,6 +486,9 @@ func TestService_GetSingleBlobWithoutPadding(t *testing.T) { resultShares, err := BlobsToShares(newBlob) require.NoError(t, err) + + h, err := service.headerGetter(ctx, 1) + require.NoError(t, err) row, col := calculateIndex(len(h.DAH.RowRoots), newBlob.index) sh, err := service.shareGetter.GetShare(ctx, h, row, col) require.NoError(t, err) @@ -515,7 +507,9 @@ func TestService_Get(t *testing.T) { blobs, err := convertBlobs(appBlobs...) require.NoError(t, err) - service := createService(ctx, t, blobs) + shares, err := BlobsToShares(blobs...) 
+ require.NoError(t, err) + service := createService(ctx, t, shares) h, err := service.headerGetter(ctx, 1) require.NoError(t, err) @@ -534,7 +528,7 @@ func TestService_Get(t *testing.T) { require.NoError(t, err) assert.Equal(t, sh, resultShares[shareOffset], fmt.Sprintf("issue on %d attempt", i)) - shareOffset += shares.SparseSharesNeeded(uint32(len(blob.Data))) + shareOffset += appshares.SparseSharesNeeded(uint32(len(blob.Data))) } } @@ -555,7 +549,7 @@ func TestService_GetAllWithoutPadding(t *testing.T) { rawShares = make([][]byte, 0) ) - padding, err := shares.NamespacePaddingShare(ns, appconsts.ShareVersionZero) + padding, err := appshares.NamespacePaddingShare(ns, appconsts.ShareVersionZero) require.NoError(t, err) for i := 0; i < 2; i++ { @@ -575,21 +569,7 @@ func TestService_GetAllWithoutPadding(t *testing.T) { sh, err = BlobsToShares(blobs[4]) require.NoError(t, err) rawShares = append(rawShares, sh...) - - bs := ipld.NewMemBlockservice() - require.NoError(t, err) - eds, err := ipld.AddShares(ctx, rawShares, bs) - require.NoError(t, err) - - h := headertest.ExtendedHeaderFromEDS(t, 1, eds) - - fn := func(ctx context.Context, height uint64) (*header.ExtendedHeader, error) { - return h, nil - } - fn2 := func(ctx context.Context) (<-chan *header.ExtendedHeader, error) { - return nil, fmt.Errorf("not implemented") - } - service := NewService(nil, getters.NewIPLDGetter(bs), fn, fn2) + service := createService(ctx, t, rawShares) newBlobs, err := service.GetAll(ctx, 1, []share.Namespace{blobs[0].Namespace()}) require.NoError(t, err) @@ -598,6 +578,8 @@ func TestService_GetAllWithoutPadding(t *testing.T) { resultShares, err := BlobsToShares(newBlobs...) 
require.NoError(t, err) + h, err := service.headerGetter(ctx, 1) + require.NoError(t, err) shareOffset := 0 for i, blob := range newBlobs { require.True(t, blobs[i].compareCommitments(blob.Commitment)) @@ -607,14 +589,14 @@ func TestService_GetAllWithoutPadding(t *testing.T) { require.NoError(t, err) assert.Equal(t, sh, resultShares[shareOffset]) - shareOffset += shares.SparseSharesNeeded(uint32(len(blob.Data))) + shareOffset += appshares.SparseSharesNeeded(uint32(len(blob.Data))) } } func TestAllPaddingSharesInEDS(t *testing.T) { nid, err := share.NewBlobNamespaceV0(tmrand.Bytes(7)) require.NoError(t, err) - padding, err := shares.NamespacePaddingShare(nid.ToAppNamespace(), appconsts.ShareVersionZero) + padding, err := appshares.NamespacePaddingShare(nid.ToAppNamespace(), appconsts.ShareVersionZero) require.NoError(t, err) rawShares := make([]share.Share, 16) @@ -625,20 +607,7 @@ func TestAllPaddingSharesInEDS(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), time.Second*5) t.Cleanup(cancel) - bs := ipld.NewMemBlockservice() - require.NoError(t, err) - eds, err := ipld.AddShares(ctx, rawShares, bs) - require.NoError(t, err) - - h := headertest.ExtendedHeaderFromEDS(t, 1, eds) - - fn := func(ctx context.Context, height uint64) (*header.ExtendedHeader, error) { - return h, nil - } - fn2 := func(ctx context.Context) (<-chan *header.ExtendedHeader, error) { - return nil, fmt.Errorf("not implemented") - } - service := NewService(nil, getters.NewIPLDGetter(bs), fn, fn2) + service := createService(ctx, t, rawShares) newBlobs, err := service.GetAll(ctx, 1, []share.Namespace{nid}) require.NoError(t, err) assert.Empty(t, newBlobs) @@ -647,7 +616,7 @@ func TestAllPaddingSharesInEDS(t *testing.T) { func TestSkipPaddingsAndRetrieveBlob(t *testing.T) { nid, err := share.NewBlobNamespaceV0(tmrand.Bytes(7)) require.NoError(t, err) - padding, err := shares.NamespacePaddingShare(nid.ToAppNamespace(), appconsts.ShareVersionZero) + padding, err := 
appshares.NamespacePaddingShare(nid.ToAppNamespace(), appconsts.ShareVersionZero) require.NoError(t, err) rawShares := make([]share.Share, 0, 64) @@ -670,20 +639,7 @@ func TestSkipPaddingsAndRetrieveBlob(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), time.Second*5) t.Cleanup(cancel) - bs := ipld.NewMemBlockservice() - require.NoError(t, err) - eds, err := ipld.AddShares(ctx, rawShares, bs) - require.NoError(t, err) - - h := headertest.ExtendedHeaderFromEDS(t, 1, eds) - - fn := func(ctx context.Context, height uint64) (*header.ExtendedHeader, error) { - return h, nil - } - fn2 := func(ctx context.Context) (<-chan *header.ExtendedHeader, error) { - return nil, fmt.Errorf("not implemented") - } - service := NewService(nil, getters.NewIPLDGetter(bs), fn, fn2) + service := createService(ctx, t, rawShares) newBlob, err := service.GetAll(ctx, 1, []share.Namespace{nid}) require.NoError(t, err) require.Len(t, newBlob, 1) @@ -740,7 +696,7 @@ func TestService_Subscribe(t *testing.T) { t.Run("subscription cancellation", func(t *testing.T) { ns := blobs[0].Namespace() - subCtx, subCancel := context.WithCancel(ctx) + subCtx, subCancel := context.WithTimeout(ctx, time.Second*2) subCh, err := service.Subscribe(subCtx, ns) require.NoError(t, err) @@ -748,15 +704,20 @@ func TestService_Subscribe(t *testing.T) { select { case <-subCh: subCancel() - case <-time.After(time.Second * 2): + case <-ctx.Done(): t.Fatal("timeout waiting for first subscription response") } - select { - case _, ok := <-subCh: - assert.False(t, ok, "expected subscription channel to be closed") - case <-time.After(time.Second * 2): - t.Fatal("timeout waiting for subscription channel to close") + for { + select { + case _, ok := <-subCh: + if !ok { + // channel closed as expected + return + } + case <-ctx.Done(): + t.Fatal("timeout waiting for subscription channel to close") + } } }) @@ -858,7 +819,9 @@ func BenchmarkGetByCommitment(b *testing.B) { blobs, err := convertBlobs(appBlobs...) 
require.NoError(b, err) - service := createService(ctx, b, blobs) + shares, err := BlobsToShares(blobs...) + require.NoError(b, err) + service := createService(ctx, b, shares) indexer := &parser{} b.ResetTimer() for i := 0; i < b.N; i++ { @@ -910,20 +873,46 @@ func createServiceWithSub(ctx context.Context, t testing.TB, blobs []*Blob) *Ser }() return headerChan, nil } - return NewService(nil, getters.NewIPLDGetter(bs), fn, fn2) + ctrl := gomock.NewController(t) + shareGetter := mock.NewMockGetter(ctrl) + + shareGetter.EXPECT().GetSharesByNamespace(gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes(). + DoAndReturn(func(ctx context.Context, h *header.ExtendedHeader, ns share.Namespace) (shwap.NamespaceData, error) { + idx := int(h.Height()) - 1 + accessor := &eds.Rsmt2D{ExtendedDataSquare: edsses[idx]} + nd, err := eds.NamespaceData(ctx, accessor, ns) + return nd, err + }) + return NewService(nil, shareGetter, fn, fn2) } -func createService(ctx context.Context, t testing.TB, blobs []*Blob) *Service { - bs := ipld.NewMemBlockservice() +func createService(ctx context.Context, t testing.TB, shares []share.Share) *Service { + odsSize := int(utils.SquareSize(len(shares))) + square, err := rsmt2d.ComputeExtendedDataSquare( + shares, + share.DefaultRSMT2DCodec(), + wrapper.NewConstructor(uint64(odsSize))) + require.NoError(t, err) + + accessor := &eds.Rsmt2D{ExtendedDataSquare: square} + ctrl := gomock.NewController(t) + shareGetter := mock.NewMockGetter(ctrl) + shareGetter.EXPECT().GetSharesByNamespace(gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes(). + DoAndReturn(func(ctx context.Context, h *header.ExtendedHeader, ns share.Namespace) (shwap.NamespaceData, error) { + nd, err := eds.NamespaceData(ctx, accessor, ns) + return nd, err + }) + shareGetter.EXPECT().GetShare(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes(). 
+ DoAndReturn(func(ctx context.Context, h *header.ExtendedHeader, row, col int) (share.Share, error) { + s, err := accessor.Sample(ctx, row, col) + return s.Share, err + }) + + // create header and put it into the store + h := headertest.ExtendedHeaderFromEDS(t, 1, square) batching := ds_sync.MutexWrap(ds.NewMapDatastore()) headerStore, err := store.NewStore[*header.ExtendedHeader](batching) require.NoError(t, err) - rawShares, err := BlobsToShares(blobs...) - require.NoError(t, err) - eds, err := ipld.AddShares(ctx, rawShares, bs) - require.NoError(t, err) - - h := headertest.ExtendedHeaderFromEDS(t, 1, eds) err = headerStore.Init(ctx, h) require.NoError(t, err) @@ -933,7 +922,7 @@ func createService(ctx context.Context, t testing.TB, blobs []*Blob) *Service { fn2 := func(ctx context.Context) (<-chan *header.ExtendedHeader, error) { return nil, fmt.Errorf("not implemented") } - return NewService(nil, getters.NewIPLDGetter(bs), fn, fn2) + return NewService(nil, shareGetter, fn, fn2) } // TestProveCommitmentAllCombinations tests proving all the commitments in a block. 
@@ -1035,7 +1024,7 @@ func generateCommitmentProofFromBlock( sharesProof, err := pkgproof.NewShareInclusionProofFromEDS( eds, ns.ToAppNamespace(), - shares.NewRange(startShareIndex, startShareIndex+len(blobShares)), + appshares.NewRange(startShareIndex, startShareIndex+len(blobShares)), ) require.NoError(t, err) require.NoError(t, sharesProof.Validate(dataRoot)) diff --git a/cmd/cel-shed/eds_store_stress.go b/cmd/cel-shed/eds_store_stress.go index 9036a81e30..1150686a03 100644 --- a/cmd/cel-shed/eds_store_stress.go +++ b/cmd/cel-shed/eds_store_stress.go @@ -23,6 +23,7 @@ import ( const ( edsStorePathFlag = "path" edsWritesFlag = "writes" + edsWriteFrom = "init_height" edsSizeFlag = "size" edsDisableLogFlag = "disable-log" edsLogStatFreqFlag = "log-stat-freq" @@ -119,12 +120,14 @@ var edsStoreStress = &cobra.Command{ disableLog, _ := cmd.Flags().GetBool(edsDisableLogFlag) logFreq, _ := cmd.Flags().GetInt(edsLogStatFreqFlag) edsWrites, _ := cmd.Flags().GetInt(edsWritesFlag) + writeFrom, _ := cmd.Flags().GetInt(edsWriteFrom) edsSize, _ := cmd.Flags().GetInt(edsSizeFlag) putTimeout, _ := cmd.Flags().GetInt(putTimeoutFlag) cfg := edssser.Config{ EDSSize: edsSize, EDSWrites: edsWrites, + WriteFrom: writeFrom, EnableLog: !disableLog, LogFilePath: path, StatLogFreq: logFreq, @@ -144,12 +147,7 @@ var edsStoreStress = &cobra.Command{ err = errors.Join(err, nodestore.Close()) }() - datastore, err := nodestore.Datastore() - if err != nil { - return err - } - - stresser, err := edssser.NewEDSsser(path, datastore, cfg) + stresser, err := edssser.NewEDSsser(path, cfg) if err != nil { return err } diff --git a/cmd/cel-shed/main.go b/cmd/cel-shed/main.go index 872bbb48a9..f54b50e886 100644 --- a/cmd/cel-shed/main.go +++ b/cmd/cel-shed/main.go @@ -10,7 +10,7 @@ import ( ) func init() { - rootCmd.AddCommand(p2pCmd, headerCmd, edsStoreCmd) + rootCmd.AddCommand(p2pCmd, headerCmd, edsStoreCmd, shwapCmd) } var rootCmd = &cobra.Command{ diff --git a/cmd/cel-shed/shwap.go 
b/cmd/cel-shed/shwap.go new file mode 100644 index 0000000000..2d88e59ca9 --- /dev/null +++ b/cmd/cel-shed/shwap.go @@ -0,0 +1,40 @@ +package main + +import ( + "fmt" + "reflect" + + "github.com/ipfs/go-cid" + "github.com/spf13/cobra" + + "github.com/celestiaorg/celestia-node/share/shwap/p2p/bitswap" +) + +func init() { + shwapCmd.AddCommand(shwapCIDType) +} + +var shwapCmd = &cobra.Command{ + Use: "shwap [subcommand]", + Short: "Collection of shwap related utilities", +} + +var shwapCIDType = &cobra.Command{ + Use: "cid-type", + Short: "Decodes Bitswap CID composed over Shwap CID", + RunE: func(_ *cobra.Command, args []string) error { + cid, err := cid.Decode(args[0]) + if err != nil { + return fmt.Errorf("decoding cid: %w", err) + } + + blk, err := bitswap.EmptyBlock(cid) + if err != nil { + return fmt.Errorf("building block: %w", err) + } + + fmt.Printf("%s: %+v\n", reflect.TypeOf(blk), blk) + return nil + }, + Args: cobra.ExactArgs(1), +} diff --git a/core/eds.go b/core/eds.go index bb53178e09..a069ca7cec 100644 --- a/core/eds.go +++ b/core/eds.go @@ -2,10 +2,8 @@ package core import ( "context" - "errors" "fmt" - "github.com/filecoin-project/dagstore" "github.com/tendermint/tendermint/types" "github.com/celestiaorg/celestia-app/v2/app" @@ -18,9 +16,9 @@ import ( "github.com/celestiaorg/celestia-node/header" "github.com/celestiaorg/celestia-node/pruner" + "github.com/celestiaorg/celestia-node/pruner/full" "github.com/celestiaorg/celestia-node/share" - "github.com/celestiaorg/celestia-node/share/eds" - "github.com/celestiaorg/celestia-node/share/ipld" + "github.com/celestiaorg/celestia-node/store" ) // extendBlock extends the given block data, returning the resulting @@ -28,7 +26,7 @@ import ( // nil is returned in place of the eds. 
func extendBlock(data types.Data, appVersion uint64, options ...nmt.Option) (*rsmt2d.ExtendedDataSquare, error) { if app.IsEmptyBlock(data, appVersion) { - return share.EmptyExtendedDataSquare(), nil + return share.EmptyEDS(), nil } // Construct the data square from the block's transactions @@ -62,25 +60,20 @@ func storeEDS( ctx context.Context, eh *header.ExtendedHeader, eds *rsmt2d.ExtendedDataSquare, - adder *ipld.ProofsAdder, - store *eds.Store, + store *store.Store, window pruner.AvailabilityWindow, ) error { - if eds.Equals(share.EmptyExtendedDataSquare()) { - return nil - } - if !pruner.IsWithinAvailabilityWindow(eh.Time(), window) { log.Debugw("skipping storage of historic block", "height", eh.Height()) return nil } - ctx = ipld.CtxWithProofsAdder(ctx, adder) - - err := store.Put(ctx, share.DataHash(eh.DataHash), eds) - if errors.Is(err, dagstore.ErrShardExists) { - // block with given root already exists, return nil - return nil + var err error + // archival nodes should not store Q4 outside the availability window. 
+ if pruner.IsWithinAvailabilityWindow(eh.Time(), full.Window) { + err = store.PutODSQ4(ctx, eh.DAH, eh.Height(), eds) + } else { + err = store.PutODS(ctx, eh.DAH, eh.Height(), eds) } if err == nil { log.Debugw("stored EDS for height", "height", eh.Height()) diff --git a/core/eds_test.go b/core/eds_test.go index f6df18c4be..462561852a 100644 --- a/core/eds_test.go +++ b/core/eds_test.go @@ -24,11 +24,11 @@ func TestTrulyEmptySquare(t *testing.T) { eds, err := extendBlock(data, appconsts.LatestVersion) require.NoError(t, err) - require.True(t, eds.Equals(share.EmptyExtendedDataSquare())) + require.True(t, eds.Equals(share.EmptyEDS())) } -// TestEmptySquareWithZeroTxs tests that the DAH hash of a block with no transactions -// is equal to the DAH hash for an empty root even if SquareSize is set to +// TestEmptySquareWithZeroTxs tests that the datahash of a block with no transactions +// is equal to the datahash of an empty eds, even if SquareSize is set to // something non-zero. Technically, this block data is invalid because the // construction of the square is deterministic, and the rules which dictate the // square size do not allow for empty block data. 
However, should that ever @@ -40,13 +40,13 @@ func TestEmptySquareWithZeroTxs(t *testing.T) { eds, err := extendBlock(data, appconsts.LatestVersion) require.NoError(t, err) - require.True(t, eds.Equals(share.EmptyExtendedDataSquare())) + require.True(t, eds.Equals(share.EmptyEDS())) // force extend the square using an empty block and compare with the min DAH eds, err = app.ExtendBlock(data, appconsts.LatestVersion) require.NoError(t, err) - dah, err := share.NewRoot(eds) + roots, err := share.NewAxisRoots(eds) require.NoError(t, err) - assert.Equal(t, share.EmptyRoot().Hash(), dah.Hash()) + assert.Equal(t, share.EmptyEDSRoots().Hash(), roots.Hash()) } diff --git a/core/exchange.go b/core/exchange.go index 6593111e5f..7e5a0ef359 100644 --- a/core/exchange.go +++ b/core/exchange.go @@ -9,19 +9,17 @@ import ( "golang.org/x/sync/errgroup" libhead "github.com/celestiaorg/go-header" - "github.com/celestiaorg/nmt" "github.com/celestiaorg/celestia-node/header" "github.com/celestiaorg/celestia-node/pruner" - "github.com/celestiaorg/celestia-node/share/eds" - "github.com/celestiaorg/celestia-node/share/ipld" + "github.com/celestiaorg/celestia-node/store" ) -const concurrencyLimit = 4 +const concurrencyLimit = 16 type Exchange struct { fetcher *BlockFetcher - store *eds.Store + store *store.Store construct header.ConstructFn availabilityWindow pruner.AvailabilityWindow @@ -31,7 +29,7 @@ type Exchange struct { func NewExchange( fetcher *BlockFetcher, - store *eds.Store, + store *store.Store, construct header.ConstructFn, opts ...Option, ) (*Exchange, error) { @@ -134,11 +132,7 @@ func (ce *Exchange) Get(ctx context.Context, hash libhead.Hash) (*header.Extende return nil, fmt.Errorf("fetching block info for height %d: %w", &block.Height, err) } - // extend block data - adder := ipld.NewProofsAdder(int(block.Data.SquareSize)) - defer adder.Purge() - - eds, err := extendBlock(block.Data, block.Header.Version.App, nmt.NodeVisitor(adder.VisitFn())) + eds, err := 
extendBlock(block.Data, block.Header.Version.App) if err != nil { return nil, fmt.Errorf("extending block data for height %d: %w", &block.Height, err) } @@ -153,7 +147,7 @@ func (ce *Exchange) Get(ctx context.Context, hash libhead.Hash) (*header.Extende &block.Height, hash, eh.Hash()) } - err = storeEDS(ctx, eh, eds, adder, ce.store, ce.availabilityWindow) + err = storeEDS(ctx, eh, eds, ce.store, ce.availabilityWindow) if err != nil { return nil, err } @@ -179,11 +173,7 @@ func (ce *Exchange) getExtendedHeaderByHeight(ctx context.Context, height *int64 } log.Debugw("fetched signed block from core", "height", b.Header.Height) - // extend block data - adder := ipld.NewProofsAdder(int(b.Data.SquareSize)) - defer adder.Purge() - - eds, err := extendBlock(b.Data, b.Header.Version.App, nmt.NodeVisitor(adder.VisitFn())) + eds, err := extendBlock(b.Data, b.Header.Version.App) if err != nil { return nil, fmt.Errorf("extending block data for height %d: %w", b.Header.Height, err) } @@ -193,7 +183,7 @@ func (ce *Exchange) getExtendedHeaderByHeight(ctx context.Context, height *int64 panic(fmt.Errorf("constructing extended header for height %d: %w", b.Header.Height, err)) } - err = storeEDS(ctx, eh, eds, adder, ce.store, ce.availabilityWindow) + err = storeEDS(ctx, eh, eds, ce.store, ce.availabilityWindow) if err != nil { return nil, err } diff --git a/core/exchange_test.go b/core/exchange_test.go index fc43121425..fb3795d380 100644 --- a/core/exchange_test.go +++ b/core/exchange_test.go @@ -7,8 +7,6 @@ import ( "time" "github.com/cosmos/cosmos-sdk/client/flags" - ds "github.com/ipfs/go-datastore" - ds_sync "github.com/ipfs/go-datastore/sync" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -17,7 +15,7 @@ import ( "github.com/celestiaorg/celestia-node/header" "github.com/celestiaorg/celestia-node/pruner" "github.com/celestiaorg/celestia-node/share" - "github.com/celestiaorg/celestia-node/share/eds" + "github.com/celestiaorg/celestia-node/store" ) 
func TestCoreExchange_RequestHeaders(t *testing.T) { @@ -29,7 +27,8 @@ func TestCoreExchange_RequestHeaders(t *testing.T) { generateNonEmptyBlocks(t, ctx, fetcher, cfg, cctx) - store := createStore(t) + store, err := store.NewStore(store.DefaultParameters(), t.TempDir()) + require.NoError(t, err) ce, err := NewExchange(fetcher, store, header.MakeExtendedHeader) require.NoError(t, err) @@ -55,7 +54,11 @@ func TestCoreExchange_RequestHeaders(t *testing.T) { assert.Equal(t, expectedLastHeightInRange, headers[len(headers)-1].Height()) for _, h := range headers { - has, err := store.Has(ctx, h.DAH.Hash()) + has, err := store.HasByHash(ctx, h.DAH.Hash()) + require.NoError(t, err) + assert.True(t, has) + + has, err = store.HasByHeight(ctx, h.Height()) require.NoError(t, err) assert.True(t, has) } @@ -72,7 +75,8 @@ func TestExchange_DoNotStoreHistoric(t *testing.T) { generateNonEmptyBlocks(t, ctx, fetcher, cfg, cctx) - store := createStore(t) + store, err := store.NewStore(store.DefaultParameters(), t.TempDir()) + require.NoError(t, err) ce, err := NewExchange( fetcher, @@ -94,10 +98,15 @@ func TestExchange_DoNotStoreHistoric(t *testing.T) { // ensure none of the "historic" EDSs were stored for _, h := range headers { - if bytes.Equal(h.DataHash, share.EmptyRoot().Hash()) { + has, err := store.HasByHeight(ctx, h.Height()) + require.NoError(t, err) + assert.False(t, has) + + // empty EDSs are expected to exist in the store, so we skip them + if h.DAH.Equals(share.EmptyEDSRoots()) { continue } - has, err := store.Has(ctx, h.DAH.Hash()) + has, err = store.HasByHash(ctx, h.DAH.Hash()) require.NoError(t, err) assert.False(t, has) } @@ -112,32 +121,6 @@ func createCoreFetcher(t *testing.T, cfg *testnode.Config) (*BlockFetcher, testn return NewBlockFetcher(cctx.Client), cctx } -func createStore(t *testing.T) *eds.Store { - t.Helper() - - storeCfg := eds.DefaultParameters() - store, err := eds.NewStore(storeCfg, t.TempDir(), ds_sync.MutexWrap(ds.NewMapDatastore())) - 
require.NoError(t, err) - - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - err = store.Start(ctx) - require.NoError(t, err) - - // store an empty square to initialize EDS store - eds := share.EmptyExtendedDataSquare() - err = store.Put(ctx, share.EmptyRoot().Hash(), eds) - require.NoError(t, err) - - t.Cleanup(func() { - err = store.Stop(ctx) - require.NoError(t, err) - }) - - return store -} - // fillBlocks fills blocks until the context is canceled. func fillBlocks( t *testing.T, @@ -185,10 +168,11 @@ func generateNonEmptyBlocks( case b, ok := <-sub: require.True(t, ok) - if !bytes.Equal(b.Data.Hash(), share.EmptyRoot().Hash()) { - hashes = append(hashes, share.DataHash(b.Data.Hash())) - i++ + if bytes.Equal(share.EmptyEDSDataHash(), b.Data.Hash()) { + continue } + hashes = append(hashes, share.DataHash(b.Data.Hash())) + i++ case <-ctx.Done(): t.Fatal("failed to fill blocks within timeout") } diff --git a/core/header_test.go b/core/header_test.go index be4521d609..bcf3f177c9 100644 --- a/core/header_test.go +++ b/core/header_test.go @@ -40,7 +40,7 @@ func TestMakeExtendedHeaderForEmptyBlock(t *testing.T) { headerExt, err := header.MakeExtendedHeader(&b.Header, comm, val, eds) require.NoError(t, err) - assert.Equal(t, share.EmptyRoot(), headerExt.DAH) + assert.Equal(t, share.EmptyEDSRoots(), headerExt.DAH) } func TestMismatchedDataHash_ComputedRoot(t *testing.T) { diff --git a/core/listener.go b/core/listener.go index 5260067154..ece30188fa 100644 --- a/core/listener.go +++ b/core/listener.go @@ -12,13 +12,11 @@ import ( "go.opentelemetry.io/otel/attribute" libhead "github.com/celestiaorg/go-header" - "github.com/celestiaorg/nmt" "github.com/celestiaorg/celestia-node/header" "github.com/celestiaorg/celestia-node/pruner" - "github.com/celestiaorg/celestia-node/share/eds" - "github.com/celestiaorg/celestia-node/share/ipld" - "github.com/celestiaorg/celestia-node/share/p2p/shrexsub" + 
"github.com/celestiaorg/celestia-node/share/shwap/p2p/shrex/shrexsub" + "github.com/celestiaorg/celestia-node/store" ) var ( @@ -39,7 +37,7 @@ type Listener struct { fetcher *BlockFetcher construct header.ConstructFn - store *eds.Store + store *store.Store availabilityWindow pruner.AvailabilityWindow headerBroadcaster libhead.Broadcaster[*header.ExtendedHeader] @@ -58,7 +56,7 @@ func NewListener( fetcher *BlockFetcher, hashBroadcaster shrexsub.BroadcastFn, construct header.ConstructFn, - store *eds.Store, + store *store.Store, blocktime time.Duration, opts ...Option, ) (*Listener, error) { @@ -214,11 +212,8 @@ func (cl *Listener) handleNewSignedBlock(ctx context.Context, b types.EventDataS span.SetAttributes( attribute.Int64("height", b.Header.Height), ) - // extend block data - adder := ipld.NewProofsAdder(int(b.Data.SquareSize)) - defer adder.Purge() - eds, err := extendBlock(b.Data, b.Header.Version.App, nmt.NodeVisitor(adder.VisitFn())) + eds, err := extendBlock(b.Data, b.Header.Version.App) if err != nil { return fmt.Errorf("extending block data: %w", err) } @@ -229,7 +224,7 @@ func (cl *Listener) handleNewSignedBlock(ctx context.Context, b types.EventDataS panic(fmt.Errorf("making extended header: %w", err)) } - err = storeEDS(ctx, eh, eds, adder, cl.store, cl.availabilityWindow) + err = storeEDS(ctx, eh, eds, cl.store, cl.availabilityWindow) if err != nil { return fmt.Errorf("storing EDS: %w", err) } diff --git a/core/listener_no_race_test.go b/core/listener_no_race_test.go index 51b1abe4ca..b7d26fba36 100644 --- a/core/listener_no_race_test.go +++ b/core/listener_no_race_test.go @@ -12,6 +12,7 @@ import ( "github.com/stretchr/testify/require" "github.com/celestiaorg/celestia-node/share" + "github.com/celestiaorg/celestia-node/store" ) // TestListenerWithNonEmptyBlocks ensures that non-empty blocks are actually @@ -28,11 +29,12 @@ func TestListenerWithNonEmptyBlocks(t *testing.T) { fetcher, cctx := createCoreFetcher(t, cfg) eds := createEdsPubSub(ctx, t) - 
store := createStore(t) + store, err := store.NewStore(store.DefaultParameters(), t.TempDir()) + require.NoError(t, err) // create Listener and start listening cl := createListener(ctx, t, fetcher, ps0, eds, store, testChainID) - err := cl.Start(ctx) + err = cl.Start(ctx) require.NoError(t, err) // listen for eds hashes broadcasted through eds-sub and ensure store has @@ -41,7 +43,7 @@ func TestListenerWithNonEmptyBlocks(t *testing.T) { require.NoError(t, err) t.Cleanup(sub.Cancel) - empty := share.EmptyRoot() + empty := share.EmptyEDSRoots() // TODO extract 16 for i := 0; i < 16; i++ { accounts := cfg.Genesis.Accounts() @@ -55,7 +57,11 @@ func TestListenerWithNonEmptyBlocks(t *testing.T) { continue } - has, err := store.Has(ctx, msg.DataHash) + has, err := store.HasByHash(ctx, msg.DataHash) + require.NoError(t, err) + require.True(t, has) + + has, err = store.HasByHeight(ctx, msg.Height) require.NoError(t, err) require.True(t, has) } diff --git a/core/listener_test.go b/core/listener_test.go index 60b6600468..252c6e88c1 100644 --- a/core/listener_test.go +++ b/core/listener_test.go @@ -16,8 +16,8 @@ import ( "github.com/celestiaorg/celestia-node/header" nodep2p "github.com/celestiaorg/celestia-node/nodebuilder/p2p" "github.com/celestiaorg/celestia-node/pruner" - "github.com/celestiaorg/celestia-node/share/eds" - "github.com/celestiaorg/celestia-node/share/p2p/shrexsub" + "github.com/celestiaorg/celestia-node/share/shwap/p2p/shrex/shrexsub" + "github.com/celestiaorg/celestia-node/store" ) const testChainID = "private" @@ -52,7 +52,9 @@ func TestListener(t *testing.T) { eds := createEdsPubSub(ctx, t) // create Listener and start listening - cl := createListener(ctx, t, fetcher, ps0, eds, createStore(t), testChainID) + store, err := store.NewStore(store.DefaultParameters(), t.TempDir()) + require.NoError(t, err) + cl := createListener(ctx, t, fetcher, ps0, eds, store, testChainID) err = cl.Start(ctx) require.NoError(t, err) @@ -84,7 +86,8 @@ func 
TestListenerWithWrongChainRPC(t *testing.T) { fetcher, _ := createCoreFetcher(t, cfg) eds := createEdsPubSub(ctx, t) - store := createStore(t) + store, err := store.NewStore(store.DefaultParameters(), t.TempDir()) + require.NoError(t, err) // create Listener and start listening cl := createListener(ctx, t, fetcher, ps0, eds, store, "wrong-chain-rpc") @@ -111,7 +114,8 @@ func TestListener_DoesNotStoreHistoric(t *testing.T) { fetcher, cctx := createCoreFetcher(t, cfg) eds := createEdsPubSub(ctx, t) - store := createStore(t) + store, err := store.NewStore(store.DefaultParameters(), t.TempDir()) + require.NoError(t, err) // create Listener and start listening opt := WithAvailabilityWindow(pruner.AvailabilityWindow(time.Nanosecond)) @@ -119,12 +123,12 @@ func TestListener_DoesNotStoreHistoric(t *testing.T) { dataRoots := generateNonEmptyBlocks(t, ctx, fetcher, cfg, cctx) - err := cl.Start(ctx) + err = cl.Start(ctx) require.NoError(t, err) // ensure none of the EDSes were stored for _, hash := range dataRoots { - has, err := store.Has(ctx, hash) + has, err := store.HasByHash(ctx, hash) require.NoError(t, err) assert.False(t, has) } @@ -171,7 +175,7 @@ func createListener( fetcher *BlockFetcher, ps *pubsub.PubSub, edsSub *shrexsub.PubSub, - store *eds.Store, + store *store.Store, chainID string, opts ...Option, ) *Listener { diff --git a/das/coordinator.go b/das/coordinator.go index 852a40d24d..aff41bac8c 100644 --- a/das/coordinator.go +++ b/das/coordinator.go @@ -8,7 +8,7 @@ import ( libhead "github.com/celestiaorg/go-header" "github.com/celestiaorg/celestia-node/header" - "github.com/celestiaorg/celestia-node/share/p2p/shrexsub" + "github.com/celestiaorg/celestia-node/share/shwap/p2p/shrex/shrexsub" ) // samplingCoordinator runs and coordinates sampling workers and updates current sampling state diff --git a/das/coordinator_test.go b/das/coordinator_test.go index 18e707ba0d..a94a9a4e6f 100644 --- a/das/coordinator_test.go +++ b/das/coordinator_test.go @@ -14,7 +14,7 @@ 
import ( "github.com/celestiaorg/celestia-node/header" "github.com/celestiaorg/celestia-node/share" - "github.com/celestiaorg/celestia-node/share/p2p/shrexsub" + "github.com/celestiaorg/celestia-node/share/shwap/p2p/shrex/shrexsub" ) func TestCoordinator(t *testing.T) { @@ -432,7 +432,7 @@ func (m *mockSampler) discover(ctx context.Context, newHeight uint64, emit liste emit(ctx, &header.ExtendedHeader{ Commit: &types.Commit{}, RawHeader: header.RawHeader{Height: int64(newHeight)}, - DAH: &share.Root{RowRoots: make([][]byte, 0)}, + DAH: &share.AxisRoots{RowRoots: make([][]byte, 0)}, }) } diff --git a/das/daser.go b/das/daser.go index f89786535b..2b0b84ba29 100644 --- a/das/daser.go +++ b/das/daser.go @@ -16,7 +16,7 @@ import ( "github.com/celestiaorg/celestia-node/pruner" "github.com/celestiaorg/celestia-node/share" "github.com/celestiaorg/celestia-node/share/eds/byzantine" - "github.com/celestiaorg/celestia-node/share/p2p/shrexsub" + "github.com/celestiaorg/celestia-node/share/shwap/p2p/shrex/shrexsub" ) var log = logging.Logger("das") diff --git a/das/daser_test.go b/das/daser_test.go index f4e0150937..c67e5c06e2 100644 --- a/das/daser_test.go +++ b/das/daser_test.go @@ -2,37 +2,27 @@ package das import ( "context" + "fmt" "strconv" "testing" "time" "github.com/golang/mock/gomock" - "github.com/ipfs/boxo/blockservice" "github.com/ipfs/go-datastore" ds_sync "github.com/ipfs/go-datastore/sync" - pubsub "github.com/libp2p/go-libp2p-pubsub" - mocknet "github.com/libp2p/go-libp2p/p2p/net/mock" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/tendermint/tendermint/types" - "github.com/celestiaorg/go-fraud" - "github.com/celestiaorg/go-fraud/fraudserv" "github.com/celestiaorg/go-fraud/fraudtest" libhead "github.com/celestiaorg/go-header" "github.com/celestiaorg/celestia-node/header" "github.com/celestiaorg/celestia-node/header/headertest" - headerfraud "github.com/celestiaorg/celestia-node/header/headertest/fraud" 
"github.com/celestiaorg/celestia-node/pruner" "github.com/celestiaorg/celestia-node/share" - "github.com/celestiaorg/celestia-node/share/availability/full" - "github.com/celestiaorg/celestia-node/share/availability/light" "github.com/celestiaorg/celestia-node/share/availability/mocks" - availability_test "github.com/celestiaorg/celestia-node/share/availability/test" - "github.com/celestiaorg/celestia-node/share/eds/byzantine" - "github.com/celestiaorg/celestia-node/share/getters" - "github.com/celestiaorg/celestia-node/share/ipld" + "github.com/celestiaorg/celestia-node/share/eds/edstest" ) var timeout = time.Second * 15 @@ -41,10 +31,11 @@ var timeout = time.Second * 15 // the DASer checkpoint is updated to network head. func TestDASerLifecycle(t *testing.T) { ds := ds_sync.MutexWrap(datastore.NewMapDatastore()) - bServ := ipld.NewMemBlockservice() - avail := light.TestAvailability(getters.NewIPLDGetter(bServ)) + ctrl := gomock.NewController(t) + avail := mocks.NewMockAvailability(ctrl) + avail.EXPECT().SharesAvailable(gomock.Any(), gomock.Any()).AnyTimes().Return(nil) // 15 headers from the past and 15 future headers - mockGet, sub, mockService := createDASerSubcomponents(t, bServ, 15, 15) + mockGet, sub, mockService := createDASerSubcomponents(t, 15, 15) ctx, cancel := context.WithTimeout(context.Background(), timeout) t.Cleanup(cancel) @@ -62,7 +53,7 @@ func TestDASerLifecycle(t *testing.T) { checkpoint, err := daser.store.load(ctx) require.NoError(t, err) // ensure checkpoint is stored at 30 - assert.EqualValues(t, 30, checkpoint.SampleFrom-1) + require.EqualValues(t, 30, checkpoint.SampleFrom-1) }() // wait for mock to indicate that catchup is done @@ -73,18 +64,16 @@ func TestDASerLifecycle(t *testing.T) { } // wait for DASer to indicate done - assert.NoError(t, daser.WaitCatchUp(ctx)) - - // give catch-up routine a second to finish up sampling last header - assert.NoError(t, daser.sampler.state.waitCatchUp(ctx)) + require.NoError(t, waitHeight(ctx, daser, 
30)) } func TestDASer_Restart(t *testing.T) { ds := ds_sync.MutexWrap(datastore.NewMapDatastore()) - bServ := ipld.NewMemBlockservice() - avail := light.TestAvailability(getters.NewIPLDGetter(bServ)) + ctrl := gomock.NewController(t) + avail := mocks.NewMockAvailability(ctrl) + avail.EXPECT().SharesAvailable(gomock.Any(), gomock.Any()).AnyTimes().Return(nil) // 15 headers from the past and 15 future headers - mockGet, sub, mockService := createDASerSubcomponents(t, bServ, 15, 15) + mockGet, sub, mockService := createDASerSubcomponents(t, 15, 15) ctx, cancel := context.WithTimeout(context.Background(), timeout) t.Cleanup(cancel) @@ -103,16 +92,16 @@ func TestDASer_Restart(t *testing.T) { } // wait for DASer to indicate done - assert.NoError(t, daser.WaitCatchUp(ctx)) + require.NoError(t, waitHeight(ctx, daser, 30)) err = daser.Stop(ctx) require.NoError(t, err) // reset mockGet, generate 15 "past" headers, building off chain head which is 30 - mockGet.generateHeaders(t, bServ, 30, 45) + mockGet.generateHeaders(t, 30, 45) mockGet.doneCh = make(chan struct{}) // reset dummy subscriber - mockGet.fillSubWithHeaders(t, sub, bServ, 45, 60) + mockGet.fillSubWithHeaders(t, sub, 45, 60) // manually set mockGet head to trigger finished at 45 mockGet.head = int64(45) @@ -133,7 +122,7 @@ func TestDASer_Restart(t *testing.T) { case <-mockGet.doneCh: } - assert.NoError(t, daser.sampler.state.waitCatchUp(ctx)) + require.NoError(t, waitHeight(ctx, daser, 60)) err = daser.Stop(restartCtx) require.NoError(t, err) @@ -144,73 +133,76 @@ func TestDASer_Restart(t *testing.T) { assert.EqualValues(t, 60, checkpoint.SampleFrom-1) } -func TestDASer_stopsAfter_BEFP(t *testing.T) { - ctx, cancel := context.WithTimeout(context.Background(), time.Second*20) - t.Cleanup(cancel) - - ds := ds_sync.MutexWrap(datastore.NewMapDatastore()) - bServ := ipld.NewMemBlockservice() - // create mock network - net, err := mocknet.FullMeshLinked(1) - require.NoError(t, err) - // create pubsub for host - ps, err 
:= pubsub.NewGossipSub(ctx, net.Hosts()[0], - pubsub.WithMessageSignaturePolicy(pubsub.StrictNoSign)) - require.NoError(t, err) - avail := full.TestAvailability(t, getters.NewIPLDGetter(bServ)) - // 15 headers from the past and 15 future headers - mockGet, sub, _ := createDASerSubcomponents(t, bServ, 15, 15) - - // create fraud service and break one header - getter := func(ctx context.Context, height uint64) (*header.ExtendedHeader, error) { - return mockGet.GetByHeight(ctx, height) - } - headGetter := func(ctx context.Context) (*header.ExtendedHeader, error) { - return mockGet.Head(ctx) - } - unmarshaler := fraud.MultiUnmarshaler[*header.ExtendedHeader]{ - Unmarshalers: map[fraud.ProofType]func([]byte) (fraud.Proof[*header.ExtendedHeader], error){ - byzantine.BadEncoding: func(data []byte) (fraud.Proof[*header.ExtendedHeader], error) { - befp := &byzantine.BadEncodingProof{} - return befp, befp.UnmarshalBinary(data) - }, - }, - } - - fserv := fraudserv.NewProofService[*header.ExtendedHeader](ps, - net.Hosts()[0], - getter, - headGetter, - unmarshaler, - ds, - false, - "private", - ) - require.NoError(t, fserv.Start(ctx)) - mockGet.headers[1] = headerfraud.CreateFraudExtHeader(t, mockGet.headers[1], bServ) - newCtx := context.Background() - - // create and start DASer - daser, err := NewDASer(avail, sub, mockGet, ds, fserv, newBroadcastMock(1)) - require.NoError(t, err) - - resultCh := make(chan error) - go fraud.OnProof[*header.ExtendedHeader](newCtx, fserv, byzantine.BadEncoding, - func(fraud.Proof[*header.ExtendedHeader]) { - resultCh <- daser.Stop(newCtx) - }) - - require.NoError(t, daser.Start(newCtx)) - // wait for fraud proof will be handled - select { - case <-ctx.Done(): - t.Fatal(ctx.Err()) - case res := <-resultCh: - require.NoError(t, res) - } - // wait for manager to finish catchup - require.False(t, daser.running.Load()) -} +// TODO(@walldiss): BEFP test will not work until BEFP-shwap integration +// func TestDASer_stopsAfter_BEFP(t *testing.T) { +// 
ctx, cancel := context.WithTimeout(context.Background(), time.Second*20) +// t.Cleanup(cancel) +// +// ds := ds_sync.MutexWrap(datastore.NewMapDatastore()) +// // create mock network +// net, err := mocknet.FullMeshLinked(1) +// require.NoError(t, err) +// // create pubsub for host +// ps, err := pubsub.NewGossipSub(ctx, net.Hosts()[0], +// pubsub.WithMessageSignaturePolicy(pubsub.StrictNoSign)) +// require.NoError(t, err) +// +// ctrl := gomock.NewController(t) +// avail := mocks.NewMockAvailability(ctrl) +// avail.EXPECT().SharesAvailable(gomock.Any(), gomock.Any()).AnyTimes().Return(nil) +// // 15 headers from the past and 15 future headers +// mockGet, sub, _ := createDASerSubcomponents(t, 15, 15) +// +// // create fraud service and break one header +// getter := func(ctx context.Context, height uint64) (*header.ExtendedHeader, error) { +// return mockGet.GetByHeight(ctx, height) +// } +// headGetter := func(ctx context.Context) (*header.ExtendedHeader, error) { +// return mockGet.Head(ctx) +// } +// unmarshaler := fraud.MultiUnmarshaler[*header.ExtendedHeader]{ +// Unmarshalers: map[fraud.ProofType]func([]byte) (fraud.Proof[*header.ExtendedHeader], error){ +// byzantine.BadEncoding: func(data []byte) (fraud.Proof[*header.ExtendedHeader], error) { +// befp := &byzantine.BadEncodingProof{} +// return befp, befp.UnmarshalBinary(data) +// }, +// }, +// } +// +// fserv := fraudserv.NewProofService[*header.ExtendedHeader](ps, +// net.Hosts()[0], +// getter, +// headGetter, +// unmarshaler, +// ds, +// false, +// "private", +// ) +// require.NoError(t, fserv.Start(ctx)) +// mockGet.headers[1] = headerfraud.CreateFraudExtHeader(t, mockGet.headers[1]) +// newCtx := context.Background() +// +// // create and start DASer +// daser, err := NewDASer(avail, sub, mockGet, ds, fserv, newBroadcastMock(1)) +// require.NoError(t, err) +// +// resultCh := make(chan error) +// go fraud.OnProof[*header.ExtendedHeader](newCtx, fserv, byzantine.BadEncoding, +// 
func(fraud.Proof[*header.ExtendedHeader]) { +// resultCh <- daser.Stop(newCtx) +// }) +// +// require.NoError(t, daser.Start(newCtx)) +// // wait for fraud proof will be handled +// select { +// case <-ctx.Done(): +// t.Fatal(ctx.Err()) +// case res := <-resultCh: +// require.NoError(t, res) +// } +// // wait for manager to finish catchup +// require.False(t, daser.running.Load()) +//} func TestDASerSampleTimeout(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), time.Second*5) @@ -219,17 +211,18 @@ func TestDASerSampleTimeout(t *testing.T) { getter := getterStub{} avail := mocks.NewMockAvailability(gomock.NewController(t)) doneCh := make(chan struct{}) - avail.EXPECT().SharesAvailable(gomock.Any(), gomock.Any()).DoAndReturn( - func(sampleCtx context.Context, h *header.ExtendedHeader) error { - select { - case <-sampleCtx.Done(): - close(doneCh) - return nil - case <-ctx.Done(): - t.Fatal("call context didn't timeout in time") - return ctx.Err() - } - }) + avail.EXPECT().SharesAvailable(gomock.Any(), gomock.Any()).AnyTimes(). + DoAndReturn( + func(sampleCtx context.Context, h *header.ExtendedHeader) error { + select { + case <-sampleCtx.Done(): + close(doneCh) + return nil + case <-ctx.Done(): + t.Fatal("call context didn't timeout in time") + return ctx.Err() + } + }) ds := ds_sync.MutexWrap(datastore.NewMapDatastore()) sub := new(headertest.Subscriber) @@ -295,18 +288,16 @@ func TestDASer_SamplingWindow(t *testing.T) { // mockGetter, share.Availability, and mock header.Subscriber. 
func createDASerSubcomponents( t *testing.T, - bServ blockservice.BlockService, numGetter, numSub int, ) (*mockGetter, *headertest.Subscriber, *fraudtest.DummyService[*header.ExtendedHeader]) { - mockGet, sub := createMockGetterAndSub(t, bServ, numGetter, numSub) + mockGet, sub := createMockGetterAndSub(t, numGetter, numSub) fraud := &fraudtest.DummyService[*header.ExtendedHeader]{} return mockGet, sub, fraud } func createMockGetterAndSub( t *testing.T, - bServ blockservice.BlockService, numGetter, numSub int, ) (*mockGetter, *headertest.Subscriber) { @@ -316,10 +307,10 @@ func createMockGetterAndSub( brokenHeightCh: make(chan struct{}), } - mockGet.generateHeaders(t, bServ, 0, numGetter) + mockGet.generateHeaders(t, 0, numGetter) sub := new(headertest.Subscriber) - mockGet.fillSubWithHeaders(t, sub, bServ, numGetter, numGetter+numSub) + mockGet.fillSubWithHeaders(t, sub, numGetter, numGetter+numSub) return mockGet, sub } @@ -327,7 +318,6 @@ func createMockGetterAndSub( func (m *mockGetter) fillSubWithHeaders( t *testing.T, sub *headertest.Subscriber, - bServ blockservice.BlockService, startHeight, endHeight int, ) { @@ -335,9 +325,8 @@ func (m *mockGetter) fillSubWithHeaders( index := 0 for i := startHeight; i < endHeight; i++ { - dah := availability_test.RandFillBS(t, 16, bServ) - - randHeader := headertest.RandExtendedHeaderWithRoot(t, dah) + roots := edstest.RandomAxisRoots(t, 16) + randHeader := headertest.RandExtendedHeaderWithRoot(t, roots) randHeader.RawHeader.Height = int64(i + 1) sub.Headers[index] = randHeader @@ -359,11 +348,11 @@ type mockGetter struct { headers map[int64]*header.ExtendedHeader } -func (m *mockGetter) generateHeaders(t *testing.T, bServ blockservice.BlockService, startHeight, endHeight int) { +func (m *mockGetter) generateHeaders(t *testing.T, startHeight, endHeight int) { for i := startHeight; i < endHeight; i++ { - dah := availability_test.RandFillBS(t, 16, bServ) + roots := edstest.RandomAxisRoots(t, 16) - randHeader := 
headertest.RandExtendedHeaderWithRoot(t, dah) + randHeader := headertest.RandExtendedHeaderWithRoot(t, roots) randHeader.RawHeader.Height = int64(i + 1) m.headers[int64(i+1)] = randHeader @@ -397,7 +386,10 @@ func (m *mockGetter) GetByHeight(_ context.Context, height uint64) (*header.Exte } }() - return m.headers[int64(height)], nil + if h, ok := m.headers[int64(height)]; ok { + return h, nil + } + return nil, fmt.Errorf("header not found") } type benchGetterStub struct { @@ -407,7 +399,7 @@ type benchGetterStub struct { func newBenchGetter() benchGetterStub { return benchGetterStub{header: &header.ExtendedHeader{ - DAH: &share.Root{RowRoots: make([][]byte, 0)}, + DAH: &share.AxisRoots{RowRoots: make([][]byte, 0)}, }} } @@ -428,7 +420,7 @@ func (m getterStub) GetByHeight(_ context.Context, height uint64) (*header.Exten return &header.ExtendedHeader{ Commit: &types.Commit{}, RawHeader: header.RawHeader{Height: int64(height)}, - DAH: &share.Root{RowRoots: make([][]byte, 0)}, + DAH: &share.AxisRoots{RowRoots: make([][]byte, 0)}, }, nil } @@ -443,3 +435,22 @@ func (m getterStub) GetRangeByHeight( func (m getterStub) Get(context.Context, libhead.Hash) (*header.ExtendedHeader, error) { panic("implement me") } + +// waitHeight waits for the DASer to catch up to the given height. It will return an error if the +// DASer fails to catch up to the given height within the timeout. 
+func waitHeight(ctx context.Context, daser *DASer, height uint64) error { + for { + err := daser.WaitCatchUp(ctx) + if err != nil { + return err + } + stats, err := daser.SamplingStats(ctx) + if err != nil { + return err + } + if stats.SampledChainHead == height { + return nil + } + time.Sleep(time.Millisecond * 100) + } +} diff --git a/das/worker.go b/das/worker.go index 253dcacdff..88ca387211 100644 --- a/das/worker.go +++ b/das/worker.go @@ -10,7 +10,7 @@ import ( libhead "github.com/celestiaorg/go-header" "github.com/celestiaorg/celestia-node/header" - "github.com/celestiaorg/celestia-node/share/p2p/shrexsub" + "github.com/celestiaorg/celestia-node/share/shwap/p2p/shrex/shrexsub" ) type jobType string diff --git a/go.mod b/go.mod index 5023ad6f1d..875b65f525 100644 --- a/go.mod +++ b/go.mod @@ -22,7 +22,6 @@ require ( github.com/cristalhq/jwt/v5 v5.4.0 github.com/dgraph-io/badger/v4 v4.2.1-0.20240106094458-1c417aa3799c github.com/etclabscore/go-openrpc-reflect v0.0.37 - github.com/filecoin-project/dagstore v0.5.6 github.com/filecoin-project/go-jsonrpc v0.6.0 github.com/gammazero/workerpool v1.1.3 github.com/gofrs/flock v0.12.1 @@ -39,15 +38,15 @@ require ( github.com/ipfs/go-cid v0.4.1 github.com/ipfs/go-datastore v0.6.0 github.com/ipfs/go-ds-badger4 v0.1.5 - github.com/ipfs/go-ipld-cbor v0.1.0 + github.com/ipfs/go-ipfs-delay v0.0.1 github.com/ipfs/go-ipld-format v0.6.0 github.com/ipfs/go-log/v2 v2.5.1 github.com/ipfs/go-metrics-interface v0.0.1 github.com/ipfs/go-metrics-prometheus v0.0.2 - github.com/ipld/go-car v0.6.2 + github.com/klauspost/reedsolomon v1.12.1 github.com/libp2p/go-libp2p v0.36.2 github.com/libp2p/go-libp2p-kad-dht v0.26.1 - github.com/libp2p/go-libp2p-pubsub v0.11.0 + github.com/libp2p/go-libp2p-pubsub v0.12.0 github.com/libp2p/go-libp2p-record v0.2.0 github.com/libp2p/go-libp2p-routing-helpers v0.7.4 github.com/mitchellh/go-homedir v1.1.0 @@ -56,7 +55,7 @@ require ( github.com/multiformats/go-multiaddr-dns v0.3.1 
github.com/multiformats/go-multihash v0.2.3 github.com/open-rpc/meta-schema v0.0.0-20201029221707-1b72ef2ea333 - github.com/prometheus/client_golang v1.19.1 + github.com/prometheus/client_golang v1.20.0 github.com/rollkit/go-da v0.5.0 github.com/spf13/cobra v1.8.1 github.com/spf13/pflag v1.0.5 @@ -72,12 +71,12 @@ require ( go.opentelemetry.io/otel/sdk/metric v1.27.0 go.opentelemetry.io/otel/trace v1.27.0 go.opentelemetry.io/proto/otlp v1.3.1 - go.uber.org/fx v1.22.1 + go.uber.org/fx v1.22.2 go.uber.org/zap v1.27.0 - golang.org/x/crypto v0.25.0 - golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 - golang.org/x/sync v0.7.0 - golang.org/x/text v0.16.0 + golang.org/x/crypto v0.26.0 + golang.org/x/exp v0.0.0-20240808152545-0cdaa3abc0fa + golang.org/x/sync v0.8.0 + golang.org/x/text v0.17.0 google.golang.org/grpc v1.65.0 google.golang.org/protobuf v1.34.2 ) @@ -204,19 +203,11 @@ require ( github.com/improbable-eng/grpc-web v0.15.0 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/ipfs/bbloom v0.0.4 // indirect - github.com/ipfs/go-blockservice v0.5.2 // indirect - github.com/ipfs/go-ipfs-blockstore v1.3.1 // indirect - github.com/ipfs/go-ipfs-delay v0.0.1 // indirect - github.com/ipfs/go-ipfs-ds-help v1.1.1 // indirect - github.com/ipfs/go-ipfs-exchange-interface v0.2.1 // indirect github.com/ipfs/go-ipfs-pq v0.0.3 // indirect github.com/ipfs/go-ipfs-util v0.0.3 // indirect github.com/ipfs/go-ipld-legacy v0.2.1 // indirect github.com/ipfs/go-log v1.0.5 // indirect - github.com/ipfs/go-merkledag v0.11.0 // indirect github.com/ipfs/go-peertaskqueue v0.8.1 // indirect - github.com/ipfs/go-verifcid v0.0.3 // indirect - github.com/ipld/go-car/v2 v2.13.1 // indirect github.com/ipld/go-codec-dagpb v1.6.0 // indirect github.com/ipld/go-ipld-prime v0.21.0 // indirect github.com/jackpal/go-nat-pmp v1.0.2 // indirect @@ -227,7 +218,6 @@ require ( github.com/josharian/intern v1.0.0 // indirect github.com/klauspost/compress v1.17.9 // indirect 
github.com/klauspost/cpuid/v2 v2.2.8 // indirect - github.com/klauspost/reedsolomon v1.12.1 // indirect github.com/koron/go-ssdp v0.0.4 // indirect github.com/lib/pq v1.10.7 // indirect github.com/libp2p/go-buffer-pool v0.1.0 // indirect @@ -246,7 +236,7 @@ require ( github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd // indirect github.com/mattn/go-colorable v0.1.13 // indirect github.com/mattn/go-isatty v0.0.20 // indirect - github.com/miekg/dns v1.1.61 // indirect + github.com/miekg/dns v1.1.62 // indirect github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b // indirect github.com/mikioh/tcpopt v0.0.0-20190314235656-172688c1accc // indirect github.com/mimoo/StrobeGo v0.0.0-20210601165009-122bf33a46e0 // indirect @@ -264,23 +254,22 @@ require ( github.com/multiformats/go-multistream v0.5.0 // indirect github.com/multiformats/go-varint v0.0.7 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect - github.com/onsi/ginkgo/v2 v2.19.1 // indirect + github.com/onsi/ginkgo/v2 v2.20.0 // indirect github.com/opencontainers/runtime-spec v1.2.0 // indirect github.com/opentracing/opentracing-go v1.2.0 // indirect github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 // indirect github.com/pelletier/go-toml/v2 v2.1.0 // indirect - github.com/petar/GoLLRB v0.0.0-20210522233825-ae3b015fd3e9 // indirect github.com/petermattis/goid v0.0.0-20230317030725-371a4b8eda08 // indirect github.com/pion/datachannel v1.5.8 // indirect github.com/pion/dtls/v2 v2.2.12 // indirect github.com/pion/ice/v2 v2.3.34 // indirect - github.com/pion/interceptor v0.1.29 // indirect + github.com/pion/interceptor v0.1.30 // indirect github.com/pion/logging v0.2.2 // indirect github.com/pion/mdns v0.0.12 // indirect github.com/pion/randutil v0.1.0 // indirect github.com/pion/rtcp v1.2.14 // indirect - github.com/pion/rtp v1.8.8 // indirect - github.com/pion/sctp v1.8.20 // indirect + github.com/pion/rtp v1.8.9 // indirect + github.com/pion/sctp 
v1.8.33 // indirect github.com/pion/sdp/v3 v3.0.9 // indirect github.com/pion/srtp/v2 v2.0.20 // indirect github.com/pion/stun v0.6.1 // indirect @@ -294,7 +283,7 @@ require ( github.com/prometheus/common v0.55.0 // indirect github.com/prometheus/procfs v0.15.1 // indirect github.com/quic-go/qpack v0.4.0 // indirect - github.com/quic-go/quic-go v0.45.2 // indirect + github.com/quic-go/quic-go v0.46.0 // indirect github.com/quic-go/webtransport-go v0.8.0 // indirect github.com/rakyll/statik v0.1.7 // indirect github.com/raulk/go-watchdog v1.3.0 // indirect @@ -319,10 +308,8 @@ require ( github.com/tklauser/go-sysconf v0.3.12 // indirect github.com/tklauser/numcpus v0.6.1 // indirect github.com/ulikunitz/xz v0.5.10 // indirect - github.com/whyrusleeping/cbor v0.0.0-20171005072247-63513f603b11 // indirect - github.com/whyrusleeping/cbor-gen v0.1.2 // indirect github.com/whyrusleeping/go-keyspace v0.0.0-20160322163242-5b898ac5add1 // indirect - github.com/wlynxg/anet v0.0.3 // indirect + github.com/wlynxg/anet v0.0.4 // indirect github.com/zondax/hid v0.9.2 // indirect github.com/zondax/ledger-go v0.14.3 // indirect go.etcd.io/bbolt v1.3.6 // indirect @@ -330,17 +317,16 @@ require ( go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.49.0 // indirect go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.52.0 // indirect go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.27.0 // indirect - go.uber.org/atomic v1.11.0 // indirect - go.uber.org/dig v1.17.1 // indirect + go.uber.org/dig v1.18.0 // indirect go.uber.org/mock v0.4.0 // indirect go.uber.org/multierr v1.11.0 // indirect - golang.org/x/mod v0.19.0 // indirect - golang.org/x/net v0.27.0 // indirect + golang.org/x/mod v0.20.0 // indirect + golang.org/x/net v0.28.0 // indirect golang.org/x/oauth2 v0.21.0 // indirect - golang.org/x/sys v0.22.0 // indirect - golang.org/x/term v0.22.0 // indirect + golang.org/x/sys v0.24.0 // indirect + golang.org/x/term v0.23.0 // indirect 
golang.org/x/time v0.5.0 // indirect - golang.org/x/tools v0.23.0 // indirect + golang.org/x/tools v0.24.0 // indirect golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028 // indirect gonum.org/v1/gonum v0.15.0 // indirect google.golang.org/api v0.169.0 // indirect diff --git a/go.sum b/go.sum index b2a56315e9..7687a6617a 100644 --- a/go.sum +++ b/go.sum @@ -194,7 +194,6 @@ cosmossdk.io/math v1.3.0 h1:RC+jryuKeytIiictDslBP9i1fhkVm6ZDmZEoNP316zE= cosmossdk.io/math v1.3.0/go.mod h1:vnRTxewy+M7BtXBNFybkuhSH4WfedVAAnERHgVFhp3k= dmitri.shuralyov.com/app/changes v0.0.0-20180602232624-0a106ad413e3/go.mod h1:Yl+fi1br7+Rr3LqpNJf1/uxUdtRUV+Tnj0o93V2B9MU= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= -dmitri.shuralyov.com/gpu/mtl v0.0.0-20201218220906-28db891af037/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= dmitri.shuralyov.com/html/belt v0.0.0-20180602232347-f7d459c86be0/go.mod h1:JLBrvjyP0v+ecvNYvCpyZgu5/xkfAUhi6wJj28eUfSU= dmitri.shuralyov.com/service/change v0.0.0-20181023043359-a85b471d5412/go.mod h1:a1inKt/atXimZ4Mv927x+r7UpyzRUf4emIoiiSC2TN4= dmitri.shuralyov.com/state v0.0.0-20180228185332-28bcc343414c/go.mod h1:0PRwlb0D6DFvNNtx+9ybjezNCa8XF0xaYcETyp6rHWU= @@ -207,8 +206,6 @@ github.com/99designs/go-keychain v0.0.0-20191008050251-8e49817e8af4 h1:/vQbFIOMb github.com/99designs/go-keychain v0.0.0-20191008050251-8e49817e8af4/go.mod h1:hN7oaIRCjzsZ2dE+yG5k+rsdt3qcwykqK6HVGcKwsw4= github.com/99designs/keyring v1.2.2 h1:pZd3neh/EmUzWONb35LxQfvuY7kiSXAq3HQd97+XBn0= github.com/99designs/keyring v1.2.2/go.mod h1:wes/FrByc8j7lFOAGLGSNEg8f/PaI3cgTBqhFkHUrPk= -github.com/AndreasBriese/bbloom v0.0.0-20180913140656-343706a395b7/go.mod h1:bOvUY6CB00SOBii9/FifXqc0awNKxLFCL/+pkDPuyl8= -github.com/AndreasBriese/bbloom v0.0.0-20190306092124-e2d15f34fcf9/go.mod h1:bOvUY6CB00SOBii9/FifXqc0awNKxLFCL/+pkDPuyl8= github.com/AndreasBriese/bbloom v0.0.0-20190825152654-46b345b51c96 
h1:cTp8I5+VIoKjsnZuH8vjyaysT/ses3EvZeaV/1UkF2M= github.com/AndreasBriese/bbloom v0.0.0-20190825152654-46b345b51c96/go.mod h1:bOvUY6CB00SOBii9/FifXqc0awNKxLFCL/+pkDPuyl8= github.com/Azure/azure-sdk-for-go/sdk/azcore v0.21.1/go.mod h1:fBF9PQNqB8scdgpZ3ufzaLntG0AG7C1WjPMsiFOmfHM= @@ -229,7 +226,6 @@ github.com/DataDog/zstd v1.5.0/go.mod h1:g4AWEaM3yOg3HYfnJ3YIawPnVdXJh9QME85blwS github.com/Jorropo/jsync v1.0.1 h1:6HgRolFZnsdfzRUj+ImB9og1JYOxQoReSywkHOGSaUU= github.com/Jorropo/jsync v1.0.1/go.mod h1:jCOZj3vrBCri3bSU3ErUYvevKlnbssrXeCivybS5ABQ= github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0= -github.com/Kubuxu/go-os-helper v0.0.1/go.mod h1:N8B+I7vPCT80IcP58r50u4+gEEcsZETFUpAzWW2ep1Y= github.com/Masterminds/glide v0.13.2/go.mod h1:STyF5vcenH/rUqTEv+/hBXlSTo7KYwg2oc2f4tzPWic= github.com/Masterminds/semver v1.4.2/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y= github.com/Masterminds/vcs v1.13.0/go.mod h1:N09YCmOQr6RLxC6UNHzuVwAdodYbbnycGHSmwVJjcKA= @@ -246,7 +242,6 @@ github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMx github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg= github.com/StackExchange/wmi v1.2.1 h1:VIkavFPXSjcnS+O8yTq7NI32k0R5Aj+v39y29VYDOSA= github.com/StackExchange/wmi v1.2.1/go.mod h1:rcmrprowKIVzvc+NUiLncP2uuArMWLCbu9SBzvHz7e8= -github.com/Stebalien/go-bitfield v0.0.1/go.mod h1:GNjFpasyUVkHMsfEOk8EFLJ9syQ6SI+XWrX9Wf2XH0s= github.com/VictoriaMetrics/fastcache v1.6.0/go.mod h1:0qHz5QP0GMX4pfmMA/zt5RgfNuXJrTP0zS7DqpHGGTw= github.com/VictoriaMetrics/fastcache v1.12.2 h1:N0y9ASrJ0F6h0QaC3o6uJb3NIZ9VKLjCM7NQbSmF7WI= github.com/VictoriaMetrics/fastcache v1.12.2/go.mod h1:AmC+Nzz1+3G2eCPapF6UcsnkThDcMsQicp4xDukwJYI= @@ -268,7 +263,6 @@ github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuy github.com/alecthomas/units 
v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= -github.com/alecthomas/units v0.0.0-20210927113745-59d0afb8317a/go.mod h1:OMCwj8VM1Kc9e19TLln2VL61YJF0x1XFtfdL4JdbSyE= github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156/go.mod h1:Cb/ax3seSYIx7SuZdm2G2xzfwmv3TPSk2ucNfQESPXM= github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8= github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c= @@ -314,11 +308,8 @@ github.com/bits-and-blooms/bitset v1.10.0/go.mod h1:7hO7Gc7Pp1vODcmWvKMRA9BNmbv6 github.com/bmizerany/pat v0.0.0-20170815010413-6226ea591a40/go.mod h1:8rLXio+WjiTceGBHIoTvn60HIbs7Hm7bcHjyrSqYB9c= github.com/boltdb/bolt v1.3.1/go.mod h1:clJnj/oiGkjum5o1McbSZDSLxVThjynRyGBgiAx27Ps= github.com/bradfitz/go-smtpd v0.0.0-20170404230938-deb6d6237625/go.mod h1:HYsPBTaaSFSlLx/70C2HPIMNZpVV8+vt/A+FMnYP11g= -github.com/btcsuite/btcd v0.0.0-20190213025234-306aecffea32/go.mod h1:DrZx5ec/dmnfpw9KyYoQyYo7d0KEvTkk/5M/vbZjAr8= github.com/btcsuite/btcd v0.0.0-20190315201642-aa6e0f35703c/go.mod h1:DrZx5ec/dmnfpw9KyYoQyYo7d0KEvTkk/5M/vbZjAr8= github.com/btcsuite/btcd v0.0.0-20190523000118-16327141da8c/go.mod h1:3J08xEfcugPacsc34/LKRU2yO7YmuT8yt28J8k2+rrI= -github.com/btcsuite/btcd v0.0.0-20190605094302-a0d1e3e36d50/go.mod h1:3J08xEfcugPacsc34/LKRU2yO7YmuT8yt28J8k2+rrI= -github.com/btcsuite/btcd v0.0.0-20190824003749-130ea5bddde3/go.mod h1:3J08xEfcugPacsc34/LKRU2yO7YmuT8yt28J8k2+rrI= github.com/btcsuite/btcd v0.20.1-beta/go.mod h1:wVuoA8VJLEcwgqHBwHmzLRazpKxTv13Px/pDuV7OomQ= github.com/btcsuite/btcd v0.21.0-beta/go.mod h1:ZSWyehm27aAuS9bvkATT+Xte3hjHZ+MRgMY/8NJ7K94= 
github.com/btcsuite/btcd v0.21.0-beta.0.20201114000516-e9c7a5ac6401/go.mod h1:Sv4JPQ3/M+teHz9Bo5jBpkNcP0x6r7rdihlNL/7tTAs= @@ -326,7 +317,6 @@ github.com/btcsuite/btcd v0.22.0-beta/go.mod h1:9n5ntfhhHQBIhUvlhDvD3Qg6fRUj4jkN github.com/btcsuite/btcd v0.22.1 h1:CnwP9LM/M9xuRrGSCGeMVs9iv09uMqwsVX7EeIpgV2c= github.com/btcsuite/btcd v0.22.1/go.mod h1:wqgTSL29+50LRkmOVknEdmt8ZojIzhuWvgu/iptuN7Y= github.com/btcsuite/btcd/btcec/v2 v2.1.2/go.mod h1:ctjw4H1kknNJmRN4iP1R7bTQ+v3GJkZBd6mui8ZsAZE= -github.com/btcsuite/btcd/btcec/v2 v2.1.3/go.mod h1:ctjw4H1kknNJmRN4iP1R7bTQ+v3GJkZBd6mui8ZsAZE= github.com/btcsuite/btcd/btcec/v2 v2.3.2 h1:5n0X6hX0Zk+6omWcihdYvdAlGf2DfasC0GMf7DClJ3U= github.com/btcsuite/btcd/btcec/v2 v2.3.2/go.mod h1:zYzJ8etWJQIv1Ogk7OzpWjowwOdXY1W/17j2MW85J04= github.com/btcsuite/btcd/btcutil v1.1.3 h1:xfbtw8lwpp0G6NwSHb+UE67ryTFHJAiNuipusjXSohQ= @@ -360,8 +350,6 @@ github.com/celestiaorg/celestia-core v1.40.0-tm-v0.34.29 h1:J79TAjizxwIvm7/k+WI3 github.com/celestiaorg/celestia-core v1.40.0-tm-v0.34.29/go.mod h1:5jJ5magtH7gQOwSYfS/m5fliIS7irKunLV7kLNaD8o0= github.com/celestiaorg/cosmos-sdk v1.24.1-sdk-v0.46.16 h1:SeQ7Y/CyOcUMKo7mQiexaj/pZ/xIgyuZFIwYZwpSkWE= github.com/celestiaorg/cosmos-sdk v1.24.1-sdk-v0.46.16/go.mod h1:Bpl1LSWiDpQumgOhhMTZBMopqa0j7fRasIhvTZB44P0= -github.com/celestiaorg/dagstore v0.0.0-20230824094345-537c012aa403 h1:Lj73O3S+KJx5/hgZ+IeOLEIoLsAveJN/7/ZtQQtPSVw= -github.com/celestiaorg/dagstore v0.0.0-20230824094345-537c012aa403/go.mod h1:cCGM1UoMvyTk8k62mkc+ReVu8iHBCtSBAAL4wYU7KEI= github.com/celestiaorg/go-fraud v0.2.1 h1:oYhxI0gM/EpGRgbVQdRI/LSlqyT65g/WhQGSVGfx09w= github.com/celestiaorg/go-fraud v0.2.1/go.mod h1:lNY1i4K6kUeeE60Z2VK8WXd+qXb8KRzfBhvwPkK6aUc= github.com/celestiaorg/go-header v0.6.2 h1:qgWyJQg+/x6k4QAfN1rPt2HXHZjQOmCqD0ct4dFBIZY= @@ -390,21 +378,17 @@ github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= 
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cheekybits/genny v1.0.0/go.mod h1:+tQajlRqAUrPI7DOSpB0XAqZYtQakVtB7wXkRAgjxjQ= github.com/cheggaaa/pb v1.0.27/go.mod h1:pQciLPpbU0oxA0h+VJYYLxO+XeDQb5pZijXscXHm81s= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= -github.com/chzyer/logex v1.2.0/go.mod h1:9+9sk7u7pGNWYMkh0hdiL++6OeibzJccyQU4p4MedaY= github.com/chzyer/logex v1.2.1 h1:XHDu3E6q+gdHgsdTPH6ImJMIp436vR6MPtH8gP05QzM= github.com/chzyer/logex v1.2.1/go.mod h1:JLbx6lG2kDbNRFnfkgvh4eRJRPX1QCoOIWomwysCBrQ= github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= -github.com/chzyer/readline v1.5.0/go.mod h1:x22KAscuvRqlLoK9CsoYsmxoXZMMFVyOl86cAH8qUic= github.com/chzyer/readline v1.5.1 h1:upd/6fQk4src78LMRzh5vItIt361/o4uq553V8B5sGI= github.com/chzyer/readline v1.5.1/go.mod h1:Eh+b79XXUwfKfcPLepksvw2tcLE/Ct21YObkaSkeBlk= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= -github.com/chzyer/test v0.0.0-20210722231415-061457976a23/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= github.com/chzyer/test v1.0.0 h1:p3BQDXSxOhOG0P9z6/hGnII4LGiEPOYBhs8asl/fC04= github.com/chzyer/test v1.0.0/go.mod h1:2JlltgoNkt4TW/z9V/IzDdFaMTM2JPIi26O1pF38GC8= github.com/cilium/ebpf v0.2.0/go.mod h1:To2CFviqOWL/M0gIMsvSMlqe7em/l1ALkX1PyjrX2Qs= @@ -455,7 +439,6 @@ github.com/consensys/gnark-crypto v0.12.1 h1:lHH39WuuFgVHONRl3J0LRBtuYdQTumFSDtJ github.com/consensys/gnark-crypto v0.12.1/go.mod 
h1:v2Gy7L/4ZRosZ7Ivs+9SfUDr0f5UlG+EM5t7MPHiLuY= github.com/containerd/cgroups v0.0.0-20201119153540-4cbc285b3327/go.mod h1:ZJeTFisyysqgcCdecO57Dj79RfL0LNeGiFUqLYQRYLE= github.com/containerd/cgroups v1.0.3/go.mod h1:/ofk34relqNjSGyqPrmEULrO4Sc8LJhvJmWbUCUKqj8= -github.com/containerd/cgroups v1.0.4/go.mod h1:nLNQtsF7Sl2HxNebu77i1R0oDlhiTG+kO4JTrUzo6IA= github.com/containerd/cgroups v1.1.0 h1:v8rEWFl6EoqHB+swVNjVoCJE8o3jX7e8nqBGPLaDFBM= github.com/containerd/cgroups v1.1.0/go.mod h1:6ppBcbh/NOOUU+dMKrykgaBnK9lCIBxHqJDGwsa1mIw= github.com/containerd/continuity v0.3.0 h1:nisirsYROK15TAMVukJOUyGJjz4BNQJBVsNvAXZJ/eg= @@ -466,10 +449,8 @@ github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3Ee github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd v0.0.0-20181012123002-c6f51f82210d/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= -github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd/v22 v22.1.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk= github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= -github.com/coreos/go-systemd/v22 v22.4.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs= github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= @@ -499,9 +480,6 @@ github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:ma github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/cpuguy83/go-md2man/v2 v2.0.4 
h1:wfIWP927BUkWJb2NmU/kNDYIBTh/ziUX91+lVfRxZq4= github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= -github.com/crackcomm/go-gitignore v0.0.0-20170627025303-887ab5e44cc3/go.mod h1:p1d6YEZWvFzEh4KLyvBcVSnrfNDDvK2zfK/4x2v/4pE= -github.com/crackcomm/go-gitignore v0.0.0-20231225121904-e25f5bc08668 h1:ZFUue+PNxmHlu7pYv+IYMtqlaO/0VwaGEqKepZf9JpA= -github.com/crackcomm/go-gitignore v0.0.0-20231225121904-e25f5bc08668/go.mod h1:p1d6YEZWvFzEh4KLyvBcVSnrfNDDvK2zfK/4x2v/4pE= github.com/crate-crypto/go-ipa v0.0.0-20240223125850-b1e8a79f509c h1:uQYC5Z1mdLRPrZhHjHxufI8+2UG/i25QG92j0Er9p6I= github.com/crate-crypto/go-ipa v0.0.0-20240223125850-b1e8a79f509c/go.mod h1:geZJZH3SzKCqnz5VT0q/DyIG/tvu/dZk+VIfXicupJs= github.com/crate-crypto/go-kzg-4844 v1.0.0 h1:TsSgHwrkTKecKJ4kadtHi4b3xHW5dCFUDFnUp1TsawI= @@ -533,7 +511,6 @@ github.com/decred/dcrd/crypto/blake256 v1.0.0/go.mod h1:sQl2p6Y26YV+ZOcSTP6thNdn github.com/decred/dcrd/crypto/blake256 v1.0.1 h1:7PltbUIQB7u/FfZ39+DGa/ShuMyJ5ilcvdfma9wOH6Y= github.com/decred/dcrd/crypto/blake256 v1.0.1/go.mod h1:2OfgNZ5wDpcsFmHmCK5gZTPcCXqlm2ArzUIkw9czNJo= github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.1/go.mod h1:hyedUtir6IdtD/7lIxGeCxkaw7y45JueMRL4DIyJDKs= -github.com/decred/dcrd/dcrec/secp256k1/v4 v4.1.0/go.mod h1:DZGJHZMqrU4JJqFAWUS2UO1+lbSKsdiOoYi9Zzey7Fc= github.com/decred/dcrd/dcrec/secp256k1/v4 v4.3.0 h1:rpfIENRNNilwHwZeG5+P150SMrnNEcHYvcCuK6dPZSg= github.com/decred/dcrd/dcrec/secp256k1/v4 v4.3.0/go.mod h1:v57UDF4pDQJcEfFUCRop3lJL149eHGSe9Jvczhzjo/0= github.com/decred/dcrd/lru v1.0.0/go.mod h1:mxKOwFd7lFjN2GZYsiz/ecgqR6kkYAl+0pz0tEMk218= @@ -541,10 +518,6 @@ github.com/deepmap/oapi-codegen v1.6.0/go.mod h1:ryDa9AgbELGeB+YEXE1dR53yAjHwFvE github.com/deepmap/oapi-codegen v1.8.2/go.mod h1:YLgSKSDv/bZQB7N4ws6luhozi3cEdRktEqrX88CvjIw= github.com/desertbit/timer v0.0.0-20180107155436-c41aec40b27f h1:U5y3Y5UE0w7amNe7Z5G/twsBW0KEalRQXZzf8ufSh9I= github.com/desertbit/timer 
v0.0.0-20180107155436-c41aec40b27f/go.mod h1:xH/i4TFMt8koVQZ6WFms69WAsDWr2XsYL3Hkl7jkoLE= -github.com/dgraph-io/badger v1.5.5-0.20190226225317-8115aed38f8f/go.mod h1:VZxzAIRPHRVNRKRo6AXrX9BJegn6il06VMTZVJYCIjQ= -github.com/dgraph-io/badger v1.6.0-rc1/go.mod h1:zwt7syl517jmP8s94KqSxTlM6IMsdhYy6psNgSztDR4= -github.com/dgraph-io/badger v1.6.0/go.mod h1:zwt7syl517jmP8s94KqSxTlM6IMsdhYy6psNgSztDR4= -github.com/dgraph-io/badger v1.6.1/go.mod h1:FRmFw3uxvcpa8zG3Rxs0th+hCLIuaQg8HlNV5bjgnuU= github.com/dgraph-io/badger v1.6.2 h1:mNw0qs90GVgGGWylh0umH5iag1j6n/PeJtNvL6KY/x8= github.com/dgraph-io/badger v1.6.2/go.mod h1:JW2yswe3V058sS0kZ2h/AXeDSqFjxnZcRrVH//y2UQE= github.com/dgraph-io/badger/v2 v2.2007.4 h1:TRWBQg8UrlUhaFdco01nO2uXwzKS7zd+HVdwV/GHc4o= @@ -558,7 +531,6 @@ github.com/dgraph-io/ristretto v0.1.1 h1:6CWw5tJNgpegArSHpNHJKldNeq03FQCwYvfMVWa github.com/dgraph-io/ristretto v0.1.1/go.mod h1:S1GPSBCYCIhmVNfcth17y2zZtQT6wzkzgwUve0VDWWA= github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/dgryski/go-bitstream v0.0.0-20180413035011-3522498ce2c8/go.mod h1:VMaSuZ+SZcx/wljOQKvp5srsbCiKDEb6K2wC4+PiBmQ= -github.com/dgryski/go-farm v0.0.0-20190104051053-3adb47b1fb0f/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13 h1:fAjc9m62+UWV/WAFKLNi6ZS0675eEUC9y3AlwSbQu1Y= github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= @@ -629,7 +601,6 @@ github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5/go.mod h1:VvhXpOYNQvB+ github.com/fjl/memsize v0.0.2 h1:27txuSD9or+NZlnOWdKUxeBzTAUkWCVh+4Gf2dWFOzA= github.com/fjl/memsize v0.0.2/go.mod h1:VvhXpOYNQvB+uIk2RvXzuaQtkQJzzIx6lSBe1xv7hi0= github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod 
h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc= -github.com/flynn/noise v0.0.0-20180327030543-2492fe189ae6/go.mod h1:1i71OnUq3iUe1ma7Lr6yG6/rjvM3emb6yoL7xLFzcVQ= github.com/flynn/noise v1.0.0/go.mod h1:xbMo+0i6+IGbYdJhF31t2eR1BIU0CYc12+BNAKwUTag= github.com/flynn/noise v1.1.0 h1:KjPQoQCEFdZDiP03phOvGi11+SVVhBG2wOWAorLsstg= github.com/flynn/noise v1.1.0/go.mod h1:xbMo+0i6+IGbYdJhF31t2eR1BIU0CYc12+BNAKwUTag= @@ -641,19 +612,13 @@ github.com/francoispqt/gojay v1.2.13/go.mod h1:ehT5mTG4ua4581f1++1WLG0vPdaA9HaiD github.com/franela/goblin v0.0.0-20200105215937-c9ffbefa60db/go.mod h1:7dvUGVsVBjqR7JHJk0brhHOZYGmfBYOrK0ZhYMEtBr4= github.com/franela/goreq v0.0.0-20171204163338-bcd34c9993f8/go.mod h1:ZhphrRTfi2rbfLwlschooIH4+wKKDR4Pdxhh+TRoA20= github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k= -github.com/frankban/quicktest v1.14.0/go.mod h1:NeW+ay9A/U67EYXNFA1nPE8e/tnQv/09mUdL/ijj8og= -github.com/frankban/quicktest v1.14.2/go.mod h1:mgiwOwqx65TmIk1wJ6Q7wvnVMocbUorkibMOrVTHZps= -github.com/frankban/quicktest v1.14.3/go.mod h1:mgiwOwqx65TmIk1wJ6Q7wvnVMocbUorkibMOrVTHZps= -github.com/frankban/quicktest v1.14.4/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8= github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= github.com/fsnotify/fsnotify v1.5.1/go.mod h1:T3375wBYaZdLLcVNkcVbzGHY7f1l/uK5T5Ai1i3InKU= -github.com/fsnotify/fsnotify v1.5.4/go.mod h1:OVB6XrOHzAwXMpEM7uPOzcehqUV2UqJxmVXmkdnm1bU= github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= -github.com/gabriel-vasile/mimetype v1.4.1/go.mod 
h1:05Vi0w3Y9c/lNvJOdmIwvrrAhX3rYhfQQCaf9VJcv7M= github.com/gammazero/deque v0.2.0 h1:SkieyNB4bg2/uZZLxvya0Pq6diUlwx7m2TeT7GAIWaA= github.com/gammazero/deque v0.2.0/go.mod h1:LFroj8x4cMYCukHJDbxFCkT+r9AndaJnFMuZDV34tuU= github.com/gammazero/workerpool v1.1.3 h1:WixN4xzukFoN0XSeXF6puqEqFTl2mECI9S6W44HWy9Q= @@ -673,7 +638,6 @@ github.com/gin-gonic/gin v1.7.0/go.mod h1:jD2toBW3GZUr5UMcdrwQA10I7RuaFOl/SGeDjX github.com/gliderlabs/ssh v0.1.1/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0= github.com/glycerine/go-unsnap-stream v0.0.0-20180323001048-9f0cb55181dd/go.mod h1:/20jfyN9Y5QPEAprSgKAUr+glWDY39ZiUEAYOEv5dsE= github.com/glycerine/goconvey v0.0.0-20190410193231-58a59202ab31/go.mod h1:Ogl1Tioa0aV7gstGFO7KhffUsb9M4ydbEbbxpcEDc24= -github.com/go-check/check v0.0.0-20180628173108-788fd7840127/go.mod h1:9ES+weclKsC9YodN5RgxqK/VD9HM9JsCSh7rNhMZE98= github.com/go-chi/chi/v5 v5.0.0/go.mod h1:BBug9lr0cqtdAhsu6R4AAdvufI0/XBzAQSsUqJpoZOs= github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q= github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= @@ -694,9 +658,7 @@ github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= github.com/go-logfmt/logfmt v0.6.0 h1:wGYYu3uicYdqXVgoYbvnkrPVXkuLM1p1ifugDMEdRi4= github.com/go-logfmt/logfmt v0.6.0/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= -github.com/go-logr/logr v0.4.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= github.com/go-logr/logr v1.4.2/go.mod 
h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= @@ -769,7 +731,6 @@ github.com/golang/glog v1.2.1 h1:OptwRhECazUx5ix5TTWC3EZhsZEHWcYWY4FQHTIubm4= github.com/golang/glog v1.2.1/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w= github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20191027212112-611e8accdfc9/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= @@ -865,11 +826,9 @@ github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLe github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20221203041831-ce31453925ec/go.mod h1:dDKJzRmX4S37WGHujM7tX//fmj1uioxKzKxz3lo4HJo= github.com/google/pprof 
v0.0.0-20240727154555-813a5fbdbec8 h1:FKHo8hFI3A+7w0aUQuYXQ+6EN5stWmeY/AZqtM8xk9k= github.com/google/pprof v0.0.0-20240727154555-813a5fbdbec8/go.mod h1:K1liHPHnj73Fdn/EKuT8nrFqBihUSKXoLYU0BuatOYo= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= @@ -915,7 +874,6 @@ github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB7 github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY= github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ= github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= -github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= github.com/gorilla/websocket v1.4.1/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= @@ -996,7 +954,6 @@ github.com/hashicorp/golang-lru v1.0.2 h1:dV3g9Z/unq5DpblPpw+Oqcv4dU/1omnb4Ok8iP github.com/hashicorp/golang-lru v1.0.2/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= github.com/hashicorp/golang-lru/arc/v2 v2.0.7 h1:QxkVTxwColcduO+LP7eJO56r2hFiG8zEbfAAzRv52KQ= github.com/hashicorp/golang-lru/arc/v2 v2.0.7/go.mod h1:Pe7gBlGdc8clY5LJ0LpJXMt5AmgmWNH1g+oFFVUHOEc= -github.com/hashicorp/golang-lru/v2 v2.0.1/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM= github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k= github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM= github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= @@ -1028,7 +985,6 @@ github.com/iancoleman/orderedmap v0.2.0 h1:sq1N/TFpYH++aViPcaKjys3bDClUEU7s5B+z6 github.com/iancoleman/orderedmap v0.2.0/go.mod h1:N0Wam8K1arqPXNWjMo21EXnBPOPp36vB07FNRdD2geA= 
github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= -github.com/ianlancetaylor/demangle v0.0.0-20220319035150-800ac71e25c2/go.mod h1:aYm2/VgdVmcIU8iMfdMvDMsRAQjcfZSKFby6HOFvi/w= github.com/icrowley/fake v0.0.0-20180203215853-4178557ae428/go.mod h1:uhpZMVGznybq1itEKXj6RYw9I71qK4kH+OGMjRC4KEo= github.com/imdario/mergo v0.3.16 h1:wwQJbIsHYGMUyLSPrEq1CT16AhnhNJQ51+4fdHUnCl4= github.com/imdario/mergo v0.3.16/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY= @@ -1049,168 +1005,49 @@ github.com/influxdata/promql/v2 v2.12.0/go.mod h1:fxOPu+DY0bqCTCECchSRtWfc+0X19y github.com/influxdata/roaring v0.4.13-0.20180809181101-fc520f41fab6/go.mod h1:bSgUQ7q5ZLSO+bKBGqJiCBGAl+9DxyW63zLTujjUlOE= github.com/influxdata/tdigest v0.0.0-20181121200506-bf2b5ad3c0a9/go.mod h1:Js0mqiSBE6Ffsg94weZZ2c+v/ciT8QRHFOap7EKDrR0= github.com/influxdata/usage-client v0.0.0-20160829180054-6d3895376368/go.mod h1:Wbbw6tYNvwa5dlB6304Sd+82Z3f7PmVZHVKU637d4po= -github.com/ipfs/bbloom v0.0.1/go.mod h1:oqo8CVWsJFMOZqTglBG4wydCE4IQA/G2/SEofB0rjUI= github.com/ipfs/bbloom v0.0.4 h1:Gi+8EGJ2y5qiD5FbsbpX/TMNcJw8gSqr7eyjHa4Fhvs= github.com/ipfs/bbloom v0.0.4/go.mod h1:cS9YprKXpoZ9lT0n/Mw/a6/aFV6DTjTLYHeA+gyqMG0= github.com/ipfs/boxo v0.22.0 h1:QTC+P5uhsBNq6HzX728nsLyFW6rYDeR/5hggf9YZX78= github.com/ipfs/boxo v0.22.0/go.mod h1:yp1loimX0BDYOR0cyjtcXHv15muEh5V1FqO2QLlzykw= -github.com/ipfs/go-bitfield v1.0.0/go.mod h1:N/UiujQy+K+ceU1EF5EkVd1TNqevLrCQMIcAEPrdtus= -github.com/ipfs/go-bitfield v1.1.0 h1:fh7FIo8bSwaJEh6DdTWbCeZ1eqOaOkKFI74SCnsWbGA= -github.com/ipfs/go-bitfield v1.1.0/go.mod h1:paqf1wjq/D2BBmzfTVFlJQ9IlFOZpg422HL0HqsGWHU= -github.com/ipfs/go-bitswap v0.1.0/go.mod h1:FFJEf18E9izuCqUtHxbWEvq+reg7o4CW5wSAE1wsxj0= -github.com/ipfs/go-bitswap v0.1.2/go.mod h1:qxSWS4NXGs7jQ6zQvoPY3+NmOfHHG47mhkiLzBpJQIs= 
-github.com/ipfs/go-bitswap v0.1.8/go.mod h1:TOWoxllhccevbWFUR2N7B1MTSVVge1s6XSMiCSA4MzM= -github.com/ipfs/go-bitswap v0.3.4/go.mod h1:4T7fvNv/LmOys+21tnLzGKncMeeXUYUd1nUiJ2teMvI= -github.com/ipfs/go-bitswap v0.5.1/go.mod h1:P+ckC87ri1xFLvk74NlXdP0Kj9RmWAh4+H78sC6Qopo= -github.com/ipfs/go-bitswap v0.6.0/go.mod h1:Hj3ZXdOC5wBJvENtdqsixmzzRukqd8EHLxZLZc3mzRA= -github.com/ipfs/go-bitswap v0.11.0 h1:j1WVvhDX1yhG32NTC9xfxnqycqYIlhzEzLXG/cU1HyQ= -github.com/ipfs/go-bitswap v0.11.0/go.mod h1:05aE8H3XOU+LXpTedeAS0OZpcO1WFsj5niYQH9a1Tmk= -github.com/ipfs/go-block-format v0.0.1/go.mod h1:DK/YYcsSUIVAFNwo/KZCdIIbpN0ROH/baNLgayt4pFc= -github.com/ipfs/go-block-format v0.0.2/go.mod h1:AWR46JfpcObNfg3ok2JHDUfdiHRgWhJgCQF+KIgOPJY= -github.com/ipfs/go-block-format v0.0.3/go.mod h1:4LmD4ZUw0mhO+JSKdpWwrzATiEfM7WWgQ8H5l6P8MVk= -github.com/ipfs/go-block-format v0.1.1/go.mod h1:+McEIT+g52p+zz5xGAABGSOKrzmrdX97bc0USBdWPUs= -github.com/ipfs/go-block-format v0.1.2/go.mod h1:mACVcrxarQKstUU3Yf/RdwbC4DzPV6++rO2a3d+a/KE= github.com/ipfs/go-block-format v0.2.0 h1:ZqrkxBA2ICbDRbK8KJs/u0O3dlp6gmAuuXUJNiW1Ycs= github.com/ipfs/go-block-format v0.2.0/go.mod h1:+jpL11nFx5A/SPpsoBn6Bzkra/zaArfSmsknbPMYgzM= -github.com/ipfs/go-blockservice v0.1.0/go.mod h1:hzmMScl1kXHg3M2BjTymbVPjv627N7sYcvYaKbop39M= -github.com/ipfs/go-blockservice v0.1.4/go.mod h1:OTZhFpkgY48kNzbgyvcexW9cHrpjBYIjSR0KoDOFOLU= -github.com/ipfs/go-blockservice v0.2.1/go.mod h1:k6SiwmgyYgs4M/qt+ww6amPeUH9EISLRBnvUurKJhi8= -github.com/ipfs/go-blockservice v0.3.0/go.mod h1:P5ppi8IHDC7O+pA0AlGTF09jruB2h+oP3wVVaZl8sfk= -github.com/ipfs/go-blockservice v0.5.0/go.mod h1:W6brZ5k20AehbmERplmERn8o2Ni3ZZubvAxaIUeaT6w= -github.com/ipfs/go-blockservice v0.5.2 h1:in9Bc+QcXwd1apOVM7Un9t8tixPKdaHQFdLSUM1Xgk8= -github.com/ipfs/go-blockservice v0.5.2/go.mod h1:VpMblFEqG67A/H2sHKAemeH9vlURVavlysbdUI632yk= -github.com/ipfs/go-cid v0.0.1/go.mod h1:GHWU/WuQdMPmIosc4Yn1bcCT7dSeX4lBafM7iqUPQvM= github.com/ipfs/go-cid v0.0.2/go.mod 
h1:GHWU/WuQdMPmIosc4Yn1bcCT7dSeX4lBafM7iqUPQvM= github.com/ipfs/go-cid v0.0.3/go.mod h1:GHWU/WuQdMPmIosc4Yn1bcCT7dSeX4lBafM7iqUPQvM= -github.com/ipfs/go-cid v0.0.4/go.mod h1:4LLaPOQwmk5z9LBgQnpkivrx8BJjUyGwTXCd5Xfj6+M= github.com/ipfs/go-cid v0.0.5/go.mod h1:plgt+Y5MnOey4vO4UlUazGqdbEXuFYitED67FexhXog= -github.com/ipfs/go-cid v0.0.6/go.mod h1:6Ux9z5e+HpkQdckYoX1PG/6xqKspzlEIR5SDmgqgC/I= github.com/ipfs/go-cid v0.0.7/go.mod h1:6Ux9z5e+HpkQdckYoX1PG/6xqKspzlEIR5SDmgqgC/I= github.com/ipfs/go-cid v0.1.0/go.mod h1:rH5/Xv83Rfy8Rw6xG+id3DYAMUVmem1MowoKwdXmN2o= -github.com/ipfs/go-cid v0.2.0/go.mod h1:P+HXFDF4CVhaVayiEb4wkAy7zBHxBwsJyt0Y5U6MLro= -github.com/ipfs/go-cid v0.3.0/go.mod h1:P+HXFDF4CVhaVayiEb4wkAy7zBHxBwsJyt0Y5U6MLro= -github.com/ipfs/go-cid v0.3.2/go.mod h1:gQ8pKqT/sUxGY+tIwy1RPpAojYu7jAyCp5Tz1svoupw= github.com/ipfs/go-cid v0.4.1 h1:A/T3qGvxi4kpKWWcPC/PgbvDA2bjVLO7n4UeVwnbs/s= github.com/ipfs/go-cid v0.4.1/go.mod h1:uQHwDeX4c6CtyrFwdqyhpNcxVewur1M7l7fNU7LKwZk= -github.com/ipfs/go-cidutil v0.1.0 h1:RW5hO7Vcf16dplUU60Hs0AKDkQAVPVplr7lk97CFL+Q= -github.com/ipfs/go-cidutil v0.1.0/go.mod h1:e7OEVBMIv9JaOxt9zaGEmAoSlXW9jdFZ5lP/0PwcfpA= -github.com/ipfs/go-datastore v0.0.1/go.mod h1:d4KVXhMt913cLBEI/PXAy6ko+W7e9AhyAKBGh803qeE= -github.com/ipfs/go-datastore v0.0.5/go.mod h1:d4KVXhMt913cLBEI/PXAy6ko+W7e9AhyAKBGh803qeE= -github.com/ipfs/go-datastore v0.1.0/go.mod h1:d4KVXhMt913cLBEI/PXAy6ko+W7e9AhyAKBGh803qeE= -github.com/ipfs/go-datastore v0.1.1/go.mod h1:w38XXW9kVFNp57Zj5knbKWM2T+KOZCGDRVNdgPHtbHw= -github.com/ipfs/go-datastore v0.3.1/go.mod h1:w38XXW9kVFNp57Zj5knbKWM2T+KOZCGDRVNdgPHtbHw= -github.com/ipfs/go-datastore v0.4.0/go.mod h1:SX/xMIKoCszPqp+z9JhPYCmoOoXTvaa13XEbGtsFUhA= -github.com/ipfs/go-datastore v0.4.1/go.mod h1:SX/xMIKoCszPqp+z9JhPYCmoOoXTvaa13XEbGtsFUhA= -github.com/ipfs/go-datastore v0.4.4/go.mod h1:SX/xMIKoCszPqp+z9JhPYCmoOoXTvaa13XEbGtsFUhA= -github.com/ipfs/go-datastore v0.4.5/go.mod h1:eXTcaaiN6uOlVCLS9GjJUJtlvJfM3xk23w3fyfrmmJs= 
github.com/ipfs/go-datastore v0.5.0/go.mod h1:9zhEApYMTl17C8YDp7JmU7sQZi2/wqiYh73hakZ90Bk= github.com/ipfs/go-datastore v0.5.1/go.mod h1:9zhEApYMTl17C8YDp7JmU7sQZi2/wqiYh73hakZ90Bk= github.com/ipfs/go-datastore v0.6.0 h1:JKyz+Gvz1QEZw0LsX1IBn+JFCJQH4SJVFtM4uWU0Myk= github.com/ipfs/go-datastore v0.6.0/go.mod h1:rt5M3nNbSO/8q1t4LNkLyUwRs8HupMeN/8O4Vn9YAT8= github.com/ipfs/go-detect-race v0.0.1 h1:qX/xay2W3E4Q1U7d9lNs1sU9nvguX0a7319XbyQ6cOk= github.com/ipfs/go-detect-race v0.0.1/go.mod h1:8BNT7shDZPo99Q74BpGMK+4D8Mn4j46UU0LZ723meps= -github.com/ipfs/go-ds-badger v0.0.2/go.mod h1:Y3QpeSFWQf6MopLTiZD+VT6IC1yZqaGmjvRcKeSGij8= -github.com/ipfs/go-ds-badger v0.0.5/go.mod h1:g5AuuCGmr7efyzQhLL8MzwqcauPojGPUaHzfGTzuE3s= -github.com/ipfs/go-ds-badger v0.0.7/go.mod h1:qt0/fWzZDoPW6jpQeqUjR5kBfhDNB65jd9YlmAvpQBk= -github.com/ipfs/go-ds-badger v0.2.1/go.mod h1:Tx7l3aTph3FMFrRS838dcSJh+jjA7cX9DrGVwx/NOwE= -github.com/ipfs/go-ds-badger v0.2.3/go.mod h1:pEYw0rgg3FIrywKKnL+Snr+w/LjJZVMTBRn4FS6UHUk= github.com/ipfs/go-ds-badger v0.3.0 h1:xREL3V0EH9S219kFFueOYJJTcjgNSZ2HY1iSvN7U1Ro= github.com/ipfs/go-ds-badger v0.3.0/go.mod h1:1ke6mXNqeV8K3y5Ak2bAA0osoTfmxUdupVCGm4QUIek= github.com/ipfs/go-ds-badger4 v0.1.5 h1:MwrTsIUJIqH/ChuDdUOzxwxMxHx/Li1ECoSCKsCUxiA= github.com/ipfs/go-ds-badger4 v0.1.5/go.mod h1:LUU2FbhNdmhAbJmMeoahVRbe4GsduAODSJHWJJh2Vo4= -github.com/ipfs/go-ds-leveldb v0.0.1/go.mod h1:feO8V3kubwsEF22n0YRQCffeb79OOYIykR4L04tMOYc= -github.com/ipfs/go-ds-leveldb v0.1.0/go.mod h1:hqAW8y4bwX5LWcCtku2rFNX3vjDZCy5LZCg+cSZvYb8= -github.com/ipfs/go-ds-leveldb v0.4.1/go.mod h1:jpbku/YqBSsBc1qgME8BkWS4AxzF2cEu1Ii2r79Hh9s= -github.com/ipfs/go-ds-leveldb v0.4.2/go.mod h1:jpbku/YqBSsBc1qgME8BkWS4AxzF2cEu1Ii2r79Hh9s= github.com/ipfs/go-ds-leveldb v0.5.0 h1:s++MEBbD3ZKc9/8/njrn4flZLnCuY9I79v94gBUNumo= github.com/ipfs/go-ds-leveldb v0.5.0/go.mod h1:d3XG9RUDzQ6V4SHi8+Xgj9j1XuEk1z82lquxrVbml/Q= -github.com/ipfs/go-fetcher v1.5.0/go.mod h1:5pDZ0393oRF/fHiLmtFZtpMNBQfHOYNPtryWedVuSWE= 
-github.com/ipfs/go-fetcher v1.6.1/go.mod h1:27d/xMV8bodjVs9pugh/RCjjK2OZ68UgAMspMdingNo= -github.com/ipfs/go-ipfs-blockstore v0.0.1/go.mod h1:d3WClOmRQKFnJ0Jz/jj/zmksX0ma1gROTlovZKBmN08= -github.com/ipfs/go-ipfs-blockstore v0.1.0/go.mod h1:5aD0AvHPi7mZc6Ci1WCAhiBQu2IsfTduLl+422H6Rqw= -github.com/ipfs/go-ipfs-blockstore v0.1.4/go.mod h1:Jxm3XMVjh6R17WvxFEiyKBLUGr86HgIYJW/D/MwqeYQ= -github.com/ipfs/go-ipfs-blockstore v0.2.1/go.mod h1:jGesd8EtCM3/zPgx+qr0/feTXGUeRai6adgwC+Q+JvE= -github.com/ipfs/go-ipfs-blockstore v1.1.2/go.mod h1:w51tNR9y5+QXB0wkNcHt4O2aSZjTdqaEWaQdSxEyUOY= -github.com/ipfs/go-ipfs-blockstore v1.2.0/go.mod h1:eh8eTFLiINYNSNawfZOC7HOxNTxpB1PFuA5E1m/7exE= -github.com/ipfs/go-ipfs-blockstore v1.3.0/go.mod h1:KgtZyc9fq+P2xJUiCAzbRdhhqJHvsw8u2Dlqy2MyRTE= -github.com/ipfs/go-ipfs-blockstore v1.3.1 h1:cEI9ci7V0sRNivqaOr0elDsamxXFxJMMMy7PTTDQNsQ= -github.com/ipfs/go-ipfs-blockstore v1.3.1/go.mod h1:KgtZyc9fq+P2xJUiCAzbRdhhqJHvsw8u2Dlqy2MyRTE= github.com/ipfs/go-ipfs-blocksutil v0.0.1 h1:Eh/H4pc1hsvhzsQoMEP3Bke/aW5P5rVM1IWFJMcGIPQ= github.com/ipfs/go-ipfs-blocksutil v0.0.1/go.mod h1:Yq4M86uIOmxmGPUHv/uI7uKqZNtLb449gwKqXjIsnRk= -github.com/ipfs/go-ipfs-chunker v0.0.1/go.mod h1:tWewYK0we3+rMbOh7pPFGDyypCtvGcBFymgY4rSDLAw= -github.com/ipfs/go-ipfs-chunker v0.0.5 h1:ojCf7HV/m+uS2vhUGWcogIIxiO5ubl5O57Q7NapWLY8= -github.com/ipfs/go-ipfs-chunker v0.0.5/go.mod h1:jhgdF8vxRHycr00k13FM8Y0E+6BoalYeobXmUyTreP8= github.com/ipfs/go-ipfs-delay v0.0.0-20181109222059-70721b86a9a8/go.mod h1:8SP1YXK1M1kXuc4KJZINY3TQQ03J2rwBG9QfXmbRPrw= github.com/ipfs/go-ipfs-delay v0.0.1 h1:r/UXYyRcddO6thwOnhiznIAiSvxMECGgtv35Xs1IeRQ= github.com/ipfs/go-ipfs-delay v0.0.1/go.mod h1:8SP1YXK1M1kXuc4KJZINY3TQQ03J2rwBG9QfXmbRPrw= -github.com/ipfs/go-ipfs-ds-help v0.0.1/go.mod h1:gtP9xRaZXqIQRh1HRpp595KbBEdgqWFxefeVKOV8sxo= -github.com/ipfs/go-ipfs-ds-help v0.1.1/go.mod h1:SbBafGJuGsPI/QL3j9Fc5YPLeAu+SzOkI0gFwAg+mOs= -github.com/ipfs/go-ipfs-ds-help v1.1.0/go.mod 
h1:YR5+6EaebOhfcqVCyqemItCLthrpVNot+rsOU/5IatU= -github.com/ipfs/go-ipfs-ds-help v1.1.1 h1:B5UJOH52IbcfS56+Ul+sv8jnIV10lbjLF5eOO0C66Nw= -github.com/ipfs/go-ipfs-ds-help v1.1.1/go.mod h1:75vrVCkSdSFidJscs8n4W+77AtTpCIAdDGAwjitJMIo= -github.com/ipfs/go-ipfs-exchange-interface v0.0.1/go.mod h1:c8MwfHjtQjPoDyiy9cFquVtVHkO9b9Ob3FG91qJnWCM= -github.com/ipfs/go-ipfs-exchange-interface v0.1.0/go.mod h1:ych7WPlyHqFvCi/uQI48zLZuAWVP5iTQPXEfVaw5WEI= -github.com/ipfs/go-ipfs-exchange-interface v0.2.0/go.mod h1:z6+RhJuDQbqKguVyslSOuVDhqF9JtTrO3eptSAiW2/Y= -github.com/ipfs/go-ipfs-exchange-interface v0.2.1 h1:jMzo2VhLKSHbVe+mHNzYgs95n0+t0Q69GQ5WhRDZV/s= -github.com/ipfs/go-ipfs-exchange-interface v0.2.1/go.mod h1:MUsYn6rKbG6CTtsDp+lKJPmVt3ZrCViNyH3rfPGsZ2E= -github.com/ipfs/go-ipfs-exchange-offline v0.0.1/go.mod h1:WhHSFCVYX36H/anEKQboAzpUws3x7UeEGkzQc3iNkM0= -github.com/ipfs/go-ipfs-exchange-offline v0.1.1/go.mod h1:vTiBRIbzSwDD0OWm+i3xeT0mO7jG2cbJYatp3HPk5XY= -github.com/ipfs/go-ipfs-exchange-offline v0.2.0/go.mod h1:HjwBeW0dvZvfOMwDP0TSKXIHf2s+ksdP4E3MLDRtLKY= -github.com/ipfs/go-ipfs-exchange-offline v0.3.0 h1:c/Dg8GDPzixGd0MC8Jh6mjOwU57uYokgWRFidfvEkuA= -github.com/ipfs/go-ipfs-exchange-offline v0.3.0/go.mod h1:MOdJ9DChbb5u37M1IcbrRB02e++Z7521fMxqCNRrz9s= -github.com/ipfs/go-ipfs-files v0.0.3/go.mod h1:INEFm0LL2LWXBhNJ2PMIIb2w45hpXgPjNoE7yA8Y1d4= -github.com/ipfs/go-ipfs-files v0.0.8/go.mod h1:wiN/jSG8FKyk7N0WyctKSvq3ljIa2NNTiZB55kpTdOs= -github.com/ipfs/go-ipfs-files v0.3.0 h1:fallckyc5PYjuMEitPNrjRfpwl7YFt69heCOUhsbGxQ= -github.com/ipfs/go-ipfs-files v0.3.0/go.mod h1:xAUtYMwB+iu/dtf6+muHNSFQCJG2dSiStR2P6sn9tIM= -github.com/ipfs/go-ipfs-keystore v0.1.0/go.mod h1:LvLw7Qhnb0RlMOfCzK6OmyWxICip6lQ06CCmdbee75U= -github.com/ipfs/go-ipfs-posinfo v0.0.1 h1:Esoxj+1JgSjX0+ylc0hUmJCOv6V2vFoZiETLR6OtpRs= -github.com/ipfs/go-ipfs-posinfo v0.0.1/go.mod h1:SwyeVP+jCwiDu0C313l/8jg6ZxM0qqtlt2a0vILTc1A= -github.com/ipfs/go-ipfs-pq v0.0.1/go.mod 
h1:LWIqQpqfRG3fNc5XsnIhz/wQ2XXGyugQwls7BgUmUfY= -github.com/ipfs/go-ipfs-pq v0.0.2/go.mod h1:LWIqQpqfRG3fNc5XsnIhz/wQ2XXGyugQwls7BgUmUfY= github.com/ipfs/go-ipfs-pq v0.0.3 h1:YpoHVJB+jzK15mr/xsWC574tyDLkezVrDNeaalQBsTE= github.com/ipfs/go-ipfs-pq v0.0.3/go.mod h1:btNw5hsHBpRcSSgZtiNm/SLj5gYIZ18AKtv3kERkRb4= -github.com/ipfs/go-ipfs-redirects-file v0.1.1/go.mod h1:tAwRjCV0RjLTjH8DR/AU7VYvfQECg+lpUy2Mdzv7gyk= -github.com/ipfs/go-ipfs-routing v0.1.0/go.mod h1:hYoUkJLyAUKhF58tysKpids8RNDPO42BVMgK5dNsoqY= -github.com/ipfs/go-ipfs-routing v0.2.1/go.mod h1:xiNNiwgjmLqPS1cimvAw6EyB9rkVDbiocA4yY+wRNLM= -github.com/ipfs/go-ipfs-routing v0.3.0 h1:9W/W3N+g+y4ZDeffSgqhgo7BsBSJwPMcyssET9OWevc= -github.com/ipfs/go-ipfs-routing v0.3.0/go.mod h1:dKqtTFIql7e1zYsEuWLyuOU+E0WJWW8JjbTPLParDWo= -github.com/ipfs/go-ipfs-util v0.0.1/go.mod h1:spsl5z8KUnrve+73pOhSVZND1SIxPW5RyBCNzQxlJBc= github.com/ipfs/go-ipfs-util v0.0.2/go.mod h1:CbPtkWJzjLdEcezDns2XYaehFVNXG9zrdrtMecczcsQ= github.com/ipfs/go-ipfs-util v0.0.3 h1:2RFdGez6bu2ZlZdI+rWfIdbQb1KudQp3VGwPtdNCmE0= github.com/ipfs/go-ipfs-util v0.0.3/go.mod h1:LHzG1a0Ig4G+iZ26UUOMjHd+lfM84LZCrn17xAKWBvs= -github.com/ipfs/go-ipld-cbor v0.0.2/go.mod h1:wTBtrQZA3SoFKMVkp6cn6HMRteIB1VsmHA0AQFOn7Nc= -github.com/ipfs/go-ipld-cbor v0.0.3/go.mod h1:wTBtrQZA3SoFKMVkp6cn6HMRteIB1VsmHA0AQFOn7Nc= -github.com/ipfs/go-ipld-cbor v0.0.5/go.mod h1:BkCduEx3XBCO6t2Sfo5BaHzuok7hbhdMm9Oh8B2Ftq4= -github.com/ipfs/go-ipld-cbor v0.0.6/go.mod h1:ssdxxaLJPXH7OjF5V4NSjBbcfh+evoR4ukuru0oPXMA= -github.com/ipfs/go-ipld-cbor v0.1.0 h1:dx0nS0kILVivGhfWuB6dUpMa/LAwElHPw1yOGYopoYs= -github.com/ipfs/go-ipld-cbor v0.1.0/go.mod h1:U2aYlmVrJr2wsUBU67K4KgepApSZddGRDWBYR0H4sCk= -github.com/ipfs/go-ipld-format v0.0.1/go.mod h1:kyJtbkDALmFHv3QR6et67i35QzO3S0dCDnkOJhcZkms= -github.com/ipfs/go-ipld-format v0.0.2/go.mod h1:4B6+FM2u9OJ9zCV+kSbgFAZlOrv1Hqbf0INGQgiKf9k= -github.com/ipfs/go-ipld-format v0.2.0/go.mod h1:3l3C1uKoadTPbeNfrDi+xMInYKlx2Cvg1BuydPSdzQs= 
-github.com/ipfs/go-ipld-format v0.3.0/go.mod h1:co/SdBE8h99968X0hViiw1MNlh6fvxxnHpvVLnH7jSM= -github.com/ipfs/go-ipld-format v0.3.1/go.mod h1:co/SdBE8h99968X0hViiw1MNlh6fvxxnHpvVLnH7jSM= -github.com/ipfs/go-ipld-format v0.4.0/go.mod h1:co/SdBE8h99968X0hViiw1MNlh6fvxxnHpvVLnH7jSM= -github.com/ipfs/go-ipld-format v0.5.0/go.mod h1:ImdZqJQaEouMjCvqCe0ORUS+uoBmf7Hf+EO/jh+nk3M= github.com/ipfs/go-ipld-format v0.6.0 h1:VEJlA2kQ3LqFSIm5Vu6eIlSxD/Ze90xtc4Meten1F5U= github.com/ipfs/go-ipld-format v0.6.0/go.mod h1:g4QVMTn3marU3qXchwjpKPKgJv+zF+OlaKMyhJ4LHPg= -github.com/ipfs/go-ipld-legacy v0.1.0/go.mod h1:86f5P/srAmh9GcIcWQR9lfFLZPrIyyXQeVlOWeeWEuI= -github.com/ipfs/go-ipld-legacy v0.1.1/go.mod h1:8AyKFCjgRPsQFf15ZQgDB8Din4DML/fOmKZkkFkrIEg= github.com/ipfs/go-ipld-legacy v0.2.1 h1:mDFtrBpmU7b//LzLSypVrXsD8QxkEWxu5qVxN99/+tk= github.com/ipfs/go-ipld-legacy v0.2.1/go.mod h1:782MOUghNzMO2DER0FlBR94mllfdCJCkTtDtPM51otM= -github.com/ipfs/go-ipns v0.2.0/go.mod h1:3cLT2rbvgPZGkHJoPO1YMJeh6LtkxopCkKFcio/wE24= -github.com/ipfs/go-ipns v0.3.0/go.mod h1:3cLT2rbvgPZGkHJoPO1YMJeh6LtkxopCkKFcio/wE24= -github.com/ipfs/go-libipfs v0.1.0/go.mod h1:qX0d9h+wu53PFtCTXxdXVBakd6ZCvGDdkZUKmdLMLx0= -github.com/ipfs/go-libipfs v0.3.0/go.mod h1:pSUHZ5qPJTAidsxe9bAeHp3KIiw2ODEW2a2kM3v+iXI= -github.com/ipfs/go-libipfs v0.4.0/go.mod h1:XsU2cP9jBhDrXoJDe0WxikB8XcVmD3k2MEZvB3dbYu8= -github.com/ipfs/go-libipfs v0.6.0 h1:3FuckAJEm+zdHbHbf6lAyk0QUzc45LsFcGw102oBCZM= -github.com/ipfs/go-libipfs v0.6.0/go.mod h1:UjjDIuehp2GzlNP0HEr5I9GfFT7zWgst+YfpUEIThtw= github.com/ipfs/go-log v0.0.1/go.mod h1:kL1d2/hzSpI0thNYjiKfjanbVNU+IIGA/WnNESY9leM= -github.com/ipfs/go-log v1.0.2/go.mod h1:1MNjMxe0u6xvJZgeqbJ8vdo2TKaGwZ1a0Bpza+sr2Sk= -github.com/ipfs/go-log v1.0.3/go.mod h1:OsLySYkwIbiSUR/yBTdv1qPtcE4FW3WPWk/ewz9Ru+A= github.com/ipfs/go-log v1.0.4/go.mod h1:oDCg2FkjogeFOhqqb+N39l2RpTNPL6F/StPkB3kPgcs= github.com/ipfs/go-log v1.0.5 h1:2dOuUCB1Z7uoczMWgAyDck5JLb72zHzrMnGnCNNbvY8= github.com/ipfs/go-log v1.0.5/go.mod 
h1:j0b8ZoR+7+R99LD9jZ6+AJsrzkPbSXbZfGakb5JPtIo= -github.com/ipfs/go-log/v2 v2.0.2/go.mod h1:O7P1lJt27vWHhOwQmcFEvlmo49ry2VY2+JfBWFaa9+0= github.com/ipfs/go-log/v2 v2.0.3/go.mod h1:O7P1lJt27vWHhOwQmcFEvlmo49ry2VY2+JfBWFaa9+0= github.com/ipfs/go-log/v2 v2.0.5/go.mod h1:eZs4Xt4ZUJQFM3DlanGhy7TkwwawCZcSByscwkWG+dw= github.com/ipfs/go-log/v2 v2.1.1/go.mod h1:2v2nsGfZsvvAJz13SyFzf9ObaqwHiHxsPLEHntrv9KM= @@ -1219,93 +1056,28 @@ github.com/ipfs/go-log/v2 v2.3.0/go.mod h1:QqGoj30OTpnKaG/LKTGTxoP2mmQtjVMEnK72g github.com/ipfs/go-log/v2 v2.5.0/go.mod h1:prSpmC1Gpllc9UYWxDiZDreBYw7zp4Iqp1kOLU9U5UI= github.com/ipfs/go-log/v2 v2.5.1 h1:1XdUzF7048prq4aBjDQQ4SL5RxftpRGdXhNRwKSAlcY= github.com/ipfs/go-log/v2 v2.5.1/go.mod h1:prSpmC1Gpllc9UYWxDiZDreBYw7zp4Iqp1kOLU9U5UI= -github.com/ipfs/go-merkledag v0.2.3/go.mod h1:SQiXrtSts3KGNmgOzMICy5c0POOpUNQLvB3ClKnBAlk= -github.com/ipfs/go-merkledag v0.3.2/go.mod h1:fvkZNNZixVW6cKSZ/JfLlON5OlgTXNdRLz0p6QG/I2M= -github.com/ipfs/go-merkledag v0.5.1/go.mod h1:cLMZXx8J08idkp5+id62iVftUQV+HlYJ3PIhDfZsjA4= -github.com/ipfs/go-merkledag v0.6.0/go.mod h1:9HSEwRd5sV+lbykiYP+2NC/3o6MZbKNaa4hfNcH5iH0= -github.com/ipfs/go-merkledag v0.9.0/go.mod h1:bPHqkHt5OZ0p1n3iqPeDiw2jIBkjAytRjS3WSBwjq90= -github.com/ipfs/go-merkledag v0.10.0/go.mod h1:zkVav8KiYlmbzUzNM6kENzkdP5+qR7+2mCwxkQ6GIj8= -github.com/ipfs/go-merkledag v0.11.0 h1:DgzwK5hprESOzS4O1t/wi6JDpyVQdvm9Bs59N/jqfBY= -github.com/ipfs/go-merkledag v0.11.0/go.mod h1:Q4f/1ezvBiJV0YCIXvt51W/9/kqJGH4I1LsA7+djsM4= github.com/ipfs/go-metrics-interface v0.0.1 h1:j+cpbjYvu4R8zbleSs36gvB7jR+wsL2fGD6n0jO4kdg= github.com/ipfs/go-metrics-interface v0.0.1/go.mod h1:6s6euYU4zowdslK0GKHmqaIZ3j/b/tL7HTWtJ4VPgWY= github.com/ipfs/go-metrics-prometheus v0.0.2 h1:9i2iljLg12S78OhC6UAiXi176xvQGiZaGVF1CUVdE+s= github.com/ipfs/go-metrics-prometheus v0.0.2/go.mod h1:ELLU99AQQNi+zX6GCGm2lAgnzdSH3u5UVlCdqSXnEks= -github.com/ipfs/go-namesys v0.7.0/go.mod h1:KYSZBVZG3VJC34EfqqJPG7T48aWgxseoMPAPA5gLyyQ= -github.com/ipfs/go-path 
v0.1.1/go.mod h1:vC8q4AKOtrjJz2NnllIrmr2ZbGlF5fW2OKKyhV9ggb0= -github.com/ipfs/go-path v0.3.0/go.mod h1:NOScsVgxfC/eIw4nz6OiGwK42PjaSJ4Y/ZFPn1Xe07I= -github.com/ipfs/go-peertaskqueue v0.1.0/go.mod h1:Jmk3IyCcfl1W3jTW3YpghSwSEC6IJ3Vzz/jUmWw8Z0U= -github.com/ipfs/go-peertaskqueue v0.1.1/go.mod h1:Jmk3IyCcfl1W3jTW3YpghSwSEC6IJ3Vzz/jUmWw8Z0U= -github.com/ipfs/go-peertaskqueue v0.2.0/go.mod h1:5/eNrBEbtSKWCG+kQK8K8fGNixoYUnr+P7jivavs9lY= -github.com/ipfs/go-peertaskqueue v0.7.0/go.mod h1:M/akTIE/z1jGNXMU7kFB4TeSEFvj68ow0Rrb04donIU= -github.com/ipfs/go-peertaskqueue v0.8.0/go.mod h1:cz8hEnnARq4Du5TGqiWKgMr/BOSQ5XOgMOh1K5YYKKM= github.com/ipfs/go-peertaskqueue v0.8.1 h1:YhxAs1+wxb5jk7RvS0LHdyiILpNmRIRnZVztekOF0pg= github.com/ipfs/go-peertaskqueue v0.8.1/go.mod h1:Oxxd3eaK279FxeydSPPVGHzbwVeHjatZ2GA8XD+KbPU= github.com/ipfs/go-test v0.0.4 h1:DKT66T6GBB6PsDFLoO56QZPrOmzJkqU1FZH5C9ySkew= github.com/ipfs/go-test v0.0.4/go.mod h1:qhIM1EluEfElKKM6fnWxGn822/z9knUGM1+I/OAQNKI= -github.com/ipfs/go-unixfs v0.2.4/go.mod h1:SUdisfUjNoSDzzhGVxvCL9QO/nKdwXdr+gbMUdqcbYw= -github.com/ipfs/go-unixfs v0.3.1/go.mod h1:h4qfQYzghiIc8ZNFKiLMFWOTzrWIAtzYQ59W/pCFf1o= -github.com/ipfs/go-unixfs v0.4.3/go.mod h1:TSG7G1UuT+l4pNj91raXAPkX0BhJi3jST1FDTfQ5QyM= -github.com/ipfs/go-unixfs v0.4.4/go.mod h1:TSG7G1UuT+l4pNj91raXAPkX0BhJi3jST1FDTfQ5QyM= -github.com/ipfs/go-unixfs v0.4.5 h1:wj8JhxvV1G6CD7swACwSKYa+NgtdWC1RUit+gFnymDU= -github.com/ipfs/go-unixfs v0.4.5/go.mod h1:BIznJNvt/gEx/ooRMI4Us9K8+qeGO7vx1ohnbk8gjFg= -github.com/ipfs/go-unixfsnode v1.1.2/go.mod h1:5dcE2x03pyjHk4JjamXmunTMzz+VUtqvPwZjIEkfV6s= -github.com/ipfs/go-unixfsnode v1.4.0/go.mod h1:qc7YFFZ8tABc58p62HnIYbUMwj9chhUuFWmxSokfePo= -github.com/ipfs/go-unixfsnode v1.5.1/go.mod h1:ed79DaG9IEuZITJVQn4U6MZDftv6I3ygUBLPfhEbHvk= -github.com/ipfs/go-unixfsnode v1.5.2/go.mod h1:NlOebRwYx8lMCNMdhAhEspYPBD3obp7TE0LvBqHY+ks= -github.com/ipfs/go-unixfsnode v1.7.1/go.mod h1:PVfoyZkX1B34qzT3vJO4nsLUpRCyhnMuHBznRcXirlk= -github.com/ipfs/go-unixfsnode 
v1.9.0 h1:ubEhQhr22sPAKO2DNsyVBW7YB/zA8Zkif25aBvz8rc8= -github.com/ipfs/go-unixfsnode v1.9.0/go.mod h1:HxRu9HYHOjK6HUqFBAi++7DVoWAHn0o4v/nZ/VA+0g8= -github.com/ipfs/go-verifcid v0.0.1/go.mod h1:5Hrva5KBeIog4A+UpqlaIU+DEstipcJYQQZc0g37pY0= -github.com/ipfs/go-verifcid v0.0.2/go.mod h1:40cD9x1y4OWnFXbLNJYRe7MpNvWlMn3LZAG5Wb4xnPU= -github.com/ipfs/go-verifcid v0.0.3 h1:gmRKccqhWDocCRkC+a59g5QW7uJw5bpX9HWBevXa0zs= -github.com/ipfs/go-verifcid v0.0.3/go.mod h1:gcCtGniVzelKrbk9ooUSX/pM3xlH73fZZJDzQJRvOUw= -github.com/ipfs/interface-go-ipfs-core v0.9.0/go.mod h1:F3EcmDy53GFkF0H3iEJpfJC320fZ/4G60eftnItrrJ0= -github.com/ipfs/interface-go-ipfs-core v0.10.0/go.mod h1:F3EcmDy53GFkF0H3iEJpfJC320fZ/4G60eftnItrrJ0= -github.com/ipld/go-car v0.5.0/go.mod h1:ppiN5GWpjOZU9PgpAZ9HbZd9ZgSpwPMr48fGRJOWmvE= -github.com/ipld/go-car v0.6.2 h1:Hlnl3Awgnq8icK+ze3iRghk805lu8YNq3wlREDTF2qc= -github.com/ipld/go-car v0.6.2/go.mod h1:oEGXdwp6bmxJCZ+rARSkDliTeYnVzv3++eXajZ+Bmr8= -github.com/ipld/go-car/v2 v2.1.1/go.mod h1:+2Yvf0Z3wzkv7NeI69i8tuZ+ft7jyjPYIWZzeVNeFcI= -github.com/ipld/go-car/v2 v2.5.1/go.mod h1:jKjGOqoCj5zn6KjnabD6JbnCsMntqU2hLiU6baZVO3E= -github.com/ipld/go-car/v2 v2.8.0/go.mod h1:a+BnAxUqgr7wcWxW/lI6ctyEQ2v9gjBChPytwFMp2f4= -github.com/ipld/go-car/v2 v2.10.1/go.mod h1:sQEkXVM3csejlb1kCCb+vQ/pWBKX9QtvsrysMQjOgOg= -github.com/ipld/go-car/v2 v2.13.1 h1:KnlrKvEPEzr5IZHKTXLAEub+tPrzeAFQVRlSQvuxBO4= -github.com/ipld/go-car/v2 v2.13.1/go.mod h1:QkdjjFNGit2GIkpQ953KBwowuoukoM75nP/JI1iDJdo= -github.com/ipld/go-codec-dagpb v1.3.0/go.mod h1:ga4JTU3abYApDC3pZ00BC2RSvC3qfBb9MSJkMLSwnhA= -github.com/ipld/go-codec-dagpb v1.3.1/go.mod h1:ErNNglIi5KMur/MfFE/svtgQthzVvf+43MrzLbpcIZY= -github.com/ipld/go-codec-dagpb v1.4.1/go.mod h1:XdXTO/TUD/ra9RcK/NfmwBfr1JpFxM2uRKaB9oe4LxE= -github.com/ipld/go-codec-dagpb v1.5.0/go.mod h1:0yRIutEFD8o1DGVqw4RSHh+BUTlJA9XWldxaaWR/o4g= github.com/ipld/go-codec-dagpb v1.6.0 h1:9nYazfyu9B1p3NAgfVdpRco3Fs2nFC72DqVsMj6rOcc= github.com/ipld/go-codec-dagpb v1.6.0/go.mod 
h1:ANzFhfP2uMJxRBr8CE+WQWs5UsNa0pYtmKZ+agnUw9s= -github.com/ipld/go-ipld-prime v0.9.0/go.mod h1:KvBLMr4PX1gWptgkzRjVZCrLmSGcZCb/jioOQwCqZN8= -github.com/ipld/go-ipld-prime v0.9.1-0.20210324083106-dc342a9917db/go.mod h1:KvBLMr4PX1gWptgkzRjVZCrLmSGcZCb/jioOQwCqZN8= -github.com/ipld/go-ipld-prime v0.11.0/go.mod h1:+WIAkokurHmZ/KwzDOMUuoeJgaRQktHtEaLglS3ZeV8= -github.com/ipld/go-ipld-prime v0.14.0/go.mod h1:9ASQLwUFLptCov6lIYc70GRB4V7UTyLD0IJtrDJe6ZM= -github.com/ipld/go-ipld-prime v0.16.0/go.mod h1:axSCuOCBPqrH+gvXr2w9uAOulJqBPhHPT2PjoiiU1qA= -github.com/ipld/go-ipld-prime v0.18.0/go.mod h1:735yXW548CKrLwVCYXzqx90p5deRJMVVxM9eJ4Qe+qE= -github.com/ipld/go-ipld-prime v0.19.0/go.mod h1:Q9j3BaVXwaA3o5JUDNvptDDr/x8+F7FG6XJ8WI3ILg4= -github.com/ipld/go-ipld-prime v0.20.0/go.mod h1:PzqZ/ZR981eKbgdr3y2DJYeD/8bgMawdGVlJDE8kK+M= github.com/ipld/go-ipld-prime v0.21.0 h1:n4JmcpOlPDIxBcY037SVfpd1G+Sj1nKZah0m6QH9C2E= github.com/ipld/go-ipld-prime v0.21.0/go.mod h1:3RLqy//ERg/y5oShXXdx5YIp50cFGOanyMctpPjsvxQ= -github.com/ipld/go-ipld-prime/storage/bsadapter v0.0.0-20211210234204-ce2a1c70cd73/go.mod h1:2PJ0JgxyB08t0b2WKrcuqI3di0V+5n6RS/LTUJhkoxY= -github.com/ipld/go-ipld-prime/storage/bsadapter v0.0.0-20230102063945-1a409dc236dd h1:gMlw/MhNr2Wtp5RwGdsW23cs+yCuj9k2ON7i9MiJlRo= -github.com/ipld/go-ipld-prime/storage/bsadapter v0.0.0-20230102063945-1a409dc236dd/go.mod h1:wZ8hH8UxeryOs4kJEJaiui/s00hDSbE37OKsL47g+Sw= -github.com/jackpal/gateway v1.0.5/go.mod h1:lTpwd4ACLXmpyiCTRtfiNyVnUmqT9RivzCDQetPfnjA= -github.com/jackpal/go-nat-pmp v1.0.1/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc= github.com/jackpal/go-nat-pmp v1.0.2 h1:KzKSgb7qkJvOUTqYl9/Hg/me3pWgBmERKrTGD7BdWus= github.com/jackpal/go-nat-pmp v1.0.2/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc= -github.com/jbenet/go-cienv v0.0.0-20150120210510-1bb1476777ec/go.mod h1:rGaEvXB4uRSZMmzKNLoXvTu1sfx+1kv/DojUlPrSZGs= github.com/jbenet/go-cienv v0.1.0/go.mod h1:TqNnHUmJgXau0nCzC7kXWeotg3J9W34CUv5Djy1+FlA= 
-github.com/jbenet/go-temp-err-catcher v0.0.0-20150120210811-aac704a3f4f2/go.mod h1:8GXXJV31xl8whumTzdZsTt3RnUIiPqzkyf7mxToRCMs= github.com/jbenet/go-temp-err-catcher v0.1.0 h1:zpb3ZH6wIE8Shj2sKS+khgRvf7T7RABoLk/+KKHggpk= github.com/jbenet/go-temp-err-catcher v0.1.0/go.mod h1:0kJRvmDZXNMIiJirNPEYfhpPwbGVtZVWC34vc5WLsDk= -github.com/jbenet/goprocess v0.0.0-20160826012719-b497e2f366b8/go.mod h1:Ly/wlsjFq/qrU3Rar62tu1gASgGw6chQbSh/XgIIXCY= github.com/jbenet/goprocess v0.1.3/go.mod h1:5yspPrukOVuOLORacaBi858NqyClJPQxYZlqdZVfqY4= github.com/jbenet/goprocess v0.1.4 h1:DRGOFReOMqqDNXwW70QkacFW0YN9QnwLV0Vqk+3oU0o= github.com/jbenet/goprocess v0.1.4/go.mod h1:5yspPrukOVuOLORacaBi858NqyClJPQxYZlqdZVfqY4= github.com/jedisct1/go-minisign v0.0.0-20190909160543-45766022959e/go.mod h1:G1CVv03EnqU1wYL2dFwXxW2An0az9JTl/ZsqXQeBlkU= github.com/jellevandenhooff/dkim v0.0.0-20150330215556-f50fe3d243e1/go.mod h1:E0B/fFc00Y+Rasa88328GlI/XbtyysCtTHZS8h7IrBU= -github.com/jellydator/ttlcache/v2 v2.11.1/go.mod h1:RtE5Snf0/57e+2cLWFYWCCsLas2Hy3c5Z4n14XmSvTI= github.com/jessevdk/go-flags v0.0.0-20141203071132-1679536dcc89/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= github.com/jhump/protoreflect v1.15.1 h1:HUMERORf3I3ZdX05WaQ6MIpd/NJ434hTp5YiKgfCL6c= @@ -1333,7 +1105,6 @@ github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHm github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= github.com/jsternberg/zap-logfmt v1.0.0/go.mod h1:uvPs/4X51zdkcm5jXl5SYoN+4RK21K8mysFmDaM/h+o= -github.com/jtolds/gls v4.2.1+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo= github.com/jtolds/gls 
v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= @@ -1351,21 +1122,13 @@ github.com/klauspost/compress v1.11.7/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYs github.com/klauspost/compress v1.12.3/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg= github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= github.com/klauspost/compress v1.15.1/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= -github.com/klauspost/compress v1.15.10/go.mod h1:QPwzmACJjUTFsnSHH934V6woptycfrDDJnH7hvFVbGM= github.com/klauspost/compress v1.15.11/go.mod h1:QPwzmACJjUTFsnSHH934V6woptycfrDDJnH7hvFVbGM= -github.com/klauspost/compress v1.15.12/go.mod h1:QPwzmACJjUTFsnSHH934V6woptycfrDDJnH7hvFVbGM= github.com/klauspost/compress v1.17.9 h1:6KIumPrER1LHsvBVuDa0r5xaG0Es51mhhB9BQB2qeMA= github.com/klauspost/compress v1.17.9/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw= github.com/klauspost/cpuid v0.0.0-20170728055534-ae7887de9fa5/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= github.com/klauspost/cpuid/v2 v2.0.4/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= -github.com/klauspost/cpuid/v2 v2.0.6/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= github.com/klauspost/cpuid/v2 v2.0.12/go.mod h1:g2LTdtYhdyuGPqyWyv7qRAmj1WBqxuObKfj5c0PQa7c= -github.com/klauspost/cpuid/v2 v2.1.0/go.mod h1:RVVoqg1df56z8g3pUjL/3lE5UfnlrJX8tyFgg4nqhuY= -github.com/klauspost/cpuid/v2 v2.1.1/go.mod h1:RVVoqg1df56z8g3pUjL/3lE5UfnlrJX8tyFgg4nqhuY= -github.com/klauspost/cpuid/v2 v2.1.2/go.mod h1:RVVoqg1df56z8g3pUjL/3lE5UfnlrJX8tyFgg4nqhuY= -github.com/klauspost/cpuid/v2 v2.2.1/go.mod h1:RVVoqg1df56z8g3pUjL/3lE5UfnlrJX8tyFgg4nqhuY= -github.com/klauspost/cpuid/v2 v2.2.3/go.mod h1:RVVoqg1df56z8g3pUjL/3lE5UfnlrJX8tyFgg4nqhuY= 
github.com/klauspost/cpuid/v2 v2.2.8 h1:+StwCXwm9PdpiEkPyzBXIy+M9KUb4ODm0Zarf1kS5BM= github.com/klauspost/cpuid/v2 v2.2.8/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws= github.com/klauspost/crc32 v0.0.0-20161016154125-cb6bfca970f6/go.mod h1:+ZoRqAPRLkC4NPOvfYeR5KNOrY6TD+/sAC3HXPZgDYg= @@ -1374,17 +1137,14 @@ github.com/klauspost/reedsolomon v1.12.1 h1:NhWgum1efX1x58daOBGCFWcxtEhOhXKKl1HA github.com/klauspost/reedsolomon v1.12.1/go.mod h1:nEi5Kjb6QqtbofI6s+cbG/j1da11c96IBYBSnVGtuBs= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/koron/go-ssdp v0.0.0-20180514024734-4a0ed625a78b/go.mod h1:5Ky9EC2xfoUKUor0Hjgi2BJhCSXJfMOFlmyYrVKGQMk= github.com/koron/go-ssdp v0.0.0-20191105050749-2e1c40ed0b5d/go.mod h1:5Ky9EC2xfoUKUor0Hjgi2BJhCSXJfMOFlmyYrVKGQMk= github.com/koron/go-ssdp v0.0.2/go.mod h1:XoLfkAiA2KeZsYh4DbHxD7h3nR2AZNqVQOa+LJuqPYs= -github.com/koron/go-ssdp v0.0.3/go.mod h1:b2MxI6yh02pKrsyNoQUsk4+YNikaGhe4894J+Q5lDvA= github.com/koron/go-ssdp v0.0.4 h1:1IDwrghSKYM7yLf7XCzbByg2sJ/JcNOZRXS2jczTwz0= github.com/koron/go-ssdp v0.0.4/go.mod h1:oDXq+E5IL5q0U8uSBcoAXzTzInwy5lEgC91HoKtbmZk= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= -github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= @@ -1403,8 +1163,6 @@ 
github.com/leodido/go-urn v1.2.0/go.mod h1:+8+nEpDfqqsY+g338gtMEUOtuK+4dEMhiQEgx github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/lib/pq v1.10.7 h1:p7ZhMD+KsSRozJr34udlUrhboJwWAgCg34+/ZZNvZZw= github.com/lib/pq v1.10.7/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= -github.com/libp2p/go-addr-util v0.0.1/go.mod h1:4ac6O7n9rIAKB1dnd+s8IbbMXkt+oBpzX4/+RACcnlQ= -github.com/libp2p/go-addr-util v0.0.2/go.mod h1:Ecd6Fb3yIuLzq4bD7VcywcVSBtefcAwnUISBM3WG15E= github.com/libp2p/go-addr-util v0.1.0/go.mod h1:6I3ZYuFr2O/9D+SoyM0zEw0EF3YkldtTX406BpdQMqw= github.com/libp2p/go-buffer-pool v0.0.1/go.mod h1:xtyIz9PMobb13WaxR6Zo1Pd1zXJKYg0a8KiIvDp3TzQ= github.com/libp2p/go-buffer-pool v0.0.2/go.mod h1:MvaB6xw5vOrDl8rYZGLFdKAuk/hRoRZd1Vi32+RXyFM= @@ -1412,283 +1170,120 @@ github.com/libp2p/go-buffer-pool v0.1.0 h1:oK4mSFcQz7cTQIfqbe4MIj9gLW+mnanjyFtc6 github.com/libp2p/go-buffer-pool v0.1.0/go.mod h1:N+vh8gMqimBzdKkSMVuydVDq+UV5QTWy5HSiZacSbPg= github.com/libp2p/go-cidranger v1.1.0 h1:ewPN8EZ0dd1LSnrtuwd4709PXVcITVeuwbag38yPW7c= github.com/libp2p/go-cidranger v1.1.0/go.mod h1:KWZTfSr+r9qEo9OkI9/SIEeAtw+NNoU0dXIXt15Okic= -github.com/libp2p/go-conn-security-multistream v0.1.0/go.mod h1:aw6eD7LOsHEX7+2hJkDxw1MteijaVcI+/eP2/x3J1xc= -github.com/libp2p/go-conn-security-multistream v0.2.0/go.mod h1:hZN4MjlNetKD3Rq5Jb/P5ohUnFLNzEAR4DLSzpn2QLU= -github.com/libp2p/go-conn-security-multistream v0.2.1/go.mod h1:cR1d8gA0Hr59Fj6NhaTpFhJZrjSYuNmhpT2r25zYR70= github.com/libp2p/go-conn-security-multistream v0.3.0/go.mod h1:EEP47t4fw/bTelVmEzIDqSe69hO/ip52xBEhZMLWAHM= -github.com/libp2p/go-eventbus v0.1.0/go.mod h1:vROgu5cs5T7cv7POWlWxBaVLxfSegC5UGQf8A2eEmx4= github.com/libp2p/go-eventbus v0.2.1/go.mod h1:jc2S4SoEVPP48H9Wpzm5aiGwUCBMfGhVhhBjyhhCJs8= github.com/libp2p/go-flow-metrics v0.0.1/go.mod h1:Iv1GH0sG8DtYN3SVJ2eG221wMiNpZxBdp967ls1g+k8= github.com/libp2p/go-flow-metrics v0.0.3/go.mod h1:HeoSNUrOJVK1jEpDqVEiUOIXqhbnS27omG0uWU5slZs= 
github.com/libp2p/go-flow-metrics v0.1.0 h1:0iPhMI8PskQwzh57jB9WxIuIOQ0r+15PChFGkx3Q3WM= github.com/libp2p/go-flow-metrics v0.1.0/go.mod h1:4Xi8MX8wj5aWNDAZttg6UPmc0ZrnFNsMtpsYUClFtro= -github.com/libp2p/go-libp2p v0.1.0/go.mod h1:6D/2OBauqLUoqcADOJpn9WbKqvaM07tDw68qHM0BxUM= -github.com/libp2p/go-libp2p v0.1.1/go.mod h1:I00BRo1UuUSdpuc8Q2mN7yDF/oTUTRAX6JWpTiK9Rp8= -github.com/libp2p/go-libp2p v0.6.1/go.mod h1:CTFnWXogryAHjXAKEbOf1OWY+VeAP3lDMZkfEI5sT54= -github.com/libp2p/go-libp2p v0.7.0/go.mod h1:hZJf8txWeCduQRDC/WSqBGMxaTHCOYHt2xSU1ivxn0k= -github.com/libp2p/go-libp2p v0.7.4/go.mod h1:oXsBlTLF1q7pxr+9w6lqzS1ILpyHsaBPniVO7zIHGMw= -github.com/libp2p/go-libp2p v0.8.1/go.mod h1:QRNH9pwdbEBpx5DTJYg+qxcVaDMAz3Ee/qDKwXujH5o= -github.com/libp2p/go-libp2p v0.13.0/go.mod h1:pM0beYdACRfHO1WcJlp65WXyG2A6NqYM+t2DTVAJxMo= -github.com/libp2p/go-libp2p v0.14.3/go.mod h1:d12V4PdKbpL0T1/gsUNN8DfgMuRPDX8bS2QxCZlwRH0= github.com/libp2p/go-libp2p v0.19.0/go.mod h1:Ki9jJXLO2YqrTIFxofV7Twyd3INWPT97+r8hGt7XPjI= -github.com/libp2p/go-libp2p v0.22.0/go.mod h1:UDolmweypBSjQb2f7xutPnwZ/fxioLbMBxSjRksxxU4= -github.com/libp2p/go-libp2p v0.23.4/go.mod h1:s9DEa5NLR4g+LZS+md5uGU4emjMWFiqkZr6hBTY8UxI= -github.com/libp2p/go-libp2p v0.25.0/go.mod h1:vXHmFpcfl+xIGN4qW58Bw3a0/SKGAesr5/T4IuJHE3o= -github.com/libp2p/go-libp2p v0.25.1/go.mod h1:xnK9/1d9+jeQCVvi/f1g12KqtVi/jP/SijtKV1hML3g= github.com/libp2p/go-libp2p v0.36.2 h1:BbqRkDaGC3/5xfaJakLV/BrpjlAuYqSB0lRvtzL3B/U= github.com/libp2p/go-libp2p v0.36.2/go.mod h1:XO3joasRE4Eup8yCTTP/+kX+g92mOgRaadk46LmPhHY= github.com/libp2p/go-libp2p-asn-util v0.1.0/go.mod h1:wu+AnM9Ii2KgO5jMmS1rz9dvzTdj8BXqsPR9HR0XB7I= -github.com/libp2p/go-libp2p-asn-util v0.2.0/go.mod h1:WoaWxbHKBymSN41hWSq/lGKJEca7TNm58+gGJi2WsLI= github.com/libp2p/go-libp2p-asn-util v0.4.1 h1:xqL7++IKD9TBFMgnLPZR6/6iYhawHKHl950SO9L6n94= github.com/libp2p/go-libp2p-asn-util v0.4.1/go.mod h1:d/NI6XZ9qxw67b4e+NgpQexCIiFYJjErASrYW4PFDN8= -github.com/libp2p/go-libp2p-autonat v0.1.0/go.mod 
h1:1tLf2yXxiE/oKGtDwPYWTSYG3PtvYlJmg7NeVtPRqH8= -github.com/libp2p/go-libp2p-autonat v0.1.1/go.mod h1:OXqkeGOY2xJVWKAGV2inNF5aKN/djNA3fdpCWloIudE= -github.com/libp2p/go-libp2p-autonat v0.2.0/go.mod h1:DX+9teU4pEEoZUqR1PiMlqliONQdNbfzE1C718tcViI= -github.com/libp2p/go-libp2p-autonat v0.2.1/go.mod h1:MWtAhV5Ko1l6QBsHQNSuM6b1sRkXrpk0/LqCr+vCVxI= -github.com/libp2p/go-libp2p-autonat v0.2.2/go.mod h1:HsM62HkqZmHR2k1xgX34WuWDzk/nBwNHoeyyT4IWV6A= -github.com/libp2p/go-libp2p-autonat v0.4.0/go.mod h1:YxaJlpr81FhdOv3W3BTconZPfhaYivRdf53g+S2wobk= -github.com/libp2p/go-libp2p-autonat v0.4.2/go.mod h1:YxaJlpr81FhdOv3W3BTconZPfhaYivRdf53g+S2wobk= -github.com/libp2p/go-libp2p-blankhost v0.1.1/go.mod h1:pf2fvdLJPsC1FsVrNP3DUUvMzUts2dsLLBEpo1vW1ro= -github.com/libp2p/go-libp2p-blankhost v0.1.4/go.mod h1:oJF0saYsAXQCSfDq254GMNmLNz6ZTHTOvtF4ZydUvwU= github.com/libp2p/go-libp2p-blankhost v0.2.0/go.mod h1:eduNKXGTioTuQAUcZ5epXi9vMl+t4d8ugUBRQ4SqaNQ= github.com/libp2p/go-libp2p-blankhost v0.3.0/go.mod h1:urPC+7U01nCGgJ3ZsV8jdwTp6Ji9ID0dMTvq+aJ+nZU= -github.com/libp2p/go-libp2p-circuit v0.1.0/go.mod h1:Ahq4cY3V9VJcHcn1SBXjr78AbFkZeIRmfunbA7pmFh8= -github.com/libp2p/go-libp2p-circuit v0.1.4/go.mod h1:CY67BrEjKNDhdTk8UgBX1Y/H5c3xkAcs3gnksxY7osU= -github.com/libp2p/go-libp2p-circuit v0.2.1/go.mod h1:BXPwYDN5A8z4OEY9sOfr2DUQMLQvKt/6oku45YUmjIo= -github.com/libp2p/go-libp2p-circuit v0.4.0/go.mod h1:t/ktoFIUzM6uLQ+o1G6NuBl2ANhBKN9Bc8jRIk31MoA= github.com/libp2p/go-libp2p-circuit v0.6.0/go.mod h1:kB8hY+zCpMeScyvFrKrGicRdid6vNXbunKE4rXATZ0M= -github.com/libp2p/go-libp2p-core v0.0.1/go.mod h1:g/VxnTZ/1ygHxH3dKok7Vno1VfpvGcGip57wjTU4fco= -github.com/libp2p/go-libp2p-core v0.0.2/go.mod h1:9dAcntw/n46XycV4RnlBq3BpgrmyUi9LuoTNdPrbUco= -github.com/libp2p/go-libp2p-core v0.0.3/go.mod h1:j+YQMNz9WNSkNezXOsahp9kwZBKBvxLpKD316QWSJXE= -github.com/libp2p/go-libp2p-core v0.0.4/go.mod h1:jyuCQP356gzfCFtRKyvAbNkyeuxb7OlyhWZ3nls5d2I= github.com/libp2p/go-libp2p-core v0.2.0/go.mod 
h1:X0eyB0Gy93v0DZtSYbEM7RnMChm9Uv3j7yRXjO77xSI= -github.com/libp2p/go-libp2p-core v0.2.2/go.mod h1:8fcwTbsG2B+lTgRJ1ICZtiM5GWCWZVoVrLaDRvIRng0= -github.com/libp2p/go-libp2p-core v0.2.4/go.mod h1:STh4fdfa5vDYr0/SzYYeqnt+E6KfEV5VxfIrm0bcI0g= github.com/libp2p/go-libp2p-core v0.3.0/go.mod h1:ACp3DmS3/N64c2jDzcV429ukDpicbL6+TrrxANBjPGw= -github.com/libp2p/go-libp2p-core v0.3.1/go.mod h1:thvWy0hvaSBhnVBaW37BvzgVV68OUhgJJLAa6almrII= -github.com/libp2p/go-libp2p-core v0.4.0/go.mod h1:49XGI+kc38oGVwqSBhDEwytaAxgZasHhFfQKibzTls0= github.com/libp2p/go-libp2p-core v0.5.0/go.mod h1:49XGI+kc38oGVwqSBhDEwytaAxgZasHhFfQKibzTls0= github.com/libp2p/go-libp2p-core v0.5.1/go.mod h1:uN7L2D4EvPCvzSH5SrhR72UWbnSGpt5/a35Sm4upn4Y= -github.com/libp2p/go-libp2p-core v0.5.4/go.mod h1:uN7L2D4EvPCvzSH5SrhR72UWbnSGpt5/a35Sm4upn4Y= -github.com/libp2p/go-libp2p-core v0.5.5/go.mod h1:vj3awlOr9+GMZJFH9s4mpt9RHHgGqeHCopzbYKZdRjM= -github.com/libp2p/go-libp2p-core v0.5.6/go.mod h1:txwbVEhHEXikXn9gfC7/UDDw7rkxuX0bJvM49Ykaswo= github.com/libp2p/go-libp2p-core v0.5.7/go.mod h1:txwbVEhHEXikXn9gfC7/UDDw7rkxuX0bJvM49Ykaswo= github.com/libp2p/go-libp2p-core v0.6.0/go.mod h1:txwbVEhHEXikXn9gfC7/UDDw7rkxuX0bJvM49Ykaswo= -github.com/libp2p/go-libp2p-core v0.7.0/go.mod h1:FfewUH/YpvWbEB+ZY9AQRQ4TAD8sJBt/G1rVvhz5XT8= github.com/libp2p/go-libp2p-core v0.8.0/go.mod h1:FfewUH/YpvWbEB+ZY9AQRQ4TAD8sJBt/G1rVvhz5XT8= -github.com/libp2p/go-libp2p-core v0.8.1/go.mod h1:FfewUH/YpvWbEB+ZY9AQRQ4TAD8sJBt/G1rVvhz5XT8= -github.com/libp2p/go-libp2p-core v0.8.2/go.mod h1:FfewUH/YpvWbEB+ZY9AQRQ4TAD8sJBt/G1rVvhz5XT8= -github.com/libp2p/go-libp2p-core v0.8.5/go.mod h1:FfewUH/YpvWbEB+ZY9AQRQ4TAD8sJBt/G1rVvhz5XT8= github.com/libp2p/go-libp2p-core v0.8.6/go.mod h1:dgHr0l0hIKfWpGpqAMbpo19pen9wJfdCGv51mTmdpmM= github.com/libp2p/go-libp2p-core v0.10.0/go.mod h1:ECdxehoYosLYHgDDFa2N4yE8Y7aQRAMf0sX9mf2sbGg= github.com/libp2p/go-libp2p-core v0.11.0/go.mod h1:ECdxehoYosLYHgDDFa2N4yE8Y7aQRAMf0sX9mf2sbGg= github.com/libp2p/go-libp2p-core 
v0.12.0/go.mod h1:ECdxehoYosLYHgDDFa2N4yE8Y7aQRAMf0sX9mf2sbGg= github.com/libp2p/go-libp2p-core v0.14.0/go.mod h1:tLasfcVdTXnixsLB0QYaT1syJOhsbrhG7q6pGrHtBg8= github.com/libp2p/go-libp2p-core v0.15.1/go.mod h1:agSaboYM4hzB1cWekgVReqV5M4g5M+2eNNejV+1EEhs= -github.com/libp2p/go-libp2p-core v0.19.0/go.mod h1:AkA+FUKQfYt1FLNef5fOPlo/naAWjKy/RCjkcPjqzYg= -github.com/libp2p/go-libp2p-crypto v0.1.0/go.mod h1:sPUokVISZiy+nNuTTH/TY+leRSxnFj/2GLjtOTW90hI= -github.com/libp2p/go-libp2p-discovery v0.1.0/go.mod h1:4F/x+aldVHjHDHuX85x1zWoFTGElt8HnoDzwkFZm29g= -github.com/libp2p/go-libp2p-discovery v0.2.0/go.mod h1:s4VGaxYMbw4+4+tsoQTqh7wfxg97AEdo4GYBt6BadWg= -github.com/libp2p/go-libp2p-discovery v0.3.0/go.mod h1:o03drFnz9BVAZdzC/QUQ+NeQOu38Fu7LJGEOK2gQltw= -github.com/libp2p/go-libp2p-discovery v0.5.0/go.mod h1:+srtPIU9gDaBNu//UHvcdliKBIcr4SfDcm0/PfPJLug= -github.com/libp2p/go-libp2p-kad-dht v0.19.0/go.mod h1:qPIXdiZsLczhV4/+4EO1jE8ae0YCW4ZOogc4WVIyTEU= -github.com/libp2p/go-libp2p-kad-dht v0.21.0/go.mod h1:Bhm9diAFmc6qcWAr084bHNL159srVZRKADdp96Qqd1I= github.com/libp2p/go-libp2p-kad-dht v0.26.1 h1:AazV3LCImYVkDUGAHx5lIEgZ9iUI2QQKH5GMRQU8uEA= github.com/libp2p/go-libp2p-kad-dht v0.26.1/go.mod h1:mqRUGJ/+7ziQ3XknU2kKHfsbbgb9xL65DXjPOJwmZF8= -github.com/libp2p/go-libp2p-kbucket v0.3.1/go.mod h1:oyjT5O7tS9CQurok++ERgc46YLwEpuGoFq9ubvoUOio= -github.com/libp2p/go-libp2p-kbucket v0.5.0/go.mod h1:zGzGCpQd78b5BNTDGHNDLaTt9aDK/A02xeZp9QeFC4U= github.com/libp2p/go-libp2p-kbucket v0.6.3 h1:p507271wWzpy2f1XxPzCQG9NiN6R6lHL9GiSErbQQo0= github.com/libp2p/go-libp2p-kbucket v0.6.3/go.mod h1:RCseT7AH6eJWxxk2ol03xtP9pEHetYSPXOaJnOiD8i0= -github.com/libp2p/go-libp2p-loggables v0.1.0/go.mod h1:EyumB2Y6PrYjr55Q3/tiJ/o3xoDasoRYM7nOzEpoa90= -github.com/libp2p/go-libp2p-mplex v0.2.0/go.mod h1:Ejl9IyjvXJ0T9iqUTE1jpYATQ9NM3g+OtR+EMMODbKo= -github.com/libp2p/go-libp2p-mplex v0.2.1/go.mod h1:SC99Rxs8Vuzrf/6WhmH41kNn13TiYdAWNYHrwImKLnE= -github.com/libp2p/go-libp2p-mplex v0.2.2/go.mod 
h1:74S9eum0tVQdAfFiKxAyKzNdSuLqw5oadDq7+L/FELo= -github.com/libp2p/go-libp2p-mplex v0.2.3/go.mod h1:CK3p2+9qH9x+7ER/gWWDYJ3QW5ZxWDkm+dVvjfuG3ek= -github.com/libp2p/go-libp2p-mplex v0.4.0/go.mod h1:yCyWJE2sc6TBTnFpjvLuEJgTSw/u+MamvzILKdX7asw= github.com/libp2p/go-libp2p-mplex v0.4.1/go.mod h1:cmy+3GfqfM1PceHTLL7zQzAAYaryDu6iPSC+CIb094g= github.com/libp2p/go-libp2p-mplex v0.5.0/go.mod h1:eLImPJLkj3iG5t5lq68w3Vm5NAQ5BcKwrrb2VmOYb3M= -github.com/libp2p/go-libp2p-nat v0.0.4/go.mod h1:N9Js/zVtAXqaeT99cXgTV9e75KpnWCvVOiGzlcHmBbY= -github.com/libp2p/go-libp2p-nat v0.0.5/go.mod h1:1qubaE5bTZMJE+E/uu2URroMbzdubFz1ChgiN79yKPE= -github.com/libp2p/go-libp2p-nat v0.0.6/go.mod h1:iV59LVhB3IkFvS6S6sauVTSOrNEANnINbI/fkaLimiw= github.com/libp2p/go-libp2p-nat v0.1.0/go.mod h1:DQzAG+QbDYjN1/C3B6vXucLtz3u9rEonLVPtZVzQqks= -github.com/libp2p/go-libp2p-netutil v0.1.0/go.mod h1:3Qv/aDqtMLTUyQeundkKsA+YCThNdbQD54k3TqjpbFU= -github.com/libp2p/go-libp2p-noise v0.1.1/go.mod h1:QDFLdKX7nluB7DEnlVPbz7xlLHdwHFA9HiohJRr3vwM= -github.com/libp2p/go-libp2p-noise v0.2.0/go.mod h1:IEbYhBBzGyvdLBoxxULL/SGbJARhUeqlO8lVSREYu2Q= github.com/libp2p/go-libp2p-noise v0.4.0/go.mod h1:BzzY5pyzCYSyJbQy9oD8z5oP2idsafjt4/X42h9DjZU= -github.com/libp2p/go-libp2p-peer v0.2.0/go.mod h1:RCffaCvUyW2CJmG2gAWVqwePwW7JMgxjsHm7+J5kjWY= -github.com/libp2p/go-libp2p-peerstore v0.1.0/go.mod h1:2CeHkQsr8svp4fZ+Oi9ykN1HBb6u0MOvdJ7YIsmcwtY= -github.com/libp2p/go-libp2p-peerstore v0.1.3/go.mod h1:BJ9sHlm59/80oSkpWgr1MyY1ciXAXV397W6h1GH/uKI= -github.com/libp2p/go-libp2p-peerstore v0.1.4/go.mod h1:+4BDbDiiKf4PzpANZDAT+knVdLxvqh7hXOujessqdzs= -github.com/libp2p/go-libp2p-peerstore v0.2.0/go.mod h1:N2l3eVIeAitSg3Pi2ipSrJYnqhVnMNQZo9nkSCuAbnQ= -github.com/libp2p/go-libp2p-peerstore v0.2.1/go.mod h1:NQxhNjWxf1d4w6PihR8btWIRjwRLBr4TYKfNgrUkOPA= -github.com/libp2p/go-libp2p-peerstore v0.2.2/go.mod h1:NQxhNjWxf1d4w6PihR8btWIRjwRLBr4TYKfNgrUkOPA= -github.com/libp2p/go-libp2p-peerstore v0.2.6/go.mod 
h1:ss/TWTgHZTMpsU/oKVVPQCGuDHItOpf2W8RxAi50P2s= -github.com/libp2p/go-libp2p-peerstore v0.2.7/go.mod h1:ss/TWTgHZTMpsU/oKVVPQCGuDHItOpf2W8RxAi50P2s= github.com/libp2p/go-libp2p-peerstore v0.4.0/go.mod h1:rDJUFyzEWPpXpEwywkcTYYzDHlwza8riYMaUzaN6hX0= github.com/libp2p/go-libp2p-peerstore v0.6.0/go.mod h1:DGEmKdXrcYpK9Jha3sS7MhqYdInxJy84bIPtSu65bKc= github.com/libp2p/go-libp2p-pnet v0.2.0/go.mod h1:Qqvq6JH/oMZGwqs3N1Fqhv8NVhrdYcO0BW4wssv21LA= -github.com/libp2p/go-libp2p-pubsub v0.11.0 h1:+JvS8Kty0OiyUiN0i8H5JbaCgjnJTRnTHe4rU88dLFc= -github.com/libp2p/go-libp2p-pubsub v0.11.0/go.mod h1:QEb+hEV9WL9wCiUAnpY29FZR6W3zK8qYlaml8R4q6gQ= -github.com/libp2p/go-libp2p-quic-transport v0.10.0/go.mod h1:RfJbZ8IqXIhxBRm5hqUEJqjiiY8xmEuq3HUDS993MkA= +github.com/libp2p/go-libp2p-pubsub v0.12.0 h1:PENNZjSfk8KYxANRlpipdS7+BfLmOl3L2E/6vSNjbdI= +github.com/libp2p/go-libp2p-pubsub v0.12.0/go.mod h1:Oi0zw9aw8/Y5GC99zt+Ef2gYAl+0nZlwdJonDyOz/sE= github.com/libp2p/go-libp2p-quic-transport v0.13.0/go.mod h1:39/ZWJ1TW/jx1iFkKzzUg00W6tDJh73FC0xYudjr7Hc= github.com/libp2p/go-libp2p-quic-transport v0.16.0/go.mod h1:1BXjVMzr+w7EkPfiHkKnwsWjPjtfaNT0q8RS3tGDvEQ= github.com/libp2p/go-libp2p-quic-transport v0.17.0/go.mod h1:x4pw61P3/GRCcSLypcQJE/Q2+E9f4X+5aRcZLXf20LM= -github.com/libp2p/go-libp2p-record v0.1.0/go.mod h1:ujNc8iuE5dlKWVy6wuL6dd58t0n7xI4hAIl8pE6wu5Q= github.com/libp2p/go-libp2p-record v0.2.0 h1:oiNUOCWno2BFuxt3my4i1frNrt7PerzB3queqa1NkQ0= github.com/libp2p/go-libp2p-record v0.2.0/go.mod h1:I+3zMkvvg5m2OcSdoL0KPljyJyvNDFGKX7QdlpYUcwk= github.com/libp2p/go-libp2p-resource-manager v0.2.1/go.mod h1:K+eCkiapf+ey/LADO4TaMpMTP9/Qde/uLlrnRqV4PLQ= -github.com/libp2p/go-libp2p-routing-helpers v0.4.0/go.mod h1:dYEAgkVhqho3/YKxfOEGdFMIcWfAFNlZX8iAIihYA2E= github.com/libp2p/go-libp2p-routing-helpers v0.7.4 h1:6LqS1Bzn5CfDJ4tzvP9uwh42IB7TJLNFJA6dEeGBv84= github.com/libp2p/go-libp2p-routing-helpers v0.7.4/go.mod h1:we5WDj9tbolBXOuF1hGOkR+r7Uh1408tQbAKaT5n1LE= -github.com/libp2p/go-libp2p-secio 
v0.1.0/go.mod h1:tMJo2w7h3+wN4pgU2LSYeiKPrfqBgkOsdiKK77hE7c8= -github.com/libp2p/go-libp2p-secio v0.2.0/go.mod h1:2JdZepB8J5V9mBp79BmwsaPQhRPNN2NrnB2lKQcdy6g= -github.com/libp2p/go-libp2p-secio v0.2.1/go.mod h1:cWtZpILJqkqrSkiYcDBh5lA3wbT2Q+hz3rJQq3iftD8= -github.com/libp2p/go-libp2p-secio v0.2.2/go.mod h1:wP3bS+m5AUnFA+OFO7Er03uO1mncHG0uVwGrwvjYlNY= -github.com/libp2p/go-libp2p-swarm v0.1.0/go.mod h1:wQVsCdjsuZoc730CgOvh5ox6K8evllckjebkdiY5ta4= -github.com/libp2p/go-libp2p-swarm v0.2.2/go.mod h1:fvmtQ0T1nErXym1/aa1uJEyN7JzaTNyBcHImCxRpPKU= -github.com/libp2p/go-libp2p-swarm v0.2.3/go.mod h1:P2VO/EpxRyDxtChXz/VPVXyTnszHvokHKRhfkEgFKNM= -github.com/libp2p/go-libp2p-swarm v0.2.8/go.mod h1:JQKMGSth4SMqonruY0a8yjlPVIkb0mdNSwckW7OYziM= -github.com/libp2p/go-libp2p-swarm v0.3.0/go.mod h1:hdv95GWCTmzkgeJpP+GK/9D9puJegb7H57B5hWQR5Kk= -github.com/libp2p/go-libp2p-swarm v0.4.0/go.mod h1:XVFcO52VoLoo0eitSxNQWYq4D6sydGOweTOAjJNraCw= -github.com/libp2p/go-libp2p-swarm v0.5.0/go.mod h1:sU9i6BoHE0Ve5SKz3y9WfKrh8dUat6JknzUehFx8xW4= github.com/libp2p/go-libp2p-swarm v0.8.0/go.mod h1:sOMp6dPuqco0r0GHTzfVheVBh6UEL0L1lXUZ5ot2Fvc= github.com/libp2p/go-libp2p-swarm v0.10.0/go.mod h1:71ceMcV6Rg/0rIQ97rsZWMzto1l9LnNquef+efcRbmA= github.com/libp2p/go-libp2p-swarm v0.10.2/go.mod h1:Pdkq0QU5a+qu+oyqIV3bknMsnzk9lnNyKvB9acJ5aZs= -github.com/libp2p/go-libp2p-testing v0.0.2/go.mod h1:gvchhf3FQOtBdr+eFUABet5a4MBLK8jM3V4Zghvmi+E= -github.com/libp2p/go-libp2p-testing v0.0.3/go.mod h1:gvchhf3FQOtBdr+eFUABet5a4MBLK8jM3V4Zghvmi+E= -github.com/libp2p/go-libp2p-testing v0.0.4/go.mod h1:gvchhf3FQOtBdr+eFUABet5a4MBLK8jM3V4Zghvmi+E= -github.com/libp2p/go-libp2p-testing v0.1.0/go.mod h1:xaZWMJrPUM5GlDBxCeGUi7kI4eqnjVyavGroI2nxEM0= github.com/libp2p/go-libp2p-testing v0.1.1/go.mod h1:xaZWMJrPUM5GlDBxCeGUi7kI4eqnjVyavGroI2nxEM0= github.com/libp2p/go-libp2p-testing v0.1.2-0.20200422005655-8775583591d8/go.mod h1:Qy8sAncLKpwXtS2dSnDOP8ktexIAHKu+J+pnZOFZLTc= -github.com/libp2p/go-libp2p-testing v0.3.0/go.mod 
h1:efZkql4UZ7OVsEfaxNHZPzIehtsBXMrXnCfJIgDti5g= github.com/libp2p/go-libp2p-testing v0.4.0/go.mod h1:Q+PFXYoiYFN5CAEG2w3gLPEzotlKsNSbKQ/lImlOWF0= github.com/libp2p/go-libp2p-testing v0.5.0/go.mod h1:QBk8fqIL1XNcno/l3/hhaIEn4aLRijpYOR+zVjjlh+A= github.com/libp2p/go-libp2p-testing v0.7.0/go.mod h1:OLbdn9DbgdMwv00v+tlp1l3oe2Cl+FAjoWIA2pa0X6E= github.com/libp2p/go-libp2p-testing v0.9.0/go.mod h1:Td7kbdkWqYTJYQGTwzlgXwaqldraIanyjuRiAbK/XQU= github.com/libp2p/go-libp2p-testing v0.9.2/go.mod h1:Td7kbdkWqYTJYQGTwzlgXwaqldraIanyjuRiAbK/XQU= -github.com/libp2p/go-libp2p-testing v0.11.0/go.mod h1:qG4sF27dfKFoK9KlVzK2y52LQKhp0VEmLjV5aDqr1Hg= github.com/libp2p/go-libp2p-testing v0.12.0 h1:EPvBb4kKMWO29qP4mZGyhVzUyR25dvfUIK5WDu6iPUA= github.com/libp2p/go-libp2p-testing v0.12.0/go.mod h1:KcGDRXyN7sQCllucn1cOOS+Dmm7ujhfEyXQL5lvkcPg= -github.com/libp2p/go-libp2p-tls v0.1.3/go.mod h1:wZfuewxOndz5RTnCAxFliGjvYSDA40sKitV4c50uI1M= github.com/libp2p/go-libp2p-tls v0.3.0/go.mod h1:fwF5X6PWGxm6IDRwF3V8AVCCj/hOd5oFlg+wo2FxJDY= github.com/libp2p/go-libp2p-tls v0.4.1/go.mod h1:EKCixHEysLNDlLUoKxv+3f/Lp90O2EXNjTr0UQDnrIw= -github.com/libp2p/go-libp2p-transport-upgrader v0.1.1/go.mod h1:IEtA6or8JUbsV07qPW4r01GnTenLW4oi3lOPbUMGJJA= -github.com/libp2p/go-libp2p-transport-upgrader v0.2.0/go.mod h1:mQcrHj4asu6ArfSoMuyojOdjx73Q47cYD7s5+gZOlns= -github.com/libp2p/go-libp2p-transport-upgrader v0.3.0/go.mod h1:i+SKzbRnvXdVbU3D1dwydnTmKRPXiAR/fyvi1dXuL4o= -github.com/libp2p/go-libp2p-transport-upgrader v0.4.0/go.mod h1:J4ko0ObtZSmgn5BX5AmegP+dK3CSnU2lMCKsSq/EY0s= -github.com/libp2p/go-libp2p-transport-upgrader v0.4.2/go.mod h1:NR8ne1VwfreD5VIWIU62Agt/J18ekORFU/j1i2y8zvk= github.com/libp2p/go-libp2p-transport-upgrader v0.5.0/go.mod h1:Rc+XODlB3yce7dvFV4q/RmyJGsFcCZRkeZMu/Zdg0mo= github.com/libp2p/go-libp2p-transport-upgrader v0.7.0/go.mod h1:GIR2aTRp1J5yjVlkUoFqMkdobfob6RnAwYg/RZPhrzg= github.com/libp2p/go-libp2p-transport-upgrader v0.7.1/go.mod h1:GIR2aTRp1J5yjVlkUoFqMkdobfob6RnAwYg/RZPhrzg= 
-github.com/libp2p/go-libp2p-xor v0.1.0/go.mod h1:LSTM5yRnjGZbWNTA/hRwq2gGFrvRIbQJscoIL/u6InY= -github.com/libp2p/go-libp2p-yamux v0.2.0/go.mod h1:Db2gU+XfLpm6E4rG5uGCFX6uXA8MEXOxFcRoXUODaK8= -github.com/libp2p/go-libp2p-yamux v0.2.1/go.mod h1:1FBXiHDk1VyRM1C0aez2bCfHQ4vMZKkAQzZbkSQt5fI= -github.com/libp2p/go-libp2p-yamux v0.2.2/go.mod h1:lIohaR0pT6mOt0AZ0L2dFze9hds9Req3OfS+B+dv4qw= -github.com/libp2p/go-libp2p-yamux v0.2.5/go.mod h1:Zpgj6arbyQrmZ3wxSZxfBmbdnWtbZ48OpsfmQVTErwA= -github.com/libp2p/go-libp2p-yamux v0.2.7/go.mod h1:X28ENrBMU/nm4I3Nx4sZ4dgjZ6VhLEn0XhIoZ5viCwU= -github.com/libp2p/go-libp2p-yamux v0.2.8/go.mod h1:/t6tDqeuZf0INZMTgd0WxIRbtK2EzI2h7HbFm9eAKI4= -github.com/libp2p/go-libp2p-yamux v0.4.0/go.mod h1:+DWDjtFMzoAwYLVkNZftoucn7PelNoy5nm3tZ3/Zw30= github.com/libp2p/go-libp2p-yamux v0.5.0/go.mod h1:AyR8k5EzyM2QN9Bbdg6X1SkVVuqLwTGf0L4DFq9g6po= -github.com/libp2p/go-libp2p-yamux v0.5.1/go.mod h1:dowuvDu8CRWmr0iqySMiSxK+W0iL5cMVO9S94Y6gkv4= -github.com/libp2p/go-libp2p-yamux v0.5.4/go.mod h1:tfrXbyaTqqSU654GTvK3ocnSZL3BuHoeTSqhcel1wsE= github.com/libp2p/go-libp2p-yamux v0.8.0/go.mod h1:yTkPgN2ib8FHyU1ZcVD7aelzyAqXXwEPbyx+aSKm9h8= github.com/libp2p/go-libp2p-yamux v0.8.1/go.mod h1:rUozF8Jah2dL9LLGyBaBeTQeARdwhefMCTQVQt6QobE= github.com/libp2p/go-libp2p-yamux v0.9.1/go.mod h1:wRc6wvyxQINFcKe7daL4BeQ02Iyp+wxyC8WCNfngBrA= -github.com/libp2p/go-maddr-filter v0.0.4/go.mod h1:6eT12kSQMA9x2pvFQa+xesMKUBlj9VImZbj3B9FBH/Q= -github.com/libp2p/go-maddr-filter v0.0.5/go.mod h1:Jk+36PMfIqCJhAnaASRH83bdAvfDRp/w6ENFaC9bG+M= github.com/libp2p/go-maddr-filter v0.1.0/go.mod h1:VzZhTXkMucEGGEOSKddrwGiOv0tUhgnKqNEmIAz/bPU= -github.com/libp2p/go-mplex v0.0.3/go.mod h1:pK5yMLmOoBR1pNCqDlA2GQrdAVTMkqFalaTWe7l4Yd0= -github.com/libp2p/go-mplex v0.1.0/go.mod h1:SXgmdki2kwCUlCCbfGLEgHjC4pFqhTp0ZoV6aiKgxDU= -github.com/libp2p/go-mplex v0.1.1/go.mod h1:Xgz2RDCi3co0LeZfgjm4OgUF15+sVR8SRcu3SFXI1lk= -github.com/libp2p/go-mplex v0.1.2/go.mod 
h1:Xgz2RDCi3co0LeZfgjm4OgUF15+sVR8SRcu3SFXI1lk= -github.com/libp2p/go-mplex v0.2.0/go.mod h1:0Oy/A9PQlwBytDRp4wSkFnzHYDKcpLot35JQ6msjvYQ= github.com/libp2p/go-mplex v0.3.0/go.mod h1:0Oy/A9PQlwBytDRp4wSkFnzHYDKcpLot35JQ6msjvYQ= github.com/libp2p/go-mplex v0.4.0/go.mod h1:y26Lx+wNVtMYMaPu300Cbot5LkEZ4tJaNYeHeT9dh6E= -github.com/libp2p/go-mplex v0.7.0/go.mod h1:rW8ThnRcYWft/Jb2jeORBmPd6xuG3dGxWN/W168L9EU= -github.com/libp2p/go-msgio v0.0.2/go.mod h1:63lBBgOTDKQL6EWazRMCwXsEeEeK9O2Cd+0+6OOuipQ= -github.com/libp2p/go-msgio v0.0.3/go.mod h1:63lBBgOTDKQL6EWazRMCwXsEeEeK9O2Cd+0+6OOuipQ= github.com/libp2p/go-msgio v0.0.4/go.mod h1:63lBBgOTDKQL6EWazRMCwXsEeEeK9O2Cd+0+6OOuipQ= github.com/libp2p/go-msgio v0.0.6/go.mod h1:4ecVB6d9f4BDSL5fqvPiC4A3KivjWn+Venn/1ALLMWA= github.com/libp2p/go-msgio v0.2.0/go.mod h1:dBVM1gW3Jk9XqHkU4eKdGvVHdLa51hoGfll6jMJMSlY= github.com/libp2p/go-msgio v0.3.0 h1:mf3Z8B1xcFN314sWX+2vOTShIE0Mmn2TXn3YCUQGNj0= github.com/libp2p/go-msgio v0.3.0/go.mod h1:nyRM819GmVaF9LX3l03RMh10QdOroF++NBbxAb0mmDM= -github.com/libp2p/go-nat v0.0.3/go.mod h1:88nUEt0k0JD45Bk93NIwDqjlhiOwOoV36GchpcVc1yI= -github.com/libp2p/go-nat v0.0.4/go.mod h1:Nmw50VAvKuk38jUBcmNh6p9lUJLoODbJRvYAa/+KSDo= -github.com/libp2p/go-nat v0.0.5/go.mod h1:B7NxsVNPZmRLvMOwiEO1scOSyjA56zxYAGv1yQgRkEU= github.com/libp2p/go-nat v0.1.0/go.mod h1:X7teVkwRHNInVNWQiO/tAiAVRwSr5zoRz4YSTC3uRBM= github.com/libp2p/go-nat v0.2.0 h1:Tyz+bUFAYqGyJ/ppPPymMGbIgNRH+WqC5QrT5fKrrGk= github.com/libp2p/go-nat v0.2.0/go.mod h1:3MJr+GRpRkyT65EpVPBstXLvOlAPzUVlG6Pwg9ohLJk= github.com/libp2p/go-netroute v0.1.2/go.mod h1:jZLDV+1PE8y5XxBySEBgbuVAXbhtuHSdmLPL2n9MKbk= github.com/libp2p/go-netroute v0.1.3/go.mod h1:jZLDV+1PE8y5XxBySEBgbuVAXbhtuHSdmLPL2n9MKbk= github.com/libp2p/go-netroute v0.1.5/go.mod h1:V1SR3AaECRkEQCoFFzYwVYWvYIEtlxx89+O3qcpCl4A= -github.com/libp2p/go-netroute v0.1.6/go.mod h1:AqhkMh0VuWmfgtxKPp3Oc1LdU5QSWS7wl0QLhSZqXxQ= github.com/libp2p/go-netroute v0.2.0/go.mod 
h1:Vio7LTzZ+6hoT4CMZi5/6CpY3Snzh2vgZhWgxMNwlQI= github.com/libp2p/go-netroute v0.2.1 h1:V8kVrpD8GK0Riv15/7VN6RbUQ3URNZVosw7H2v9tksU= github.com/libp2p/go-netroute v0.2.1/go.mod h1:hraioZr0fhBjG0ZRXJJ6Zj2IVEVNx6tDTFQfSmcq7mQ= -github.com/libp2p/go-openssl v0.0.2/go.mod h1:v8Zw2ijCSWBQi8Pq5GAixw6DbFfa9u6VIYDXnvOXkc0= -github.com/libp2p/go-openssl v0.0.3/go.mod h1:unDrJpgy3oFr+rqXsarWifmJuNnJR4chtO1HmaZjggc= github.com/libp2p/go-openssl v0.0.4/go.mod h1:unDrJpgy3oFr+rqXsarWifmJuNnJR4chtO1HmaZjggc= github.com/libp2p/go-openssl v0.0.5/go.mod h1:unDrJpgy3oFr+rqXsarWifmJuNnJR4chtO1HmaZjggc= github.com/libp2p/go-openssl v0.0.7/go.mod h1:unDrJpgy3oFr+rqXsarWifmJuNnJR4chtO1HmaZjggc= -github.com/libp2p/go-openssl v0.1.0/go.mod h1:OiOxwPpL3n4xlenjx2h7AwSGaFSC/KZvf6gNdOBQMtc= -github.com/libp2p/go-reuseport v0.0.1/go.mod h1:jn6RmB1ufnQwl0Q1f+YxAj8isJgDCQzaaxIFYDhcYEA= -github.com/libp2p/go-reuseport v0.0.2/go.mod h1:SPD+5RwGC7rcnzngoYC86GjPzjSywuQyMVAheVBD9nQ= github.com/libp2p/go-reuseport v0.1.0/go.mod h1:bQVn9hmfcTaoo0c9v5pBhOarsU1eNOBZdaAd2hzXRKU= -github.com/libp2p/go-reuseport v0.2.0/go.mod h1:bvVho6eLMm6Bz5hmU0LYN3ixd3nPPvtIlaURZZgOY4k= github.com/libp2p/go-reuseport v0.4.0 h1:nR5KU7hD0WxXCJbmw7r2rhRYruNRl2koHw8fQscQm2s= github.com/libp2p/go-reuseport v0.4.0/go.mod h1:ZtI03j/wO5hZVDFo2jKywN6bYKWLOy8Se6DrI2E1cLU= -github.com/libp2p/go-reuseport-transport v0.0.2/go.mod h1:YkbSDrvjUVDL6b8XqriyA20obEtsW9BLkuOUyQAOCbs= -github.com/libp2p/go-reuseport-transport v0.0.3/go.mod h1:Spv+MPft1exxARzP2Sruj2Wb5JSyHNncjf1Oi2dEbzM= -github.com/libp2p/go-reuseport-transport v0.0.4/go.mod h1:trPa7r/7TJK/d+0hdBLOCGvpQQVOU74OXbNCIMkufGw= github.com/libp2p/go-reuseport-transport v0.1.0/go.mod h1:vev0C0uMkzriDY59yFHD9v+ujJvYmDQVLowvAjEOmfw= github.com/libp2p/go-sockaddr v0.0.2/go.mod h1:syPvOmNs24S3dFVGJA1/mrqdeijPxLV2Le3BRLKd68k= github.com/libp2p/go-sockaddr v0.1.0/go.mod h1:syPvOmNs24S3dFVGJA1/mrqdeijPxLV2Le3BRLKd68k= -github.com/libp2p/go-sockaddr v0.1.1/go.mod 
h1:syPvOmNs24S3dFVGJA1/mrqdeijPxLV2Le3BRLKd68k= -github.com/libp2p/go-stream-muxer v0.0.1/go.mod h1:bAo8x7YkSpadMTbtTaxGVHWUQsR/l5MEaHbKaliuT14= -github.com/libp2p/go-stream-muxer-multistream v0.2.0/go.mod h1:j9eyPol/LLRqT+GPLSxvimPhNph4sfYfMoDPd7HkzIc= github.com/libp2p/go-stream-muxer-multistream v0.3.0/go.mod h1:yDh8abSIzmZtqtOt64gFJUXEryejzNb0lisTt+fAMJA= github.com/libp2p/go-stream-muxer-multistream v0.4.0/go.mod h1:nb+dGViZleRP4XcyHuZSVrJCBl55nRBOMmiSL/dyziw= -github.com/libp2p/go-tcp-transport v0.1.0/go.mod h1:oJ8I5VXryj493DEJ7OsBieu8fcg2nHGctwtInJVpipc= -github.com/libp2p/go-tcp-transport v0.1.1/go.mod h1:3HzGvLbx6etZjnFlERyakbaYPdfjg2pWP97dFZworkY= -github.com/libp2p/go-tcp-transport v0.2.0/go.mod h1:vX2U0CnWimU4h0SGSEsg++AzvBcroCGYw28kh94oLe0= -github.com/libp2p/go-tcp-transport v0.2.1/go.mod h1:zskiJ70MEfWz2MKxvFB/Pv+tPIB1PpPUrHIWQ8aFw7M= -github.com/libp2p/go-tcp-transport v0.2.3/go.mod h1:9dvr03yqrPyYGIEN6Dy5UvdJZjyPFvl1S/igQ5QD1SU= github.com/libp2p/go-tcp-transport v0.4.0/go.mod h1:0y52Rwrn4076xdJYu/51/qJIdxz+EWDAOG2S45sV3VI= github.com/libp2p/go-tcp-transport v0.5.0/go.mod h1:UPPL0DIjQqiWRwVAb+CEQlaAG0rp/mCqJfIhFcLHc4Y= github.com/libp2p/go-tcp-transport v0.5.1/go.mod h1:UPPL0DIjQqiWRwVAb+CEQlaAG0rp/mCqJfIhFcLHc4Y= -github.com/libp2p/go-testutil v0.1.0/go.mod h1:81b2n5HypcVyrCg/MJx4Wgfp/VHojytjVe/gLzZ2Ehc= -github.com/libp2p/go-ws-transport v0.1.0/go.mod h1:rjw1MG1LU9YDC6gzmwObkPd/Sqwhw7yT74kj3raBFuo= -github.com/libp2p/go-ws-transport v0.2.0/go.mod h1:9BHJz/4Q5A9ludYWKoGCFC5gUElzlHoKzu0yY9p/klM= -github.com/libp2p/go-ws-transport v0.3.0/go.mod h1:bpgTJmRZAvVHrgHybCVyqoBmyLQ1fiZuEaBYusP5zsk= -github.com/libp2p/go-ws-transport v0.4.0/go.mod h1:EcIEKqf/7GDjth6ksuS/6p7R49V4CBY6/E7R/iyhYUA= github.com/libp2p/go-ws-transport v0.6.0/go.mod h1:dXqtI9e2JV9FtF1NOtWVZSKXh5zXvnuwPXfj8GPBbYU= -github.com/libp2p/go-yamux v1.2.2/go.mod h1:FGTiPvoV/3DVdgWpX+tM0OW3tsM+W5bSE3gZwqQTcow= -github.com/libp2p/go-yamux v1.2.3/go.mod 
h1:FGTiPvoV/3DVdgWpX+tM0OW3tsM+W5bSE3gZwqQTcow= -github.com/libp2p/go-yamux v1.3.0/go.mod h1:FGTiPvoV/3DVdgWpX+tM0OW3tsM+W5bSE3gZwqQTcow= -github.com/libp2p/go-yamux v1.3.3/go.mod h1:FGTiPvoV/3DVdgWpX+tM0OW3tsM+W5bSE3gZwqQTcow= -github.com/libp2p/go-yamux v1.3.5/go.mod h1:FGTiPvoV/3DVdgWpX+tM0OW3tsM+W5bSE3gZwqQTcow= -github.com/libp2p/go-yamux v1.3.7/go.mod h1:fr7aVgmdNGJK+N1g+b6DW6VxzbRCjCOejR/hkmpooHE= -github.com/libp2p/go-yamux v1.4.0/go.mod h1:fr7aVgmdNGJK+N1g+b6DW6VxzbRCjCOejR/hkmpooHE= github.com/libp2p/go-yamux v1.4.1/go.mod h1:fr7aVgmdNGJK+N1g+b6DW6VxzbRCjCOejR/hkmpooHE= -github.com/libp2p/go-yamux/v2 v2.0.0/go.mod h1:NVWira5+sVUIU6tu1JWvaRn1dRnG+cawOJiflsAM+7U= -github.com/libp2p/go-yamux/v2 v2.2.0/go.mod h1:3So6P6TV6r75R9jiBpiIKgU/66lOarCZjqROGxzPpPQ= github.com/libp2p/go-yamux/v3 v3.0.1/go.mod h1:s2LsDhHbh+RfCsQoICSYt58U2f8ijtPANFD8BmE74Bo= github.com/libp2p/go-yamux/v3 v3.0.2/go.mod h1:s2LsDhHbh+RfCsQoICSYt58U2f8ijtPANFD8BmE74Bo= github.com/libp2p/go-yamux/v3 v3.1.1/go.mod h1:jeLEQgLXqE2YqX1ilAClIfCMDY+0uXQUKmmb/qp0gT4= -github.com/libp2p/go-yamux/v3 v3.1.2/go.mod h1:jeLEQgLXqE2YqX1ilAClIfCMDY+0uXQUKmmb/qp0gT4= -github.com/libp2p/go-yamux/v4 v4.0.0/go.mod h1:NWjl8ZTLOGlozrXSOZ/HlfG++39iKNnM5wwmtQP1YB4= github.com/libp2p/go-yamux/v4 v4.0.1 h1:FfDR4S1wj6Bw2Pqbc8Uz7pCxeRBPbwsBbEdfwiCypkQ= github.com/libp2p/go-yamux/v4 v4.0.1/go.mod h1:NWjl8ZTLOGlozrXSOZ/HlfG++39iKNnM5wwmtQP1YB4= github.com/libp2p/zeroconf/v2 v2.1.1/go.mod h1:fuJqLnUwZTshS3U/bMRJ3+ow/v9oid1n0DmyYyNO1Xs= -github.com/libp2p/zeroconf/v2 v2.2.0/go.mod h1:fuJqLnUwZTshS3U/bMRJ3+ow/v9oid1n0DmyYyNO1Xs= github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM= github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4= -github.com/lucas-clemente/quic-go v0.19.3/go.mod h1:ADXpNbTQjq1hIzCpB+y/k5iz4n4z4IwqoLb94Kh5Hu8= github.com/lucas-clemente/quic-go v0.23.0/go.mod 
h1:paZuzjXCE5mj6sikVLMvqXk8lJV2AsqtJ6bDhjEfxx0= github.com/lucas-clemente/quic-go v0.25.0/go.mod h1:YtzP8bxRVCBlO77yRanE264+fY/T2U9ZlW1AaHOsMOg= github.com/lucas-clemente/quic-go v0.27.0/go.mod h1:AzgQoPda7N+3IqMMMkywBKggIFo2KT6pfnlrQ2QieeI= -github.com/lucas-clemente/quic-go v0.28.1/go.mod h1:oGz5DKK41cJt5+773+BSO9BXDsREY4HLf7+0odGAPO0= -github.com/lucas-clemente/quic-go v0.29.1/go.mod h1:CTcNfLYJS2UuRNB+zcNlgvkjBhxX6Hm3WUxxAQx2mgE= github.com/lucasjones/reggen v0.0.0-20180717132126-cdb49ff09d77/go.mod h1:5ELEyG+X8f+meRWHuqUOewBOhvHkl7M76pdGEansxW4= github.com/lunixbochs/vtclean v1.0.0/go.mod h1:pHhQNgMf3btfWnGBVipUOjRYhoOsdGqdm/+2c2E2WMI= github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ= @@ -1706,22 +1301,15 @@ github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJ github.com/manifoldco/promptui v0.9.0 h1:3V4HzJk1TtXW1MTZMP7mdlwbBpIinw3HztaIlYthEiA= github.com/manifoldco/promptui v0.9.0/go.mod h1:ka04sppxSGFAtxX0qhlYQjISsg9mR4GWtQEhdbn6Pgg= github.com/marten-seemann/qpack v0.2.1/go.mod h1:F7Gl5L1jIgN1D11ucXefiuJS9UMVP2opoCp2jDKb7wc= -github.com/marten-seemann/qtls v0.10.0/go.mod h1:UvMd1oaYDACI99/oZUYLzMCkBXQVT0aGm99sJhbT8hs= -github.com/marten-seemann/qtls-go1-15 v0.1.1/go.mod h1:GyFwywLKkRt+6mfU99csTEY1joMZz5vmB1WNZH3P81I= github.com/marten-seemann/qtls-go1-15 v0.1.4/go.mod h1:GyFwywLKkRt+6mfU99csTEY1joMZz5vmB1WNZH3P81I= github.com/marten-seemann/qtls-go1-16 v0.1.4/go.mod h1:gNpI2Ol+lRS3WwSOtIUUtRwZEQMXjYK+dQSBFbethAk= github.com/marten-seemann/qtls-go1-16 v0.1.5/go.mod h1:gNpI2Ol+lRS3WwSOtIUUtRwZEQMXjYK+dQSBFbethAk= github.com/marten-seemann/qtls-go1-17 v0.1.0/go.mod h1:fz4HIxByo+LlWcreM4CZOYNuz3taBQ8rN2X6FqvaWo8= github.com/marten-seemann/qtls-go1-17 v0.1.1/go.mod h1:C2ekUKcDdz9SDWxec1N/MvcXBpaX9l3Nx67XaR84L5s= -github.com/marten-seemann/qtls-go1-17 v0.1.2/go.mod h1:C2ekUKcDdz9SDWxec1N/MvcXBpaX9l3Nx67XaR84L5s= github.com/marten-seemann/qtls-go1-18 v0.1.0-beta.1/go.mod 
h1:PUhIQk19LoFt2174H4+an8TYvWOGjb/hHwphBeaDHwI= github.com/marten-seemann/qtls-go1-18 v0.1.1/go.mod h1:mJttiymBAByA49mhlNZZGrH5u1uXYZJ+RW28Py7f4m4= -github.com/marten-seemann/qtls-go1-18 v0.1.2/go.mod h1:mJttiymBAByA49mhlNZZGrH5u1uXYZJ+RW28Py7f4m4= -github.com/marten-seemann/qtls-go1-19 v0.1.0-beta.1/go.mod h1:5HTDWtVudo/WFsHKRNuOhWlbdjrfs5JHrYb0wIJqGpI= -github.com/marten-seemann/qtls-go1-19 v0.1.0/go.mod h1:5HTDWtVudo/WFsHKRNuOhWlbdjrfs5JHrYb0wIJqGpI= github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd h1:br0buuQ854V8u83wA0rVZ8ttrq5CpaPZdvrK0LP2lOk= github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd/go.mod h1:QuCEs1Nt24+FYQEqAAncTDPJIuGs+LxK1MCiFL25pMU= -github.com/marten-seemann/webtransport-go v0.1.1/go.mod h1:kBEh5+RSvOA4troP1vyOVBWK4MIMzDICXVrvCPrYcrM= github.com/matryer/moq v0.0.0-20190312154309-6cfb0558e1bd/go.mod h1:9ELz6aaclSIGnZBoaSLZ3NAl1VTufbOrXBPvtcy6WiQ= github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcnceauSikq3lYCQ= @@ -1740,11 +1328,9 @@ github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Ky github.com/mattn/go-isatty v0.0.13/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= -github.com/mattn/go-isatty v0.0.17/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= -github.com/mattn/go-pointer v0.0.1/go.mod h1:2zXcozF6qYGgmsG+SeTZz3oAbFLdD3OWqnUbNvJZAlc= github.com/mattn/go-runewidth v0.0.2/go.mod 
h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= github.com/mattn/go-runewidth v0.0.3/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= github.com/mattn/go-runewidth v0.0.4/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= @@ -1754,19 +1340,14 @@ github.com/mattn/go-runewidth v0.0.14/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh github.com/mattn/go-sqlite3 v1.11.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= github.com/mattn/go-tty v0.0.0-20180907095812-13ff1204f104/go.mod h1:XPvLUNfbS4fJH25nqRHfWLMa1ONC8Amw+mIA639KxkE= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= -github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b/go.mod h1:01TrycV0kFyexm33Z7vhZRXopbI8J3TDReVlkTgMUxE= github.com/microcosm-cc/bluemonday v1.0.1/go.mod h1:hsXNsILzKxV+sX77C5b8FSuKF00vh2OMYv+xgHpAMF4= github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= -github.com/miekg/dns v1.1.12/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= -github.com/miekg/dns v1.1.28/go.mod h1:KNUDUusw/aVsxyTYZM1oqvCicbwhgbNgztCETuNZ7xM= -github.com/miekg/dns v1.1.31/go.mod h1:KNUDUusw/aVsxyTYZM1oqvCicbwhgbNgztCETuNZ7xM= github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI= github.com/miekg/dns v1.1.43/go.mod h1:+evo5L0630/F6ca/Z9+GAqzhjGyn8/c+TBaOyfEl0V4= github.com/miekg/dns v1.1.48/go.mod h1:e3IlAVfNqAllflbibAZEWOXOQ+Ynzk/dDozDxY7XnME= -github.com/miekg/dns v1.1.50/go.mod h1:e3IlAVfNqAllflbibAZEWOXOQ+Ynzk/dDozDxY7XnME= -github.com/miekg/dns v1.1.61 h1:nLxbwF3XxhwVSm8g9Dghm9MHPaUZuqhPiGL+675ZmEs= -github.com/miekg/dns v1.1.61/go.mod h1:mnAarhS3nWaW+NVP2wTkYVIZyHNJ098SJZUki3eykwQ= +github.com/miekg/dns v1.1.62 h1:cN8OuEF1/x5Rq6Np+h1epln8OiyPWV+lROx9LxcGgIQ= +github.com/miekg/dns v1.1.62/go.mod h1:mvDlcItzm+br7MToIKqkglaGhlFMHJ9DTNNWONWXbNQ= github.com/mikioh/tcp 
v0.0.0-20190314235350-803a9b46060c h1:bzE/A84HN25pxAuk9Eej1Kz9OUelF97nAc82bDquQI8= github.com/mikioh/tcp v0.0.0-20190314235350-803a9b46060c/go.mod h1:0SQS9kMwD2VsyFEB++InYyBJroV/FRmBgcydeSUcJms= github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b h1:z78hV3sbSMAUoyUMM0I83AUIT6Hu17AWfgjzIbtrYFc= @@ -1816,7 +1397,6 @@ github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9G github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/modocache/gover v0.0.0-20171022184752-b58185e213c5/go.mod h1:caMODM3PzxT8aQXRPkAt8xlV/e7d7w8GM5g0fa5F0D8= github.com/mr-tron/base58 v1.1.0/go.mod h1:xcD2VGqlgYjBdcBLw+TuYLr8afG+Hj8g2eTVqeSzSU8= -github.com/mr-tron/base58 v1.1.1/go.mod h1:xcD2VGqlgYjBdcBLw+TuYLr8afG+Hj8g2eTVqeSzSU8= github.com/mr-tron/base58 v1.1.2/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc= github.com/mr-tron/base58 v1.1.3/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc= github.com/mr-tron/base58 v1.2.0 h1:T/HDJBh4ZCPbU39/+c3rRvE0uKBQlU27+QI8LJ4t64o= @@ -1831,10 +1411,7 @@ github.com/multiformats/go-base32 v0.1.0/go.mod h1:Kj3tFY6zNr+ABYMqeUNeGvkIC/UYg github.com/multiformats/go-base36 v0.1.0/go.mod h1:kFGE83c6s80PklsHO9sRn2NCoffoRdUUOENyW/Vv6sM= github.com/multiformats/go-base36 v0.2.0 h1:lFsAbNOGeKtuKozrtBsAkSVhv1p9D0/qedU9rQyccr0= github.com/multiformats/go-base36 v0.2.0/go.mod h1:qvnKE++v+2MWCfePClUEjE78Z7P2a1UV0xHgWc0hkp4= -github.com/multiformats/go-multiaddr v0.0.1/go.mod h1:xKVEak1K9cS1VdmPZW3LSIb6lgmoS58qz/pzqmAxV44= -github.com/multiformats/go-multiaddr v0.0.2/go.mod h1:xKVEak1K9cS1VdmPZW3LSIb6lgmoS58qz/pzqmAxV44= github.com/multiformats/go-multiaddr v0.0.4/go.mod h1:xKVEak1K9cS1VdmPZW3LSIb6lgmoS58qz/pzqmAxV44= -github.com/multiformats/go-multiaddr v0.1.0/go.mod h1:xKVEak1K9cS1VdmPZW3LSIb6lgmoS58qz/pzqmAxV44= github.com/multiformats/go-multiaddr v0.1.1/go.mod h1:aMKBKNEYmzmDmxfX88/vz+J5IU55txyt0p4aiWVohjo= github.com/multiformats/go-multiaddr v0.2.0/go.mod 
h1:0nO36NvPpyV4QzvTLi/lafl2y95ncPj0vFwVF6k6wJ4= github.com/multiformats/go-multiaddr v0.2.1/go.mod h1:s/Apk6IyxfvMjDafnhJgJ3/46z7tZ04iMk5wP4QMGGE= @@ -1845,41 +1422,18 @@ github.com/multiformats/go-multiaddr v0.3.3/go.mod h1:lCKNGP1EQ1eZ35Za2wlqnabm9x github.com/multiformats/go-multiaddr v0.4.0/go.mod h1:YcpyLH8ZPudLxQlemYBPhSm0/oCXAT8Z4mzFpyoPyRc= github.com/multiformats/go-multiaddr v0.4.1/go.mod h1:3afI9HfVW8csiF8UZqtpYRiDyew8pRX7qLIGHu9FLuM= github.com/multiformats/go-multiaddr v0.5.0/go.mod h1:3KAxNkUqLTJ20AAwN4XVX4kZar+bR+gh4zgbfr3SNug= -github.com/multiformats/go-multiaddr v0.6.0/go.mod h1:F4IpaKZuPP360tOMn2Tpyu0At8w23aRyVqeK0DbFeGM= -github.com/multiformats/go-multiaddr v0.7.0/go.mod h1:Fs50eBDWvZu+l3/9S6xAE7ZYj6yhxlvaVZjakWN7xRs= -github.com/multiformats/go-multiaddr v0.8.0/go.mod h1:Fs50eBDWvZu+l3/9S6xAE7ZYj6yhxlvaVZjakWN7xRs= github.com/multiformats/go-multiaddr v0.13.0 h1:BCBzs61E3AGHcYYTv8dqRH43ZfyrqM8RXVPT8t13tLQ= github.com/multiformats/go-multiaddr v0.13.0/go.mod h1:sBXrNzucqkFJhvKOiwwLyqamGa/P5EIXNPLovyhQCII= -github.com/multiformats/go-multiaddr-dns v0.0.1/go.mod h1:9kWcqw/Pj6FwxAwW38n/9403szc57zJPs45fmnznu3Q= -github.com/multiformats/go-multiaddr-dns v0.0.2/go.mod h1:9kWcqw/Pj6FwxAwW38n/9403szc57zJPs45fmnznu3Q= -github.com/multiformats/go-multiaddr-dns v0.2.0/go.mod h1:TJ5pr5bBO7Y1B18djPuRsVkduhQH2YqYSbxWJzYGdK0= github.com/multiformats/go-multiaddr-dns v0.3.1 h1:QgQgR+LQVt3NPTjbrLLpsaT2ufAA2y0Mkk+QRVJbW3A= github.com/multiformats/go-multiaddr-dns v0.3.1/go.mod h1:G/245BRQ6FJGmryJCrOuTdB37AMA5AMOVuO6NY3JwTk= -github.com/multiformats/go-multiaddr-fmt v0.0.1/go.mod h1:aBYjqL4T/7j4Qx+R73XSv/8JsgnRFlf0w2KGLCmXl3Q= github.com/multiformats/go-multiaddr-fmt v0.1.0 h1:WLEFClPycPkp4fnIzoFoV9FVd49/eQsuaL3/CWe167E= github.com/multiformats/go-multiaddr-fmt v0.1.0/go.mod h1:hGtDIW4PU4BqJ50gW2quDuPVjyWNZxToGUh/HwTZYJo= -github.com/multiformats/go-multiaddr-net v0.0.1/go.mod h1:nw6HSxNmCIQH27XPGBuX+d1tnvM7ihcFwHMSstNAVUU= 
-github.com/multiformats/go-multiaddr-net v0.1.0/go.mod h1:5JNbcfBOP4dnhoZOv10JJVkJO0pCCEf8mTnipAo2UZQ= -github.com/multiformats/go-multiaddr-net v0.1.1/go.mod h1:5JNbcfBOP4dnhoZOv10JJVkJO0pCCEf8mTnipAo2UZQ= -github.com/multiformats/go-multiaddr-net v0.1.2/go.mod h1:QsWt3XK/3hwvNxZJp92iMQKME1qHfpYmyIjFVsSOY6Y= -github.com/multiformats/go-multiaddr-net v0.1.3/go.mod h1:ilNnaM9HbmVFqsb/qcNysjCu4PVONlrBZpHIrw/qQuA= -github.com/multiformats/go-multiaddr-net v0.1.4/go.mod h1:ilNnaM9HbmVFqsb/qcNysjCu4PVONlrBZpHIrw/qQuA= -github.com/multiformats/go-multiaddr-net v0.1.5/go.mod h1:ilNnaM9HbmVFqsb/qcNysjCu4PVONlrBZpHIrw/qQuA= github.com/multiformats/go-multiaddr-net v0.2.0/go.mod h1:gGdH3UXny6U3cKKYCvpXI5rnK7YaOIEOPVDI9tsJbEA= github.com/multiformats/go-multibase v0.0.1/go.mod h1:bja2MqRZ3ggyXtZSEDKpl0uO/gviWFaSteVbWT51qgs= github.com/multiformats/go-multibase v0.0.3/go.mod h1:5+1R4eQrT3PkYZ24C3W2Ue2tPwIdYQD509ZjSb5y9Oc= -github.com/multiformats/go-multibase v0.1.1/go.mod h1:ZEjHE+IsUrgp5mhlEAYjMtZwK1k4haNkcaPg9aoe1a8= github.com/multiformats/go-multibase v0.2.0 h1:isdYCVLvksgWlMW9OZRYJEa9pZETFivncJHmHnnd87g= github.com/multiformats/go-multibase v0.2.0/go.mod h1:bFBZX4lKCA/2lyOFSAoKH5SS6oPyjtnzK/XTFDPkNuk= -github.com/multiformats/go-multicodec v0.3.0/go.mod h1:qGGaQmioCDh+TeFOnxrbU0DaIPw8yFgAZgFG0V7p1qQ= -github.com/multiformats/go-multicodec v0.3.1-0.20210902112759-1539a079fd61/go.mod h1:1Hj/eHRaVWSXiSNNfcEPcwZleTmdNP81xlxDLnWU9GQ= -github.com/multiformats/go-multicodec v0.3.1-0.20211210143421-a526f306ed2c/go.mod h1:1Hj/eHRaVWSXiSNNfcEPcwZleTmdNP81xlxDLnWU9GQ= github.com/multiformats/go-multicodec v0.4.1/go.mod h1:1Hj/eHRaVWSXiSNNfcEPcwZleTmdNP81xlxDLnWU9GQ= -github.com/multiformats/go-multicodec v0.5.0/go.mod h1:DiY2HFaEp5EhEXb/iYzVAunmyX/aSFMxq2KMKfWEues= -github.com/multiformats/go-multicodec v0.6.0/go.mod h1:GUC8upxSBE4oG+q3kWZRw/+6yC1BqO550bjhWsJbZlw= -github.com/multiformats/go-multicodec v0.7.0/go.mod h1:GUC8upxSBE4oG+q3kWZRw/+6yC1BqO550bjhWsJbZlw= 
-github.com/multiformats/go-multicodec v0.8.0/go.mod h1:GUC8upxSBE4oG+q3kWZRw/+6yC1BqO550bjhWsJbZlw= -github.com/multiformats/go-multicodec v0.8.1/go.mod h1:L3QTQvMIaVBkXOXXtVmYE+LI16i14xuaojr/H7Ai54k= github.com/multiformats/go-multicodec v0.9.0 h1:pb/dlPnzee/Sxv/j4PmkDRxCOi3hXTz3IbPKOXWJkmg= github.com/multiformats/go-multicodec v0.9.0/go.mod h1:L3QTQvMIaVBkXOXXtVmYE+LI16i14xuaojr/H7Ai54k= github.com/multiformats/go-multihash v0.0.1/go.mod h1:w/5tugSrLEbWqlcgJabL3oHFKTwfvkofsjW2Qa1ct4U= @@ -1890,20 +1444,12 @@ github.com/multiformats/go-multihash v0.0.13/go.mod h1:VdAWLKTwram9oKAatUcLxBNUj github.com/multiformats/go-multihash v0.0.14/go.mod h1:VdAWLKTwram9oKAatUcLxBNUjdtcVwxObEQBtRfuyjc= github.com/multiformats/go-multihash v0.0.15/go.mod h1:D6aZrWNLFTV/ynMpKsNtB40mJzmCl4jb1alC0OvHiHg= github.com/multiformats/go-multihash v0.1.0/go.mod h1:RJlXsxt6vHGaia+S8We0ErjhojtKzPP2AH4+kYM7k84= -github.com/multiformats/go-multihash v0.2.0/go.mod h1:WxoMcYG85AZVQUyRyo9s4wULvW5qrI9vb2Lt6evduFc= -github.com/multiformats/go-multihash v0.2.1/go.mod h1:WxoMcYG85AZVQUyRyo9s4wULvW5qrI9vb2Lt6evduFc= github.com/multiformats/go-multihash v0.2.3 h1:7Lyc8XfX/IY2jWb/gI7JP+o7JEq9hOa7BFvVU9RSh+U= github.com/multiformats/go-multihash v0.2.3/go.mod h1:dXgKXCXjBzdscBLk9JkjINiEsCKRVch90MdaGiKsvSM= -github.com/multiformats/go-multistream v0.1.0/go.mod h1:fJTiDfXJVmItycydCnNx4+wSzZ5NwG2FEVAI30fiovg= github.com/multiformats/go-multistream v0.1.1/go.mod h1:KmHZ40hzVxiaiwlj3MEbYgK9JFk2/9UktWZAF54Du38= -github.com/multiformats/go-multistream v0.2.0/go.mod h1:5GZPQZbkWOLOn3J2y4Y99vVW7vOfsAflxARk3x14o6k= github.com/multiformats/go-multistream v0.2.1/go.mod h1:5GZPQZbkWOLOn3J2y4Y99vVW7vOfsAflxARk3x14o6k= -github.com/multiformats/go-multistream v0.2.2/go.mod h1:UIcnm7Zuo8HKG+HkWgfQsGL+/MIEhyTqbODbIUwSXKs= github.com/multiformats/go-multistream v0.3.0/go.mod h1:ODRoqamLUsETKS9BNcII4gcRsJBU5VAwRIv7O39cEXg= github.com/multiformats/go-multistream v0.3.1/go.mod h1:ODRoqamLUsETKS9BNcII4gcRsJBU5VAwRIv7O39cEXg= 
-github.com/multiformats/go-multistream v0.3.3/go.mod h1:ODRoqamLUsETKS9BNcII4gcRsJBU5VAwRIv7O39cEXg= -github.com/multiformats/go-multistream v0.4.0/go.mod h1:BS6ZSYcA4NwYEaIMeCtpJydp2Dc+fNRA6uJMSu/m8+4= -github.com/multiformats/go-multistream v0.4.1/go.mod h1:Mz5eykRVAjJWckE2U78c6xqdtyNUEhKSM0Lwar2p77Q= github.com/multiformats/go-multistream v0.5.0 h1:5htLSLl7lvJk3xx3qT/8Zm9J4K8vEOf/QGkvOGQAyiE= github.com/multiformats/go-multistream v0.5.0/go.mod h1:n6tMZiwiP2wUsR8DgfDWw1dydlEqV3l6N3/GBsX6ILA= github.com/multiformats/go-varint v0.0.1/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE= @@ -1944,7 +1490,6 @@ github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.12.0/go.mod h1:oUhWkIvk5aDxtKvDDuw8gItl8pKl42LzjC9KZE0HfGg= github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= @@ -1952,30 +1497,16 @@ github.com/onsi/ginkgo v1.16.2/go.mod h1:CObGmKUOKaSC0RjmoAK7tKyn4Azo5P2IWuoMnvw github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0= github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= -github.com/onsi/ginkgo/v2 v2.1.3/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c= -github.com/onsi/ginkgo/v2 v2.1.4/go.mod h1:um6tUpWM/cxCK3/FK8BXqEiUMUwRgSM4JXG47RKZmLU= -github.com/onsi/ginkgo/v2 v2.1.6/go.mod h1:MEH45j8TBi6u9BMogfbp0stKC5cdGjumZj5Y7AG4VIk= -github.com/onsi/ginkgo/v2 v2.2.0/go.mod 
h1:MEH45j8TBi6u9BMogfbp0stKC5cdGjumZj5Y7AG4VIk= -github.com/onsi/ginkgo/v2 v2.3.0/go.mod h1:Eew0uilEqZmIEZr8JrvYlvOM7Rr6xzTmMV8AyFNU9d0= -github.com/onsi/ginkgo/v2 v2.4.0/go.mod h1:iHkDK1fKGcBoEHT5W7YBq4RFWaQulw+caOMkAt4OrFo= -github.com/onsi/ginkgo/v2 v2.5.1/go.mod h1:63DOGlLAH8+REH8jUGdL3YpCpu7JODesutUjdENfUAc= -github.com/onsi/ginkgo/v2 v2.19.1 h1:QXgq3Z8Crl5EL1WBAC98A5sEBHARrAJNzAmMxzLcRF0= -github.com/onsi/ginkgo/v2 v2.19.1/go.mod h1:O3DtEWQkPa/F7fBMgmZQKKsluAy8pd3rEQdrjkPb9zA= +github.com/onsi/ginkgo/v2 v2.20.0 h1:PE84V2mHqoT1sglvHc8ZdQtPcwmvvt29WLEEO3xmdZw= +github.com/onsi/ginkgo/v2 v2.20.0/go.mod h1:lG9ey2Z29hR41WMVthyJBGUBcBhGOtoPF2VFMvBXFCI= github.com/onsi/gomega v1.4.1/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= -github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= github.com/onsi/gomega v1.9.0/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoTdcA= github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= github.com/onsi/gomega v1.13.0/go.mod h1:lRk9szgn8TxENtWd0Tp4c3wjlRfMTMH27I+3Je41yGY= -github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY= -github.com/onsi/gomega v1.19.0/go.mod h1:LY+I3pBVzYsTBU1AnDwOSxaYi9WoWiqgwooUqq9yPro= -github.com/onsi/gomega v1.20.1/go.mod h1:DtrZpjmvpn2mPm4YWQa0/ALMDj9v4YxLgojwPeREyVo= -github.com/onsi/gomega v1.21.1/go.mod h1:iYAIXgPSaDHak0LCMA+AWBpIKBr8WZicMxnE8luStNc= -github.com/onsi/gomega v1.22.1/go.mod h1:x6n7VNe4hw0vkyYUM4mjIXx3JbLiPaBPNgB7PRQ1tuM= -github.com/onsi/gomega v1.24.0/go.mod h1:Z/NWtiqwBrwUt4/2loMmHL63EDLnYHmVbuBpDr2vQAg= -github.com/onsi/gomega v1.34.0 h1:eSSPsPNp6ZpsG8X1OVmOTxig+CblTc4AxpPBykhe2Os= -github.com/onsi/gomega v1.34.0/go.mod h1:MIKI8c+f+QLWk+hxbePD4i0LMJSExPaZOVfkoex4cAo= +github.com/onsi/gomega v1.34.1 
h1:EUMJIKUjM8sKjYbtxQI9A4z2o+rruxnzNvpknOXie6k= +github.com/onsi/gomega v1.34.1/go.mod h1:kU1QgUvBDLXBJq618Xvm2LUX6rSAfRaFRTcdOeDLwwY= github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk= github.com/open-rpc/meta-schema v0.0.0-20201029221707-1b72ef2ea333 h1:CznVS40zms0Dj5he4ERo+fRPtO0qxUk8lA8Xu3ddet0= github.com/open-rpc/meta-schema v0.0.0-20201029221707-1b72ef2ea333/go.mod h1:Ag6rSXkHIckQmjFBCweJEEt1mrTPBv8b9W4aU/NQWfI= @@ -2014,8 +1545,6 @@ github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/9 github.com/pelletier/go-toml/v2 v2.1.0 h1:FnwAJ4oYMvbT/34k9zzHuZNrhlz48GB3/s6at6/MHO4= github.com/pelletier/go-toml/v2 v2.1.0/go.mod h1:tJU2Z3ZkXwnxa4DPO899bsyIoywizdUvyaeZurnPPDc= github.com/performancecopilot/speed v3.0.0+incompatible/go.mod h1:/CLtqpZ5gBg1M9iaPbIdPPGyKcA8hKdoy6hAWba7Yac= -github.com/petar/GoLLRB v0.0.0-20210522233825-ae3b015fd3e9 h1:1/WtZae0yGtPq+TI6+Tv1WTxkukpXeMlviSxvL7SRgk= -github.com/petar/GoLLRB v0.0.0-20210522233825-ae3b015fd3e9/go.mod h1:x3N5drFsm2uilKKuuYo6LdyD8vZAW55sH/9w+pbo1sw= github.com/peterh/liner v1.0.1-0.20180619022028-8c1271fcf47f/go.mod h1:xIteQHvHuaLYG9IFj6mSxM0fCKrs34IrEQUhOYuGPHc= github.com/peterh/liner v1.1.1-0.20190123174540-a2c9a5303de7/go.mod h1:CRroGNssyjTd/qIG2FyxByd2S8JEAZXBl4qUrZf8GS0= github.com/petermattis/goid v0.0.0-20180202154549-b0b1615b78e5/go.mod h1:jvVRKCrJTQWu0XVbaOlby/2lO20uSCHEMzzplHXte1o= @@ -2032,8 +1561,8 @@ github.com/pion/dtls/v2 v2.2.12 h1:KP7H5/c1EiVAAKUmXyCzPiQe5+bCJrpOeKg/L05dunk= github.com/pion/dtls/v2 v2.2.12/go.mod h1:d9SYc9fch0CqK90mRk1dC7AkzzpwJj6u2GU3u+9pqFE= github.com/pion/ice/v2 v2.3.34 h1:Ic1ppYCj4tUOcPAp76U6F3fVrlSw8A9JtRXLqw6BbUM= github.com/pion/ice/v2 v2.3.34/go.mod h1:mBF7lnigdqgtB+YHkaY/Y6s6tsyRyo4u4rPGRuOjUBQ= -github.com/pion/interceptor v0.1.29 h1:39fsnlP1U8gw2JzOFWdfCU82vHvhW9o0rZnZF56wF+M= -github.com/pion/interceptor v0.1.29/go.mod h1:ri+LGNjRUc5xUNtDEPzfdkmSqISixVTBF/z/Zms/6T4= 
+github.com/pion/interceptor v0.1.30 h1:au5rlVHsgmxNi+v/mjOPazbW1SHzfx7/hYOEYQnUcxA= +github.com/pion/interceptor v0.1.30/go.mod h1:RQuKT5HTdkP2Fi0cuOS5G5WNymTjzXaGF75J4k7z2nc= github.com/pion/logging v0.2.2 h1:M9+AIj/+pxNsDfAT64+MAVgJO0rsyLnoJKCqf//DoeY= github.com/pion/logging v0.2.2/go.mod h1:k0/tDVsRCX2Mb2ZEmTqNa7CWsQPc+YYCB7Q+5pahoms= github.com/pion/mdns v0.0.12 h1:CiMYlY+O0azojWDmxdNr7ADGrnZ+V6Ilfner+6mSVK8= @@ -2044,10 +1573,10 @@ github.com/pion/rtcp v1.2.12/go.mod h1:sn6qjxvnwyAkkPzPULIbVqSKI5Dv54Rv7VG0kNxh9 github.com/pion/rtcp v1.2.14 h1:KCkGV3vJ+4DAJmvP0vaQShsb0xkRfWkO540Gy102KyE= github.com/pion/rtcp v1.2.14/go.mod h1:sn6qjxvnwyAkkPzPULIbVqSKI5Dv54Rv7VG0kNxh9L4= github.com/pion/rtp v1.8.3/go.mod h1:pBGHaFt/yW7bf1jjWAoUjpSNoDnw98KTMg+jWWvziqU= -github.com/pion/rtp v1.8.8 h1:EtYFHI0rpUEjT/RMnGfb1vdJhbYmPG77szD72uUnSxs= -github.com/pion/rtp v1.8.8/go.mod h1:pBGHaFt/yW7bf1jjWAoUjpSNoDnw98KTMg+jWWvziqU= -github.com/pion/sctp v1.8.20 h1:sOc3lkV/tQaP57ZUEXIMdM2V92IIB2ia5v/ygnBxaEg= -github.com/pion/sctp v1.8.20/go.mod h1:oTxw8i5m+WbDHZJL/xUpe6CPIn1Y0GIKKwTLF4h53H8= +github.com/pion/rtp v1.8.9 h1:E2HX740TZKaqdcPmf4pw6ZZuG8u5RlMMt+l3dxeu6Wk= +github.com/pion/rtp v1.8.9/go.mod h1:pBGHaFt/yW7bf1jjWAoUjpSNoDnw98KTMg+jWWvziqU= +github.com/pion/sctp v1.8.33 h1:dSE4wX6uTJBcNm8+YlMg7lw1wqyKHggsP5uKbdj+NZw= +github.com/pion/sctp v1.8.33/go.mod h1:beTnqSzewI53KWoG3nqB282oDMGrhNxBdb+JZnkCwRM= github.com/pion/sdp/v3 v3.0.9 h1:pX++dCHoHUwq43kuwf3PyJfHlwIj4hXA7Vrifiq0IJY= github.com/pion/sdp/v3 v3.0.9/go.mod h1:B5xmvENq5IXJimIO4zfp6LAe1fD9N+kFv+V/1lOdz8M= github.com/pion/srtp/v2 v2.0.20 h1:HNNny4s+OUmG280ETrCdgFndp4ufx3/uy85EawYEhTk= @@ -2060,14 +1589,13 @@ github.com/pion/transport/v2 v2.2.4/go.mod h1:q2U/tf9FEfnSBGSW6w5Qp5PFWRLRj3NjLh github.com/pion/transport/v2 v2.2.10 h1:ucLBLE8nuxiHfvkFKnkDQRYWYfp8ejf4YBOPfaQpw6Q= github.com/pion/transport/v2 v2.2.10/go.mod h1:sq1kSLWs+cHW9E+2fJP95QudkzbK7wscs8yYgQToO5E= github.com/pion/transport/v3 v3.0.1/go.mod 
h1:UY7kiITrlMv7/IKgd5eTUcaahZx5oUN3l9SzK5f5xE0= -github.com/pion/transport/v3 v3.0.6 h1:k1mQU06bmmX143qSWgXFqSH1KUJceQvIUuVH/K5ELWw= -github.com/pion/transport/v3 v3.0.6/go.mod h1:HvJr2N/JwNJAfipsRleqwFoR3t/pWyHeZUs89v3+t5s= +github.com/pion/transport/v3 v3.0.7 h1:iRbMH05BzSNwhILHoBoAPxoB9xQgOaJk+591KC9P1o0= +github.com/pion/transport/v3 v3.0.7/go.mod h1:YleKiTZ4vqNxVwh77Z0zytYi7rXHl7j6uPLGhhz9rwo= github.com/pion/turn/v2 v2.1.3/go.mod h1:huEpByKKHix2/b9kmTAM3YoX6MKP+/D//0ClgUYR2fY= github.com/pion/turn/v2 v2.1.6 h1:Xr2niVsiPTB0FPtt+yAWKFUkU1eotQbGgpTIld4x1Gc= github.com/pion/turn/v2 v2.1.6/go.mod h1:huEpByKKHix2/b9kmTAM3YoX6MKP+/D//0ClgUYR2fY= github.com/pion/webrtc/v3 v3.3.0 h1:Rf4u6n6U5t5sUxhYPQk/samzU/oDv7jk6BA5hyO2F9I= github.com/pion/webrtc/v3 v3.3.0/go.mod h1:hVmrDJvwhEertRWObeb1xzulzHGeVUoPlWvxdGzcfU0= -github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= @@ -2077,10 +1605,6 @@ github.com/pkg/term v0.0.0-20180730021639-bffc007b7fd5/go.mod h1:eCbImbZ95eXtAUI github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/polydawn/refmt v0.0.0-20190221155625-df39d6c2d992/go.mod h1:uIp+gprXxxrWSjjklXD+mN4wed/tMfjMMmN/9+JsA9o= -github.com/polydawn/refmt v0.0.0-20190408063855-01bf1e26dd14/go.mod h1:uIp+gprXxxrWSjjklXD+mN4wed/tMfjMMmN/9+JsA9o= -github.com/polydawn/refmt v0.0.0-20190807091052-3d65705ee9f1/go.mod h1:uIp+gprXxxrWSjjklXD+mN4wed/tMfjMMmN/9+JsA9o= -github.com/polydawn/refmt 
v0.0.0-20201211092308-30ac6d18308e/go.mod h1:uIp+gprXxxrWSjjklXD+mN4wed/tMfjMMmN/9+JsA9o= github.com/polydawn/refmt v0.89.0 h1:ADJTApkvkeBZsN0tBTx8QjpD9JkmxbKp0cxfr9qszm4= github.com/polydawn/refmt v0.89.0/go.mod h1:/zvteZs/GwLtCgZ4BL6CBsk9IKIlexP43ObX9AxTqTw= github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= @@ -2096,17 +1620,14 @@ github.com/prometheus/client_golang v1.9.0/go.mod h1:FqZLKOZnGdFAhOK4nqGHa7D66Id github.com/prometheus/client_golang v1.10.0/go.mod h1:WJM3cc3yu7XKBKa/I8WeZm+V3eltZnBwfENSU7mdogU= github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY= -github.com/prometheus/client_golang v1.13.0/go.mod h1:vTeo+zgvILHsnnj/39Ou/1fPN5nJFOEMgftOUOmlvYQ= -github.com/prometheus/client_golang v1.14.0/go.mod h1:8vpkKitgIVNcqrRBWh1C4TIUQgYNtG/XQE4E/Zae36Y= -github.com/prometheus/client_golang v1.19.1 h1:wZWJDwK+NameRJuPGDhlnFgx8e8HN3XHQeLaYJFJBOE= -github.com/prometheus/client_golang v1.19.1/go.mod h1:mP78NwGzrVks5S2H6ab8+ZZGJLZUq1hoULYBAYBw1Ho= +github.com/prometheus/client_golang v1.20.0 h1:jBzTZ7B099Rg24tny+qngoynol8LtVYlA2bqx3vEloI= +github.com/prometheus/client_golang v1.20.0/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.1.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.2.0/go.mod 
h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.3.0/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w= github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= github.com/prometheus/common v0.0.0-20180801064454-c7de2306084e/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= @@ -2123,7 +1644,6 @@ github.com/prometheus/common v0.18.0/go.mod h1:U+gB1OBLb1lF3O42bTCL+FK18tX9Oar16 github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= github.com/prometheus/common v0.33.0/go.mod h1:gB3sOl7P0TvJabZpLY5uQMpUqRCPPCyRLCZYc7JZTNE= -github.com/prometheus/common v0.37.0/go.mod h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJFhYO5B3mfA= github.com/prometheus/common v0.55.0 h1:KEi6DK7lXW/m7Ig5i47x0vRzuBsHuvJdi5ee6Y3G1dc= github.com/prometheus/common v0.55.0/go.mod h1:2SECS4xJG1kd8XF9IcM1gMX6510RAEL65zxzNImwdc8= github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= @@ -2137,19 +1657,13 @@ github.com/prometheus/procfs v0.2.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4O github.com/prometheus/procfs v0.3.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= -github.com/prometheus/procfs v0.8.0/go.mod h1:z7EfXMXOkbkqb9IINtpCn86r/to3BnA0uaxHdg830/4= github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= github.com/quic-go/qpack v0.4.0 
h1:Cr9BXA1sQS2SmDUWjSofMPNKmvF6IiIfDRmgU0w1ZCo= github.com/quic-go/qpack v0.4.0/go.mod h1:UZVnYIfi5GRk+zI9UMaCPsmZ2xKJP7XBUvVyT1Knj9A= -github.com/quic-go/qtls-go1-18 v0.2.0/go.mod h1:moGulGHK7o6O8lSPSZNoOwcLvJKJ85vVNc7oJFD65bc= -github.com/quic-go/qtls-go1-19 v0.2.0/go.mod h1:ySOI96ew8lnoKPtSqx2BlI5wCpUVPT05RMAlajtnyOI= -github.com/quic-go/qtls-go1-20 v0.1.0/go.mod h1:JKtK6mjbAVcUTN/9jZpvLbGxvdWIKS8uT7EiStoU1SM= -github.com/quic-go/quic-go v0.32.0/go.mod h1:/fCsKANhQIeD5l76c2JFU+07gVE3KaA0FP+0zMWwfwo= -github.com/quic-go/quic-go v0.45.2 h1:DfqBmqjb4ExSdxRIb/+qXhPC+7k6+DUNZha4oeiC9fY= -github.com/quic-go/quic-go v0.45.2/go.mod h1:1dLehS7TIR64+vxGR70GDcatWTOtMX2PUtnKsjbTurI= -github.com/quic-go/webtransport-go v0.5.1/go.mod h1:OhmmgJIzTTqXK5xvtuX0oBpLV2GkLWNDA+UeTGJXErU= +github.com/quic-go/quic-go v0.46.0 h1:uuwLClEEyk1DNvchH8uCByQVjo3yKL9opKulExNDs7Y= +github.com/quic-go/quic-go v0.46.0/go.mod h1:1dLehS7TIR64+vxGR70GDcatWTOtMX2PUtnKsjbTurI= github.com/quic-go/webtransport-go v0.8.0 h1:HxSrwun11U+LlmwpgM1kEqIqH90IT4N8auv/cD7QFJg= github.com/quic-go/webtransport-go v0.8.0/go.mod h1:N99tjprW432Ut5ONql/aUhSLT0YVSlwHohQsuac9WaM= github.com/rakyll/statik v0.1.7 h1:OF3QCZUuyPxuGEP7B4ypUa7sB/iHtqOTDYZXGM8KOdQ= @@ -2172,8 +1686,6 @@ github.com/rjeczalik/notify v0.9.1/go.mod h1:rKwnCoCGeuQnwBtTSPL9Dad03Vh2n40ePRr github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= -github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= -github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= github.com/rogpeppe/go-internal v1.12.0/go.mod 
h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= github.com/rollkit/go-da v0.5.0 h1:sQpZricNS+2TLx3HMjNWhtRfqtvVC/U4pWHpfUz3eN4= @@ -2181,9 +1693,7 @@ github.com/rollkit/go-da v0.5.0/go.mod h1:VsUeAoPvKl4Y8wWguu/VibscYiFFePkkrvZWyT github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU= github.com/rs/cors v1.8.3 h1:O+qNyWn7Z+F9M0ILBHgMVPuB1xTOucVd5gtaYyXBpRo= github.com/rs/cors v1.8.3/go.mod h1:XyqrcTp5zjWr1wsJ8PIRZssZ8b/WMcMf71DJnit4EMU= -github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ= github.com/rs/xid v1.5.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg= -github.com/rs/zerolog v1.21.0/go.mod h1:ZPhntP/xmq1nnND05hhpAh2QMhSsA4UN3MGZ6O2J3hM= github.com/rs/zerolog v1.33.0 h1:1cU2KZkvPxNyfgEmhHAz/1A9Bz+llsdYzklWFzgp0r8= github.com/rs/zerolog v1.33.0/go.mod h1:/7mN4D5sKwJLZQ2b/znpjC3/GQWY/xaDXUM0kKWRHss= github.com/russross/blackfriday v1.5.2 h1:HyvC0ARfnZBqnXwABFeSZHpKvJHJJfPz81GNueLj0oo= @@ -2192,7 +1702,6 @@ github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQD github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= -github.com/samber/lo v1.36.0/go.mod h1:HLeWcJRRyLKp3+/XBJvOrerCQn9mhdKMHyd7IRlgeQ8= github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E= github.com/sasha-s/go-deadlock v0.3.1 h1:sqv7fDNShgjcaxkO0JNcOAlr8B9+cV5Ey/OB71efZx0= github.com/sasha-s/go-deadlock v0.3.1/go.mod h1:F73l+cr82YSh10GxyRI6qZiCgK64VaZjwesgfQ1/iLM= @@ -2235,11 +1744,8 @@ github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= github.com/sirupsen/logrus v1.9.3/go.mod 
h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= -github.com/smartystreets/assertions v1.0.0/go.mod h1:kHHU4qYBaI3q23Pp3VPrmWhuIUrLW/7eUrw0BU5VaoM= github.com/smartystreets/assertions v1.2.0 h1:42S6lae5dvLc7BrLu/0ugRtcFVjoJNMC/N3yZFZkDFs= github.com/smartystreets/assertions v1.2.0/go.mod h1:tcbTF8ujkAEcZ8TElKY+i30BzYlVhC/LOxJk7iOWnoo= -github.com/smartystreets/goconvey v0.0.0-20190222223459-a17d461953aa/go.mod h1:2RVY1rIf+2J2o/IM9+vPq9RzmHDSseB7FoXiSNIUsoU= -github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= github.com/smartystreets/goconvey v1.7.2 h1:9RBaZCeXEQ3UselpuwUQHltGVXvdwm6cv1hgR6gDIPg= github.com/smartystreets/goconvey v1.7.2/go.mod h1:Vw0tHAZW6lzCRk3xgdin6fKYcG+G3Pg9vgXWeJpQFMM= @@ -2297,7 +1803,6 @@ github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.8.3/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= @@ -2315,7 +1820,6 @@ github.com/tendermint/go-amino v0.16.0 h1:GyhmgQKvqF82e2oZeuMSp9JTN0N09emoSZlb2l github.com/tendermint/go-amino v0.16.0/go.mod h1:TQU0M1i/ImAo+tYpZi73AU3V/dKeCoMC9Sphe2ZwGME= github.com/tendermint/tm-db v0.6.7 h1:fE00Cbl0jayAoqlExN6oyQJ7fR/ZtoVOmvPJ//+shu8= 
github.com/tendermint/tm-db v0.6.7/go.mod h1:byQDzFkZV1syXr/ReXS808NxA2xvyuuVgXOJ/088L6I= -github.com/thoas/go-funk v0.9.1/go.mod h1:+IWnUfUmFO1+WVYQWQtIJHeRRdaIyyYglZN7xzUPe4Q= github.com/tidwall/btree v1.5.0 h1:iV0yVY/frd7r6qGBXfEYs7DH0gTDgrKTrDjS7xt/IyQ= github.com/tidwall/btree v1.5.0/go.mod h1:LGm8L/DZjPLmeWGjv5kFrY8dL4uVhMmzmmLYmsObdKE= github.com/tidwall/gjson v1.6.0/go.mod h1:P256ACg0Mn+j1RXIDXoss50DeIABTYK1PULOJHhxOls= @@ -2333,7 +1837,6 @@ github.com/tidwall/pretty v1.2.1/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhso github.com/tidwall/sjson v1.2.4/go.mod h1:098SZ494YoMWPmMO6ct4dcFnqxwj9r/gF0Etp19pSNM= github.com/tinylib/msgp v1.0.2/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE= github.com/tinylib/msgp v1.1.5/go.mod h1:eQsjooMTnV42mHu917E26IogZ2930nFyBQdofk10Udg= -github.com/tj/assert v0.0.3/go.mod h1:Ne6X72Q+TB1AteidzQncjw9PabbMp4PBMZ1k+vd1Pvk= github.com/tklauser/go-sysconf v0.3.5/go.mod h1:MkWzOF4RMCshBAMXuhXJs64Rte09mITnppBXY/rYEFI= github.com/tklauser/go-sysconf v0.3.12 h1:0QaGUFOdQaIVdPgfITYzaTegZvdCjmYO52cSFAEVmqU= github.com/tklauser/go-sysconf v0.3.12/go.mod h1:Ho14jnntGE1fpdOqQEEaiKRpvIavV0hSfmBq8nJbHYI= @@ -2347,7 +1850,6 @@ github.com/tyler-smith/go-bip39 v1.0.1-0.20181017060643-dbb3b84ba2ef/go.mod h1:s github.com/tyler-smith/go-bip39 v1.0.2/go.mod h1:sJ5fKU0s6JVwZjjcUEX2zFOnvq0ASQ2K9Zr6cf67kNs= github.com/tyler-smith/go-bip39 v1.1.0 h1:5eUemwrMargf3BSLRRCalXT93Ns6pQJIjYQN2nyfOP8= github.com/tyler-smith/go-bip39 v1.1.0/go.mod h1:gUYDtqQw1JS3ZJ8UWVcGTGqqr6YIN3CWg+kkNaLt55U= -github.com/ucarion/urlpath v0.0.0-20200424170820-7ccc79b76bbb/go.mod h1:ikPs9bRWicNw3S7XpJ8sK/smGwU9WcSVU3dy9qahYBM= github.com/ugorji/go v1.1.7 h1:/68gy2h+1mWMrwZFeD1kQialdSzAb432dtpeJ42ovdo= github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVMw= github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= @@ -2371,39 +1873,19 @@ github.com/viant/assertly v0.4.8/go.mod 
h1:aGifi++jvCrUaklKEKT0BU95igDNaqkvz+49u github.com/viant/toolbox v0.24.0/go.mod h1:OxMCG57V0PXuIP2HNQrtJf2CjqdmbrOx5EkMILuUhzM= github.com/vmihailenco/msgpack/v5 v5.3.5/go.mod h1:7xyJ9e+0+9SaZT0Wt1RGleJXzli6Q/V5KbhBonMG9jc= github.com/vmihailenco/tagparser/v2 v2.0.0/go.mod h1:Wri+At7QHww0WTrCBeu4J6bNtoV6mEfg5OIWRZA9qds= -github.com/wangjia184/sortedset v0.0.0-20160527075905-f5d03557ba30/go.mod h1:YkocrP2K2tcw938x9gCOmT5G5eCD6jsTz0SZuyAqwIE= -github.com/warpfork/go-testmark v0.3.0/go.mod h1:jhEf8FVxd+F17juRubpmut64NEG6I2rgkUhlcqqXwE0= -github.com/warpfork/go-testmark v0.10.0/go.mod h1:jhEf8FVxd+F17juRubpmut64NEG6I2rgkUhlcqqXwE0= -github.com/warpfork/go-testmark v0.11.0/go.mod h1:jhEf8FVxd+F17juRubpmut64NEG6I2rgkUhlcqqXwE0= github.com/warpfork/go-testmark v0.12.1 h1:rMgCpJfwy1sJ50x0M0NgyphxYYPMOODIJHhsXyEHU0s= github.com/warpfork/go-testmark v0.12.1/go.mod h1:kHwy7wfvGSPh1rQJYKayD4AbtNaeyZdcGi9tNJTaa5Y= -github.com/warpfork/go-wish v0.0.0-20180510122957-5ad1f5abf436/go.mod h1:x6AKhvSSexNrVSrViXSHUEbICjmGXhtgABaHIySUSGw= -github.com/warpfork/go-wish v0.0.0-20190328234359-8b3e70f8e830/go.mod h1:x6AKhvSSexNrVSrViXSHUEbICjmGXhtgABaHIySUSGw= -github.com/warpfork/go-wish v0.0.0-20200122115046-b9ea61034e4a/go.mod h1:x6AKhvSSexNrVSrViXSHUEbICjmGXhtgABaHIySUSGw= github.com/warpfork/go-wish v0.0.0-20220906213052-39a1cc7a02d0 h1:GDDkbFiaK8jsSDJfjId/PEGEShv6ugrt4kYsC5UIDaQ= github.com/warpfork/go-wish v0.0.0-20220906213052-39a1cc7a02d0/go.mod h1:x6AKhvSSexNrVSrViXSHUEbICjmGXhtgABaHIySUSGw= -github.com/whyrusleeping/base32 v0.0.0-20170828182744-c30ac30633cc/go.mod h1:r45hJU7yEoA81k6MWNhpMj/kms0n14dkzkxYHoB96UM= -github.com/whyrusleeping/cbor v0.0.0-20171005072247-63513f603b11 h1:5HZfQkwe0mIfyDmc1Em5GqlNRzcdtlv4HTNmdpt7XH0= -github.com/whyrusleeping/cbor v0.0.0-20171005072247-63513f603b11/go.mod h1:Wlo/SzPmxVp6vXpGt/zaXhHH0fn4IxgqZc82aKg6bpQ= -github.com/whyrusleeping/cbor-gen v0.0.0-20200123233031-1cdf64d27158/go.mod h1:Xj/M2wWU+QdTdRbu/L/1dIZY8/Wb2K9pAhtroQuxJJI= 
-github.com/whyrusleeping/cbor-gen v0.0.0-20221220214510-0333c149dec0/go.mod h1:fgkXqYy7bV2cFeIEOkVTZS/WjXARfBqSH6Q2qHL33hQ= -github.com/whyrusleeping/cbor-gen v0.0.0-20230126041949-52956bd4c9aa/go.mod h1:fgkXqYy7bV2cFeIEOkVTZS/WjXARfBqSH6Q2qHL33hQ= -github.com/whyrusleeping/cbor-gen v0.1.2 h1:WQFlrPhpcQl+M2/3dP5cvlTLWPVsL6LGBb9jJt6l/cA= -github.com/whyrusleeping/cbor-gen v0.1.2/go.mod h1:pM99HXyEbSQHcosHc0iW7YFmwnscr+t9Te4ibko05so= -github.com/whyrusleeping/chunker v0.0.0-20181014151217-fe64bd25879f h1:jQa4QT2UP9WYv2nzyawpKMOCl+Z/jW7djv2/J50lj9E= -github.com/whyrusleeping/chunker v0.0.0-20181014151217-fe64bd25879f/go.mod h1:p9UJB6dDgdPgMJZs7UjUOdulKyRr9fqkS+6JKAInPy8= github.com/whyrusleeping/go-keyspace v0.0.0-20160322163242-5b898ac5add1 h1:EKhdznlJHPMoKr0XTrX+IlJs1LH3lyx2nfr1dOlZ79k= github.com/whyrusleeping/go-keyspace v0.0.0-20160322163242-5b898ac5add1/go.mod h1:8UvriyWtv5Q5EOgjHaSseUEdkQfvwFv1I/In/O2M9gc= github.com/whyrusleeping/go-logging v0.0.0-20170515211332-0457bb6b88fc/go.mod h1:bopw91TMyo8J3tvftk8xmU2kPmlrt4nScJQZU2hE5EM= -github.com/whyrusleeping/go-logging v0.0.1/go.mod h1:lDPYj54zutzG1XYfHAhcc7oNXEburHQBn+Iqd4yS4vE= -github.com/whyrusleeping/go-notifier v0.0.0-20170827234753-097c5d47330f/go.mod h1:cZNvX9cFybI01GriPRMXDtczuvUhgbcYr9iCGaNlRv8= -github.com/whyrusleeping/mafmt v1.2.8/go.mod h1:faQJFPbLSxzD9xpA02ttW/tS9vZykNvXwGvqIpk20FA= -github.com/whyrusleeping/mdns v0.0.0-20180901202407-ef14215e6b30/go.mod h1:j4l84WPFclQPj320J9gp0XwNKBb3U0zt5CBqjPp22G4= github.com/whyrusleeping/mdns v0.0.0-20190826153040-b9b60ed33aa9/go.mod h1:j4l84WPFclQPj320J9gp0XwNKBb3U0zt5CBqjPp22G4= github.com/whyrusleeping/multiaddr-filter v0.0.0-20160516205228-e903e4adabd7/go.mod h1:X2c0RVCI1eSUFI8eLcY3c0423ykwiUdxLJtkDvruhjI= github.com/willf/bitset v1.1.3/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4= -github.com/wlynxg/anet v0.0.3 h1:PvR53psxFXstc12jelG6f1Lv4MWqE0tI76/hHGjh9rg= github.com/wlynxg/anet v0.0.3/go.mod h1:eay5PRQr7fIVAMbTbchTnO9gG65Hg/uYGdc7mguHxoA= 
+github.com/wlynxg/anet v0.0.4 h1:0de1OFQxnNqAu+x2FAKKCVIrnfGKQbs7FQz++tB0+Uw= +github.com/wlynxg/anet v0.0.4/go.mod h1:eay5PRQr7fIVAMbTbchTnO9gG65Hg/uYGdc7mguHxoA= github.com/x-cray/logrus-prefixed-formatter v0.5.2/go.mod h1:2duySbKsL6M18s5GU7VPsoEPHyzalCE06qoARUCeBBE= github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= github.com/xlab/treeprint v0.0.0-20180616005107-d6fb6747feb6/go.mod h1:ce1O1j6UtZfjr22oyGxGLbauSBp2YVXpARAosm7dHBg= @@ -2436,7 +1918,6 @@ go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= go.opencensus.io v0.20.2/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= -go.opencensus.io v0.22.1/go.mod h1:Ap50jQcDJrx6rB6VgeeFPtuPIf3wMRvRfrfYDO6+BmA= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= @@ -2450,9 +1931,6 @@ go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.52.0 h1:9l89oX4 go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.52.0/go.mod h1:XLZfZboOJWHNKUv7eH0inh0E9VV6eWDFB/9yJyTLPp0= go.opentelemetry.io/contrib/instrumentation/runtime v0.45.0 h1:2JydY5UiDpqvj2p7sO9bgHuhTy4hgTZ0ymehdq/Ob0Q= go.opentelemetry.io/contrib/instrumentation/runtime v0.45.0/go.mod h1:ch3a5QxOqVWxas4CzjCFFOOQe+7HgAXC/N1oVxS9DK4= -go.opentelemetry.io/otel v0.20.0/go.mod h1:Y3ugLH2oa81t5QO+Lty+zXf8zC9L26ax4Nzoxm/dooo= -go.opentelemetry.io/otel v1.7.0/go.mod h1:5BdUoMIz5WEs0vt0CUEMtSSaTSHBBVwrhnz7+nrD5xk= -go.opentelemetry.io/otel v1.13.0/go.mod h1:FH3RtdZCzRkJYFTCsAKDy9l/XYjMdNv6QrkFFB8DvVg= go.opentelemetry.io/otel v1.21.0/go.mod h1:QZzNPQPm1zLX4gZK4cMi+71eaorMSGT3A4znnUvNNEo= go.opentelemetry.io/otel 
v1.27.0 h1:9BZoF3yMK/O1AafMiQTVu0YDj5Ea4hPhxCs7sGva+cg= go.opentelemetry.io/otel v1.27.0/go.mod h1:DMpAK8fzYRzs+bi3rS5REupisuqTheUlSZJ1WnZaPAQ= @@ -2464,20 +1942,14 @@ go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.27.0 h1:QY7/0 go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.27.0/go.mod h1:HVkSiDhTM9BoUJU8qE6j2eSWLLXvi1USXjyd2BXT8PY= go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.27.0 h1:/0YaXu3755A/cFbtXp+21lkXgI0QE5avTWA2HjU9/WE= go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.27.0/go.mod h1:m7SFxp0/7IxmJPLIY3JhOcU9CoFzDaCPL6xxQIxhA+o= -go.opentelemetry.io/otel/metric v0.20.0/go.mod h1:598I5tYlH1vzBjn+BTuhzTCSb/9debfNp6R3s7Pr1eU= go.opentelemetry.io/otel/metric v1.21.0/go.mod h1:o1p3CA8nNHW8j5yuQLdc1eeqEaPfzug24uvsyIEJRWM= go.opentelemetry.io/otel/metric v1.27.0 h1:hvj3vdEKyeCi4YaYfNjv2NUje8FqKqUY8IlF0FxV/ik= go.opentelemetry.io/otel/metric v1.27.0/go.mod h1:mVFgmRlhljgBiuk/MP/oKylr4hs85GZAylncepAX/ak= -go.opentelemetry.io/otel/oteltest v0.20.0/go.mod h1:L7bgKf9ZB7qCwT9Up7i9/pn0PWIa9FqQ2IQ8LoxiGnw= -go.opentelemetry.io/otel/sdk v0.20.0/go.mod h1:g/IcepuwNsoiX5Byy2nNV0ySUF1em498m7hBWC279Yc= go.opentelemetry.io/otel/sdk v1.21.0/go.mod h1:Nna6Yv7PWTdgJHVRD9hIYywQBRx7pbox6nwBnZIxl/E= go.opentelemetry.io/otel/sdk v1.27.0 h1:mlk+/Y1gLPLn84U4tI8d3GNJmGT/eXe3ZuOXN9kTWmI= go.opentelemetry.io/otel/sdk v1.27.0/go.mod h1:Ha9vbLwJE6W86YstIywK2xFfPjbWlCuwPtMkKdz/Y4A= go.opentelemetry.io/otel/sdk/metric v1.27.0 h1:5uGNOlpXi+Hbo/DRoI31BSb1v+OGcpv2NemcCrOL8gI= go.opentelemetry.io/otel/sdk/metric v1.27.0/go.mod h1:we7jJVrYN2kh3mVBlswtPU22K0SA+769l93J6bsyvqw= -go.opentelemetry.io/otel/trace v0.20.0/go.mod h1:6GjCW8zgDjwGHGa6GkyeB8+/5vjT16gUEi0Nf1iBdgw= -go.opentelemetry.io/otel/trace v1.7.0/go.mod h1:fzLSB9nqR2eXzxPXb2JW9IKE+ScyXA48yyE4TNvoHqU= -go.opentelemetry.io/otel/trace v1.13.0/go.mod h1:muCvmmO9KKpvuXSf3KKAXXB2ygNYHQ+ZfI5X08d3tds= go.opentelemetry.io/otel/trace v1.21.0/go.mod 
h1:LGbsEB0f9LGjN+OZaQQ26sohbOmiMR+BaslueVtS/qQ= go.opentelemetry.io/otel/trace v1.27.0 h1:IqYb813p7cmbHk0a5y6pD5JPakbVfftRXABGt5/Rscw= go.opentelemetry.io/otel/trace v1.27.0/go.mod h1:6RiD1hkAprV4/q+yd2ln1HG9GoPx39SuvvstaLBl+l4= @@ -2490,16 +1962,10 @@ go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= -go.uber.org/atomic v1.10.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= -go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE= -go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= -go.uber.org/dig v1.15.0/go.mod h1:pKHs0wMynzL6brANhB2hLMro+zalv1osARTviTcqHLM= -go.uber.org/dig v1.17.1 h1:Tga8Lz8PcYNsWsyHMZ1Vm0OQOUaJNDyvPImgbAu9YSc= -go.uber.org/dig v1.17.1/go.mod h1:Us0rSJiThwCv2GteUN0Q7OKvU7n5J4dxZ9JKUXozFdE= -go.uber.org/fx v1.18.2/go.mod h1:g0V1KMQ66zIRk8bLu3Ea5Jt2w/cHlOIp4wdRsgh0JaY= -go.uber.org/fx v1.22.1 h1:nvvln7mwyT5s1q201YE29V/BFrGor6vMiDNpU/78Mys= -go.uber.org/fx v1.22.1/go.mod h1:HT2M7d7RHo+ebKGh9NRcrsrHHfpZ60nW3QRubMRfv48= -go.uber.org/goleak v1.0.0/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= +go.uber.org/dig v1.18.0 h1:imUL1UiY0Mg4bqbFfsRQO5G4CGRBec/ZujWTvSVp3pw= +go.uber.org/dig v1.18.0/go.mod h1:Us0rSJiThwCv2GteUN0Q7OKvU7n5J4dxZ9JKUXozFdE= +go.uber.org/fx v1.22.2 h1:iPW+OPxv0G8w75OemJ1RAnTUrF55zOJlXlo1TbJ0Buw= +go.uber.org/fx v1.22.2/go.mod h1:o/D9n+2mLP6v1EG+qsdT1O8wKopYAsqZasju97SDFCU= go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= go.uber.org/goleak v1.1.11-0.20210813005559-691160354723/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= go.uber.org/goleak v1.1.11/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= @@ -2512,9 +1978,7 @@ go.uber.org/multierr 
v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/ go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU= go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= -go.uber.org/multierr v1.7.0/go.mod h1:7EAYxJLBy9rStEaz58O2t4Uvip6FSURkq8/ppBp95ak= go.uber.org/multierr v1.8.0/go.mod h1:7EAYxJLBy9rStEaz58O2t4Uvip6FSURkq8/ppBp95ak= -go.uber.org/multierr v1.9.0/go.mod h1:X2jQV1h+kxSjClGpnseKVIxpmcjrj7MNnI0bnlfKTVQ= go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= @@ -2526,9 +1990,6 @@ go.uber.org/zap v1.15.0/go.mod h1:Mb2vm2krFEG5DV0W9qcHBYFtp/Wku1cvYaqPsS/WYfc= go.uber.org/zap v1.16.0/go.mod h1:MA8QOfq0BHJwdXa996Y4dYkAqRKB8/1K1QMMZVaNZjQ= go.uber.org/zap v1.19.1/go.mod h1:j3DNczoxDZroyBnOT1L/Q79cfUMGZxlv/9dzN7SM1rI= go.uber.org/zap v1.21.0/go.mod h1:wjWOCqI0f2ZZrJF/UufIOkiC8ii6tm1iqIsLo76RfJw= -go.uber.org/zap v1.22.0/go.mod h1:H4siCOZOrAolnUPJEkfaSjDqyP+BDS0DdDWzwcgt3+U= -go.uber.org/zap v1.23.0/go.mod h1:D+nX8jyLsMHMYrln8A0rJjFt/T/9/bGgIhAqxv5URuY= -go.uber.org/zap v1.24.0/go.mod h1:2kMP+WWQ8aoFoedH3T2sq6iJ2yDWpHbP0f6MQbS9Gkg= go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= go4.org v0.0.0-20180809161055-417644f6feb5/go.mod h1:MkTOUMDaeVYJUOUsaDXIhWPZYa1yOyC1qaOBpL57BhE= @@ -2539,13 +2000,10 @@ golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnf golang.org/x/crypto v0.0.0-20181030102418-4d3f4d9ffa16/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= 
golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20190225124518-7f87c0fbb88b/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190313024323-a1f597ede03a/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190426145343-a29dc8fdc734/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190513172903-22d7a77e9e5f/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190530122614-20be4c3c3ed5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190618222545-ea8f1a30c443/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= @@ -2555,7 +2013,6 @@ golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200115085410-6d4e4cb37c7d/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200221231518-2aa609cf4a9d/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20200423211502-4bdfaf469ed5/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200510223506-06a226fb4e37/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200602180216-279210d13fed/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto 
v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= @@ -2565,29 +2022,21 @@ golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad/go.mod h1:jdWPYTVW3xRLrWP golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/crypto v0.0.0-20210506145944-38f3c27a63bf/go.mod h1:P+XmwS30IXTQdn5tA2iutPOUgjI07+tq3H3K9MVA1s8= -golang.org/x/crypto v0.0.0-20210513164829-c07d793c2f9a/go.mod h1:P+XmwS30IXTQdn5tA2iutPOUgjI07+tq3H3K9MVA1s8= golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.0.0-20220331220935-ae2d96664a29/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.0.0-20220411220226-7b82a4e95df4/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/crypto v0.0.0-20220427172511-eb4f295cb31f/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/crypto v0.0.0-20220525230936-793ad666bf5e/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw= -golang.org/x/crypto v0.4.0/go.mod h1:3quD/ATkf6oY+rnes5c3ExXTbLc8mueNue5/DoinL80= -golang.org/x/crypto v0.5.0/go.mod h1:NK/OQwhpMQP3MwtdjgLlYHnH9ebylxKWv3e0fK+mkQU= -golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58= golang.org/x/crypto v0.8.0/go.mod h1:mRqEX+O9/h5TFCrQhkgjo2yKi0yYA+9ecGkdQoHrywE= golang.org/x/crypto v0.12.0/go.mod h1:NF0Gs7EO5K4qLn+Ylc+fih8BSTeIjAP05siRnAh98yw= golang.org/x/crypto v0.18.0/go.mod h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1mg= -golang.org/x/crypto v0.25.0 h1:ypSNr+bnYL2YhwoMt2zPxHFmbAN1KZs/njMG3hxUp30= -golang.org/x/crypto v0.25.0/go.mod 
h1:T+wALwcMOSE0kXgUAnPAHqTLW+XHgcELELW8VaDgm/M= +golang.org/x/crypto v0.26.0 h1:RrRspgV4mU+YwB4FYnuBoKsUapNIL5cohGAmSH3azsw= +golang.org/x/crypto v0.26.0/go.mod h1:GY7jblb9wI+FOo5y8/S2oY4zWP07AkOJ4+jxCqdqn54= golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190125153040-c74c464bbbf2/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= -golang.org/x/exp v0.0.0-20190731235908-ec7cb31e5a56/go.mod h1:JhuoJpWY28nO4Vef9tZUw9qufEGTyX1+7lmHxV5q5G4= golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= @@ -2596,15 +2045,8 @@ golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u0 golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= golang.org/x/exp v0.0.0-20200331195152-e8c3332aa8e5/go.mod h1:4M0jN8W1tt0AVLNr8HDosyJCDCDuyL9N9+3m7wDWgKw= -golang.org/x/exp v0.0.0-20210615023648-acb5c1269671/go.mod h1:DVyR6MI7P4kEQgvZJSj1fQGrWIi2RzIrfYWycwheUAc= -golang.org/x/exp v0.0.0-20220303212507-bbda1eaf7a17/go.mod h1:lgLbSvA5ygNOMpwM/9anMpWVlVJ7Z+cHWq/eFuinpGE= -golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e/go.mod 
h1:Kr81I6Kryrl9sr8s2FK3vxD90NdsKWRuOIl2O4CvYbA= -golang.org/x/exp v0.0.0-20220916125017-b168a2c6b86b/go.mod h1:cyybsKvd6eL0RnXn6p/Grxp8F5bW7iYuBgsNCOHpMYE= -golang.org/x/exp v0.0.0-20221205204356-47842c84f3db/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc= -golang.org/x/exp v0.0.0-20230129154200-a960b3787bd2/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc= -golang.org/x/exp v0.0.0-20230213192124-5e25df0256eb/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc= -golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 h1:2dVuKD2vS7b0QIHQbpyTISPd0LeHDbnYEryqj5Q1ug8= -golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56/go.mod h1:M4RDyNAINzryxdtnbRXRL/OHtkFuWGRjvuhBJpk2IlY= +golang.org/x/exp v0.0.0-20240808152545-0cdaa3abc0fa h1:ELnwvuAXPNtPk1TJRuGkI9fDTwym6AYBu0qzT8AcHdI= +golang.org/x/exp v0.0.0-20240808152545-0cdaa3abc0fa/go.mod h1:akd2r19cwCdwSwWeIdzYQGa/EZZyqcOdwWiwj5L5eKQ= golang.org/x/image v0.0.0-20180708004352-c73c2afc3b81/go.mod h1:ux5Hcp/YLpHSI86hEcLt0YII63i6oz57MZXIpbrjZUs= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= @@ -2623,26 +2065,20 @@ golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPI golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= -golang.org/x/mobile v0.0.0-20201217150744-e6ae53a27f4f/go.mod h1:skQtrUTUwhdJvXM/2KKJzY8pDgNr9I/FOMqDVRPBUS4= golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= golang.org/x/mod 
v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= -golang.org/x/mod v0.1.1-0.20191209134235-331c550502dd/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.5.1/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= -golang.org/x/mod v0.6.0-dev.0.20211013180041-c96bc1413d57/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY= golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= -golang.org/x/mod v0.6.0/go.mod h1:4mET923SAdbXp2ki8ey+zGs1SLqsuM2Y0uvdZR/fUNI= -golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/mod v0.19.0 h1:fEdghXQSo20giMthA7cd28ZC+jts4amQ3YMXiP5oMQ8= -golang.org/x/mod v0.19.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.20.0 h1:utOm6MM3R3dnawAiJgn0y+xvuYRsm1RKM/4giyfDgV0= +golang.org/x/mod v0.20.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/net v0.0.0-20180719180050-a680a1efc54d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ 
-2663,16 +2099,13 @@ golang.org/x/net v0.0.0-20190313220215-9f648a60d977/go.mod h1:t9HGtf8HONx5eT2rtn golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= -golang.org/x/net v0.0.0-20190611141213-3f473d35a33a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= @@ -2721,24 +2154,16 @@ golang.org/x/net v0.0.0-20220607020251-c690dde0001d/go.mod h1:XRhObCWvk6IyKnWLug golang.org/x/net v0.0.0-20220617184016-355a448f1bc9/go.mod 
h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.0.0-20220624214902-1bab6f366d9e/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.0.0-20220812174116-3211cb980234/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= golang.org/x/net v0.0.0-20220909164309-bea034e7d591/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= -golang.org/x/net v0.0.0-20220920183852-bf014ff85ad5/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= -golang.org/x/net v0.0.0-20220923203811-8be639271d50/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= golang.org/x/net v0.0.0-20221014081412-f15817d10f9b/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= -golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= -golang.org/x/net v0.3.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE= -golang.org/x/net v0.4.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE= -golang.org/x/net v0.5.0/go.mod h1:DivGGAXEgPSlEBzxGzZI+ZLohi+xUj054jfeKui00ws= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= -golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns= golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= golang.org/x/net v0.14.0/go.mod h1:PpSgVXXLK0OxS0F31C1/tv6XNguvCrnXIDrFMspZIUI= golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY= -golang.org/x/net v0.27.0 h1:5K3Njcw06/l2y9vpGCSdcxWOYHOUk3dVNGDXN+FvAys= -golang.org/x/net v0.27.0/go.mod h1:dDi0PyhWNoiUOrAS8uXv/vnScO4wnHQO4mj9fn/RytE= +golang.org/x/net v0.28.0 h1:a9JDOJc5GMUJ0+UDqmLT86WiEy7iWyIhz8gz8E4e5hE= +golang.org/x/net v0.28.0/go.mod h1:yqtgsTWOOnlGLG9GFRrK3++bGOUEkNBoHZc8MEDWPNg= 
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20181017192945-9dcd33a902f4/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= @@ -2784,8 +2209,8 @@ golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220929204114-8fcdb60fdcc0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M= -golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ= +golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180810173357-98c5dad5d1a0/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -2802,7 +2227,6 @@ golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5h golang.org/x/sys v0.0.0-20190219092855-153ac476189d/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190228124157-a34e9553db1e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190302025703-b6889370fb10/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 
golang.org/x/sys v0.0.0-20190316082340-a2f829d7f35f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190405154228-4b34438f7a67/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -2810,10 +2234,7 @@ golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190524122548-abf6ff778158/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190526052359-791d8a0f4d09/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190610200419-93c9922d18ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -2821,7 +2242,6 @@ golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -2890,7 +2310,6 @@ golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20210816183151-1e6c022a8912/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210819135213-f52c844e1c1c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210903071746-97244b99971b/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211124211545-fe61309f8881/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -2901,28 +2320,19 @@ golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220227234510-4e6760a101f9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220310020820-b874c991c1a5/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220319134239-a9b59b0215f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220328115105-d36c6a25d886/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220405210540-1e041c57c461/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys 
v0.0.0-20220422013727-9388b58f7150/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220429233432-b5fbb4746d32/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220502124256-b6088ccd6cba/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220610221304-9f5ed59c137d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220615213510-4f61da869c0c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220624220833-87e55d714810/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220704084225-05e143d24a9e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220919091848-fb04ddd9f9c8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20221010170243-090e33056c14/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.7.0/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -2932,22 +2342,19 @@ golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.14.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.22.0 h1:RI27ohtqKCnwULzJLqkv897zojh5/DwS/ENaMzUOaWI= -golang.org/x/sys v0.22.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.24.0 h1:Twjiwq9dn6R1fQcyiK+wQyHWfaz/BJB+YIpzU/Cv3Xg= +golang.org/x/sys v0.24.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= -golang.org/x/term v0.3.0/go.mod h1:q750SLmJuPmVoN1blW3UFBPREJfb1KmY3vwxfr+nFDA= -golang.org/x/term v0.4.0/go.mod h1:9P2UbLfCdcvo3p/nzKvsmas4TnlujnuoV9hGgYzW1lQ= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= golang.org/x/term v0.7.0/go.mod h1:P32HKFT3hSsZrRxla30E9HqToFYAQPCMs/zFMBUFqPY= golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= golang.org/x/term v0.11.0/go.mod h1:zC9APTIj3jG3FdV/Ons+XE1riIZXG4aZ4GTHiPZJPIU= golang.org/x/term v0.16.0/go.mod h1:yn7UURbUtPyrVJPGPq404EukNFxcm/foM+bV/bfcDsY= -golang.org/x/term v0.22.0 h1:BbsgPEJULsl2fV/AT3v15Mjva5yXKQDyKf+TbDz7QJk= -golang.org/x/term v0.22.0/go.mod h1:F3qCibpT5AMpCRfhfT53vVJwhLtIVHhB9XDjfFvnMI4= +golang.org/x/term v0.23.0 h1:F6D4vR+EHoL9/sWAWgAR1H2DcHr4PareCbAaCo1RpuU= 
+golang.org/x/term v0.23.0/go.mod h1:DgV24QBUrK6jhZXl+20l6UWznPlwAHm1Q1mGHtydmSk= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -2958,14 +2365,12 @@ golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.6.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/text v0.12.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= -golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4= -golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI= +golang.org/x/text v0.17.0 h1:XtiM5bkSOt+ewxlOE/aE/AKEHibwj/6gvWMl9Rsh0Qc= +golang.org/x/text v0.17.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -3003,12 +2408,10 @@ golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtn golang.org/x/tools 
v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191216052735-49a3e744a425/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200103221440-774c71fcf114/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200108203644-89082a384178/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200117012304-6edc0a871e69/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= @@ -3036,7 +2439,6 @@ golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4f golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20210112230658-8b4aab62c064/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= 
golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= @@ -3044,14 +2446,11 @@ golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.6-0.20210726203631-07bc1bf47fb2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.8-0.20211029000441-d6a9af8af023/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU= golang.org/x/tools v0.1.10/go.mod h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= -golang.org/x/tools v0.2.0/go.mod h1:y4OqIKeOV/fWJetJ8bXPU1sEVniLMIyDAZWeHdV+NTA= -golang.org/x/tools v0.3.0/go.mod h1:/rWhSS2+zyEVwoJf8YAX6L2f0ntZ7Kn/mGgAWcipA5k= golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= -golang.org/x/tools v0.23.0 h1:SGsXPZ+2l4JsgaCKkx+FQ9YZ5XEtA1GZYuoDjenLjvg= -golang.org/x/tools v0.23.0/go.mod h1:pnu6ufv6vQkll6szChhK3C3L/ruaIv5eBeztNG8wtsI= +golang.org/x/tools v0.24.0 h1:J1shsA93PJUEVaUSaay7UXAyE8aimq3GW0pjlolpa24= +golang.org/x/tools v0.24.0/go.mod h1:YhNqVBIfWHdzvTLs0d8LCuMhkKUgSUKldakyV7W/WDQ= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -3319,7 +2718,6 @@ gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLks gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 
v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20200902074654-038fdea0a05b/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= @@ -3353,7 +2751,6 @@ gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.0-20200605160147-a5ece683394c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/header/headertest/fraud/testing.go b/header/headertest/fraud/testing.go index 7a876c7ecf..5f4bdca084 100644 --- a/header/headertest/fraud/testing.go +++ b/header/headertest/fraud/testing.go @@ -5,7 +5,6 @@ import ( "testing" "time" - "github.com/ipfs/boxo/blockservice" "github.com/stretchr/testify/require" "github.com/tendermint/tendermint/libs/bytes" tmproto "github.com/tendermint/tendermint/proto/tendermint/types" @@ -17,9 +16,9 @@ import ( "github.com/celestiaorg/celestia-node/header" "github.com/celestiaorg/celestia-node/header/headertest" - "github.com/celestiaorg/celestia-node/share/eds" "github.com/celestiaorg/celestia-node/share/eds/edstest" "github.com/celestiaorg/celestia-node/share/ipld" + 
"github.com/celestiaorg/celestia-node/store" ) // FraudMaker allows to produce an invalid header at the specified height in order to produce the @@ -45,7 +44,7 @@ func NewFraudMaker(t *testing.T, height int64, vals []types.PrivValidator, valSe } } -func (f *FraudMaker) MakeExtendedHeader(odsSize int, edsStore *eds.Store) header.ConstructFn { +func (f *FraudMaker) MakeExtendedHeader(odsSize int, edsStore *store.Store) header.ConstructFn { return func( h *types.Header, comm *types.Commit, @@ -58,14 +57,14 @@ func (f *FraudMaker) MakeExtendedHeader(odsSize int, edsStore *eds.Store) header hdr := *h if h.Height == f.height { - adder := ipld.NewProofsAdder(odsSize) + adder := ipld.NewProofsAdder(odsSize, false) square := edstest.RandByzantineEDS(f.t, odsSize, nmt.NodeVisitor(adder.VisitFn())) dah, err := da.NewDataAvailabilityHeader(square) require.NoError(f.t, err) hdr.DataHash = dah.Hash() ctx := ipld.CtxWithProofsAdder(context.Background(), adder) - require.NoError(f.t, edsStore.Put(ctx, h.DataHash.Bytes(), square)) + require.NoError(f.t, edsStore.PutODSQ4(ctx, &dah, uint64(h.Height), square)) *eds = *square } @@ -89,11 +88,8 @@ func (f *FraudMaker) MakeExtendedHeader(odsSize int, edsStore *eds.Store) header func CreateFraudExtHeader( t *testing.T, eh *header.ExtendedHeader, - serv blockservice.BlockService, ) *header.ExtendedHeader { square := edstest.RandByzantineEDS(t, len(eh.DAH.RowRoots)) - err := ipld.ImportEDS(context.Background(), square, serv) - require.NoError(t, err) dah, err := da.NewDataAvailabilityHeader(square) require.NoError(t, err) eh.DAH = &dah diff --git a/header/headertest/testing.go b/header/headertest/testing.go index 245288b8c5..ddac1995a6 100644 --- a/header/headertest/testing.go +++ b/header/headertest/testing.go @@ -66,7 +66,7 @@ func NewTestSuite(t *testing.T, numValidators int, blockTime time.Duration) *Tes } func (s *TestSuite) genesis() *header.ExtendedHeader { - dah := share.EmptyRoot() + dah := share.EmptyEDSRoots() gen := 
RandRawHeader(s.t) @@ -152,7 +152,7 @@ func (s *TestSuite) NextHeader() *header.ExtendedHeader { return s.head } - dah := share.EmptyRoot() + dah := share.EmptyEDSRoots() height := s.Head().Height() + 1 rh := s.GenRawHeader(height, s.Head().Hash(), libhead.Hash(s.Head().Commit.Hash()), dah.Hash()) s.head = &header.ExtendedHeader{ @@ -229,7 +229,7 @@ func RandExtendedHeader(t testing.TB) *header.ExtendedHeader { } func RandExtendedHeaderAtTimestamp(t testing.TB, timestamp time.Time) *header.ExtendedHeader { - dah := share.EmptyRoot() + dah := share.EmptyEDSRoots() rh := RandRawHeader(t) rh.DataHash = dah.Hash() @@ -328,9 +328,9 @@ func ExtendedHeadersFromEdsses(t testing.TB, edsses []*rsmt2d.ExtendedDataSquare for i, eds := range edsses { gen := RandRawHeader(t) - dah, err := share.NewRoot(eds) + roots, err := share.NewAxisRoots(eds) require.NoError(t, err) - gen.DataHash = dah.Hash() + gen.DataHash = roots.Hash() gen.ValidatorsHash = valSet.Hash() gen.NextValidatorsHash = valSet.Hash() gen.Height = int64(i + 1) @@ -347,7 +347,7 @@ func ExtendedHeadersFromEdsses(t testing.TB, edsses []*rsmt2d.ExtendedDataSquare RawHeader: *gen, Commit: commit, ValidatorSet: valSet, - DAH: dah, + DAH: roots, } require.NoError(t, eh.Validate()) headers[i] = eh @@ -358,7 +358,7 @@ func ExtendedHeadersFromEdsses(t testing.TB, edsses []*rsmt2d.ExtendedDataSquare func ExtendedHeaderFromEDS(t testing.TB, height uint64, eds *rsmt2d.ExtendedDataSquare) *header.ExtendedHeader { valSet, vals := RandValidatorSet(10, 10) gen := RandRawHeader(t) - dah, err := share.NewRoot(eds) + dah, err := share.NewAxisRoots(eds) require.NoError(t, err) gen.DataHash = dah.Hash() diff --git a/libs/edssser/edssser.go b/libs/edssser/edssser.go index 10c9c6b3bd..dfb01e1756 100644 --- a/libs/edssser/edssser.go +++ b/libs/edssser/edssser.go @@ -9,17 +9,15 @@ import ( "testing" "time" - "github.com/ipfs/go-datastore" - - "github.com/celestiaorg/celestia-app/v2/pkg/da" - - 
"github.com/celestiaorg/celestia-node/share/eds" + "github.com/celestiaorg/celestia-node/share" "github.com/celestiaorg/celestia-node/share/eds/edstest" + "github.com/celestiaorg/celestia-node/store" ) type Config struct { EDSSize int EDSWrites int + WriteFrom int EnableLog bool LogFilePath string StatLogFreq int @@ -29,25 +27,23 @@ type Config struct { // EDSsser stand for EDS Store Stresser. type EDSsser struct { config Config - datastore datastore.Batching edsstoreMu sync.Mutex - edsstore *eds.Store + edsstore *store.Store statsFileMu sync.Mutex statsFile *os.File } -func NewEDSsser(path string, datastore datastore.Batching, cfg Config) (*EDSsser, error) { - storeCfg := eds.DefaultParameters() - edsstore, err := eds.NewStore(storeCfg, path, datastore) +func NewEDSsser(path string, cfg Config) (*EDSsser, error) { + storeCfg := store.DefaultParameters() + edsstore, err := store.NewStore(storeCfg, path) if err != nil { return nil, err } return &EDSsser{ - config: cfg, - datastore: datastore, - edsstore: edsstore, + config: cfg, + edsstore: edsstore, }, nil } @@ -55,23 +51,14 @@ func (ss *EDSsser) Run(ctx context.Context) (stats Stats, err error) { ss.edsstoreMu.Lock() defer ss.edsstoreMu.Unlock() - err = ss.edsstore.Start(ctx) - if err != nil { - return stats, err - } defer func() { err = errors.Join(err, ss.edsstore.Stop(ctx)) }() - edsHashes, err := ss.edsstore.List() - if err != nil { - return stats, err - } - fmt.Printf("recovered %d EDSes\n\n", len(edsHashes)) - t := &testing.T{} - for toWrite := ss.config.EDSWrites - len(edsHashes); ctx.Err() == nil && toWrite > 0; toWrite-- { - took, err := ss.put(ctx, t) + writeTo := ss.config.WriteFrom + ss.config.EDSWrites + for height := ss.config.WriteFrom; ctx.Err() == nil && height < writeTo; height++ { + took, err := ss.put(ctx, t, height) stats.TotalWritten++ stats.TotalTime += took @@ -153,7 +140,7 @@ AvgTime %s ) } -func (ss *EDSsser) put(ctx context.Context, t *testing.T) (time.Duration, error) { +func (ss 
*EDSsser) put(ctx context.Context, t *testing.T, height int) (time.Duration, error) { ctx, cancel := context.WithTimeout(ctx, ss.config.OpTimeout) if ss.config.OpTimeout == 0 { ctx, cancel = context.WithCancel(ctx) @@ -162,12 +149,12 @@ func (ss *EDSsser) put(ctx context.Context, t *testing.T) (time.Duration, error) // divide by 2 to get ODS size as expected by RandEDS square := edstest.RandEDS(t, ss.config.EDSSize/2) - dah, err := da.NewDataAvailabilityHeader(square) + roots, err := share.NewAxisRoots(square) if err != nil { return 0, err } now := time.Now() - err = ss.edsstore.Put(ctx, dah.Hash(), square) + err = ss.edsstore.PutODSQ4(ctx, roots, uint64(height), square) return time.Since(now), err } diff --git a/libs/utils/close.go b/libs/utils/close.go new file mode 100644 index 0000000000..c9a2ddaa30 --- /dev/null +++ b/libs/utils/close.go @@ -0,0 +1,15 @@ +package utils + +import ( + "io" + + logging "github.com/ipfs/go-log/v2" +) + +// CloseAndLog closes the closer and logs any error that occurs. The function is handy wrapping +// to group closing and logging in one call for defer statements. +func CloseAndLog(log logging.StandardLogger, name string, closer io.Closer) { + if err := closer.Close(); err != nil { + log.Warnf("closing %s: %s", name, err) + } +} diff --git a/share/getters/utils.go b/libs/utils/ctx.go similarity index 53% rename from share/getters/utils.go rename to libs/utils/ctx.go index 2260183b4f..5a13232a16 100644 --- a/share/getters/utils.go +++ b/libs/utils/ctx.go @@ -1,24 +1,22 @@ -package getters +package utils import ( "context" - "errors" "time" - - logging "github.com/ipfs/go-log/v2" - "go.opentelemetry.io/otel" ) -var ( - tracer = otel.Tracer("share/getters") - log = logging.Logger("share/getters") +// ResetContextOnError returns a fresh context if the given context has an error. 
+func ResetContextOnError(ctx context.Context) context.Context { + if ctx.Err() != nil { + ctx = context.Background() + } - errOperationNotSupported = errors.New("operation is not supported") -) + return ctx +} -// ctxWithSplitTimeout will split timeout stored in context by splitFactor and return the result if +// CtxWithSplitTimeout will split timeout stored in context by splitFactor and return the result if // it is greater than minTimeout. minTimeout == 0 will be ignored, splitFactor <= 0 will be ignored -func ctxWithSplitTimeout( +func CtxWithSplitTimeout( ctx context.Context, splitFactor int, minTimeout time.Duration, @@ -42,16 +40,3 @@ func ctxWithSplitTimeout( } return context.WithTimeout(ctx, splitTimeout) } - -// ErrorContains reports whether any error in err's tree matches any error in targets tree. -func ErrorContains(err, target error) bool { - if errors.Is(err, target) || target == nil { - return true - } - - target = errors.Unwrap(target) - if target == nil { - return false - } - return ErrorContains(err, target) -} diff --git a/share/getters/utils_test.go b/libs/utils/ctx_test.go similarity index 62% rename from share/getters/utils_test.go rename to libs/utils/ctx_test.go index ce94d3ac04..c1fccfa48a 100644 --- a/share/getters/utils_test.go +++ b/libs/utils/ctx_test.go @@ -1,119 +1,13 @@ -package getters +package utils import ( "context" - "errors" - "fmt" "testing" "time" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) -func Test_ErrorContains(t *testing.T) { - err1 := errors.New("1") - err2 := errors.New("2") - - w1 := func(err error) error { - return fmt.Errorf("wrap1: %w", err) - } - w2 := func(err error) error { - return fmt.Errorf("wrap1: %w", err) - } - - type args struct { - err error - target error - } - tests := []struct { - name string - args args - want bool - }{ - { - "nil err", - args{ - err: nil, - target: err1, - }, - false, - }, - { - "nil target", - args{ - err: err1, - target: nil, - }, - true, - }, - 
{ - "errors.Is true", - args{ - err: w1(err1), - target: err1, - }, - true, - }, - { - "errors.Is false", - args{ - err: w1(err1), - target: err2, - }, - false, - }, - { - "same wrap but different base error", - args{ - err: w1(err1), - target: w1(err2), - }, - false, - }, - { - "both wrapped true", - args{ - err: w1(err1), - target: w2(err1), - }, - true, - }, - { - "both wrapped false", - args{ - err: w1(err1), - target: w2(err2), - }, - false, - }, - { - "multierr first in slice", - args{ - err: errors.Join(w1(err1), w2(err2)), - target: w2(err1), - }, - true, - }, - { - "multierr second in slice", - args{ - err: errors.Join(w1(err1), w2(err2)), - target: w1(err2), - }, - true, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - assert.Equalf(t, - tt.want, - ErrorContains(tt.args.err, tt.args.target), - "ErrorContains(%v, %v)", tt.args.err, tt.args.target) - }) - } -} - func Test_ctxWithSplitTimeout(t *testing.T) { type args struct { ctxTimeout time.Duration @@ -216,7 +110,7 @@ func Test_ctxWithSplitTimeout(t *testing.T) { ctx, cancel = context.WithTimeout(ctx, tt.args.ctxTimeout) } t.Cleanup(cancel) - got, _ := ctxWithSplitTimeout(ctx, sf, tt.args.minTimeout) + got, _ := CtxWithSplitTimeout(ctx, sf, tt.args.minTimeout) dl, ok := got.Deadline() // in case no deadline is found in ctx or not expected to be found, check both cases apply at the // same time diff --git a/libs/utils/resetctx.go b/libs/utils/resetctx.go deleted file mode 100644 index a108cc27b4..0000000000 --- a/libs/utils/resetctx.go +++ /dev/null @@ -1,14 +0,0 @@ -package utils - -import ( - "context" -) - -// ResetContextOnError returns a fresh context if the given context has an error. 
-func ResetContextOnError(ctx context.Context) context.Context { - if ctx.Err() != nil { - ctx = context.Background() - } - - return ctx -} diff --git a/nodebuilder/blob/mocks/api.go b/nodebuilder/blob/mocks/api.go index 39815179bd..786fe7d7fb 100644 --- a/nodebuilder/blob/mocks/api.go +++ b/nodebuilder/blob/mocks/api.go @@ -126,3 +126,18 @@ func (mr *MockModuleMockRecorder) Submit(arg0, arg1, arg2 interface{}) *gomock.C mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Submit", reflect.TypeOf((*MockModule)(nil).Submit), arg0, arg1, arg2) } + +// Subscribe mocks base method. +func (m *MockModule) Subscribe(arg0 context.Context, arg1 share.Namespace) (<-chan *blob.SubscriptionResponse, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Subscribe", arg0, arg1) + ret0, _ := ret[0].(<-chan *blob.SubscriptionResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Subscribe indicates an expected call of Subscribe. +func (mr *MockModuleMockRecorder) Subscribe(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Subscribe", reflect.TypeOf((*MockModule)(nil).Subscribe), arg0, arg1) +} diff --git a/nodebuilder/blob/module.go b/nodebuilder/blob/module.go index 3ff6b9892b..cf07ed3732 100644 --- a/nodebuilder/blob/module.go +++ b/nodebuilder/blob/module.go @@ -9,7 +9,7 @@ import ( "github.com/celestiaorg/celestia-node/header" headerService "github.com/celestiaorg/celestia-node/nodebuilder/header" "github.com/celestiaorg/celestia-node/nodebuilder/state" - "github.com/celestiaorg/celestia-node/share" + "github.com/celestiaorg/celestia-node/share/shwap" ) func ConstructModule() fx.Option { @@ -27,7 +27,7 @@ func ConstructModule() fx.Option { fx.Provide(fx.Annotate( func( state state.Module, - sGetter share.Getter, + sGetter shwap.Getter, getByHeightFn func(context.Context, uint64) (*header.ExtendedHeader, error), subscribeFn func(context.Context) (<-chan 
*header.ExtendedHeader, error), ) *blob.Service { diff --git a/nodebuilder/core/module.go b/nodebuilder/core/module.go index 0b0c409406..441907ce32 100644 --- a/nodebuilder/core/module.go +++ b/nodebuilder/core/module.go @@ -12,8 +12,8 @@ import ( "github.com/celestiaorg/celestia-node/libs/fxutil" "github.com/celestiaorg/celestia-node/nodebuilder/node" "github.com/celestiaorg/celestia-node/nodebuilder/p2p" - "github.com/celestiaorg/celestia-node/share/eds" - "github.com/celestiaorg/celestia-node/share/p2p/shrexsub" + "github.com/celestiaorg/celestia-node/share/shwap/p2p/shrex/shrexsub" + "github.com/celestiaorg/celestia-node/store" ) // ConstructModule collects all the components and services related to managing the relationship @@ -38,7 +38,7 @@ func ConstructModule(tp node.Type, cfg *Config, options ...fx.Option) fx.Option fxutil.ProvideAs( func( fetcher *core.BlockFetcher, - store *eds.Store, + store *store.Store, construct header.ConstructFn, opts []core.Option, ) (*core.Exchange, error) { @@ -55,7 +55,7 @@ func ConstructModule(tp node.Type, cfg *Config, options ...fx.Option) fx.Option fetcher *core.BlockFetcher, pubsub *shrexsub.PubSub, construct header.ConstructFn, - store *eds.Store, + store *store.Store, chainID p2p.Network, opts []core.Option, ) (*core.Listener, error) { diff --git a/nodebuilder/das/constructors.go b/nodebuilder/das/constructors.go index 37a90086a8..8d6f9d1168 100644 --- a/nodebuilder/das/constructors.go +++ b/nodebuilder/das/constructors.go @@ -15,7 +15,7 @@ import ( "github.com/celestiaorg/celestia-node/pruner" "github.com/celestiaorg/celestia-node/share" "github.com/celestiaorg/celestia-node/share/eds/byzantine" - "github.com/celestiaorg/celestia-node/share/p2p/shrexsub" + "github.com/celestiaorg/celestia-node/share/shwap/p2p/shrex/shrexsub" ) var _ Module = (*daserStub)(nil) diff --git a/nodebuilder/node.go b/nodebuilder/node.go index c0ba8f78e8..b328d8c590 100644 --- a/nodebuilder/node.go +++ b/nodebuilder/node.go @@ -31,7 +31,7 @@ 
import ( "github.com/celestiaorg/celestia-node/nodebuilder/p2p" "github.com/celestiaorg/celestia-node/nodebuilder/share" "github.com/celestiaorg/celestia-node/nodebuilder/state" - "github.com/celestiaorg/celestia-node/share/eds" + "github.com/celestiaorg/celestia-node/store" ) var ( @@ -59,13 +59,13 @@ type Node struct { GatewayServer *gateway.Server `optional:"true"` // block store - EDSStore *eds.Store `optional:"true"` + EDSStore *store.Store `optional:"true"` // p2p components Host host.Host ConnGater *conngater.BasicConnectionGater Routing routing.PeerRouting - DataExchange exchange.Interface + DataExchange exchange.SessionExchange BlockService blockservice.BlockService // p2p protocols PubSub *pubsub.PubSub diff --git a/nodebuilder/node/mocks/api.go b/nodebuilder/node/mocks/api.go index d8789a771c..3d284c9e06 100644 --- a/nodebuilder/node/mocks/api.go +++ b/nodebuilder/node/mocks/api.go @@ -94,3 +94,18 @@ func (mr *MockModuleMockRecorder) LogLevelSet(arg0, arg1, arg2 interface{}) *gom mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "LogLevelSet", reflect.TypeOf((*MockModule)(nil).LogLevelSet), arg0, arg1, arg2) } + +// Ready mocks base method. +func (m *MockModule) Ready(arg0 context.Context) (bool, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Ready", arg0) + ret0, _ := ret[0].(bool) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Ready indicates an expected call of Ready. 
+func (mr *MockModuleMockRecorder) Ready(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Ready", reflect.TypeOf((*MockModule)(nil).Ready), arg0) +} diff --git a/nodebuilder/node_test.go b/nodebuilder/node_test.go index 833729dcda..8155d7e081 100644 --- a/nodebuilder/node_test.go +++ b/nodebuilder/node_test.go @@ -136,7 +136,7 @@ func TestEmptyBlockExists(t *testing.T) { {tp: node.Bridge}, {tp: node.Full}, // technically doesn't need to be tested as a SharesAvailable call to - // light node short circuits on an empty Root + // light node short circuits on an empty EDS {tp: node.Light}, } for i, tt := range test { @@ -147,7 +147,7 @@ func TestEmptyBlockExists(t *testing.T) { // ensure an empty block exists in store - eh := headertest.RandExtendedHeaderWithRoot(t, share.EmptyRoot()) + eh := headertest.RandExtendedHeaderWithRoot(t, share.EmptyEDSRoots()) err = node.ShareServ.SharesAvailable(ctx, eh) require.NoError(t, err) diff --git a/nodebuilder/p2p/bitswap.go b/nodebuilder/p2p/bitswap.go deleted file mode 100644 index d419a53ab4..0000000000 --- a/nodebuilder/p2p/bitswap.go +++ /dev/null @@ -1,138 +0,0 @@ -package p2p - -import ( - "context" - "fmt" - - "github.com/ipfs/boxo/bitswap" - "github.com/ipfs/boxo/bitswap/network" - "github.com/ipfs/boxo/blockstore" - "github.com/ipfs/boxo/exchange" - "github.com/ipfs/go-datastore" - ipfsmetrics "github.com/ipfs/go-metrics-interface" - ipfsprom "github.com/ipfs/go-metrics-prometheus" - routinghelpers "github.com/libp2p/go-libp2p-routing-helpers" - hst "github.com/libp2p/go-libp2p/core/host" - "github.com/libp2p/go-libp2p/core/protocol" - "github.com/prometheus/client_golang/prometheus" - "go.uber.org/fx" - - "github.com/celestiaorg/celestia-node/share/eds" -) - -const ( - // default size of bloom filter in blockStore - defaultBloomFilterSize = 512 << 10 - // default amount of hash functions defined for bloom filter - defaultBloomFilterHashes = 7 - // 
default size of arc cache in blockStore - defaultARCCacheSize = 64 << 10 -) - -// dataExchange provides a constructor for IPFS block's DataExchange over BitSwap. -func dataExchange(params bitSwapParams) exchange.Interface { - prefix := protocolID(params.Net) - net := network.NewFromIpfsHost(params.Host, &routinghelpers.Null{}, network.Prefix(prefix)) - - opts := []bitswap.Option{ - // Server options - bitswap.ProvideEnabled(false), // we don't provide blocks over DHT - // NOTE: These below are required for our protocol to work reliably. - // // See https://github.com/celestiaorg/celestia-node/issues/732 - bitswap.SetSendDontHaves(false), - - // Client options - bitswap.SetSimulateDontHavesOnTimeout(false), - bitswap.WithoutDuplicatedBlockStats(), - } - - ctx := params.Ctx - if params.Metrics != nil { - // metrics scope is required for prometheus metrics and will be used as metrics name - // prefix - ctx = ipfsmetrics.CtxScope(ctx, "bitswap") - } - bs := bitswap.New(ctx, net, params.Bs, opts...) 
- - params.Lifecycle.Append(fx.Hook{ - OnStop: func(_ context.Context) (err error) { - return bs.Close() - }, - }) - return bs -} - -func blockstoreFromDatastore( - ctx context.Context, - ds datastore.Batching, - b blockstoreParams, -) (blockstore.Blockstore, error) { - if b.Metrics != nil { - // metrics scope is required for prometheus metrics and will be used as metrics name - // prefix - ctx = ipfsmetrics.CtxScope(ctx, "blockstore") - } - return blockstore.CachedBlockstore( - ctx, - blockstore.NewBlockstore(ds), - blockstore.CacheOpts{ - HasBloomFilterSize: defaultBloomFilterSize, - HasBloomFilterHashes: defaultBloomFilterHashes, - HasTwoQueueCacheSize: defaultARCCacheSize, - }, - ) -} - -func blockstoreFromEDSStore( - ctx context.Context, - store *eds.Store, - b blockstoreParams, -) (blockstore.Blockstore, error) { - if b.Metrics != nil { - // metrics scope is required for prometheus metrics and will be used as metrics name - // prefix - ctx = ipfsmetrics.CtxScope(ctx, "blockstore") - } - return blockstore.CachedBlockstore( - ctx, - store.Blockstore(), - blockstore.CacheOpts{ - HasTwoQueueCacheSize: defaultARCCacheSize, - }, - ) -} - -type blockstoreParams struct { - fx.In - // Metrics is unused, it is in dependency graph to ensure that prometheus metrics are enabled before bitswap - // is started. - Metrics *bitswapMetrics `optional:"true"` -} - -type bitSwapParams struct { - fx.In - - Lifecycle fx.Lifecycle - Ctx context.Context - Net Network - Host hst.Host - Bs blockstore.Blockstore - // Metrics is unused, it is in dependency graph to ensure that prometheus metrics are enabled before bitswap - // is started. 
- Metrics *bitswapMetrics `optional:"true"` -} - -func protocolID(network Network) protocol.ID { - return protocol.ID(fmt.Sprintf("/celestia/%s", network)) -} - -type bitswapMetrics struct{} - -func enableBitswapMetrics(_ prometheus.Registerer) *bitswapMetrics { - err := ipfsprom.Inject() - if err != nil { - log.Errorf("failed to inject bitswap metrics: %s", err) - return nil - } - return &bitswapMetrics{} -} diff --git a/nodebuilder/p2p/config.go b/nodebuilder/p2p/config.go index 5276ff9acb..32c60846f4 100644 --- a/nodebuilder/p2p/config.go +++ b/nodebuilder/p2p/config.go @@ -2,7 +2,6 @@ package p2p import ( "fmt" - "time" "github.com/libp2p/go-libp2p/core/peer" ma "github.com/multiformats/go-multiaddr" @@ -10,8 +9,6 @@ import ( "github.com/celestiaorg/celestia-node/nodebuilder/node" ) -const defaultRoutingRefreshPeriod = time.Minute - // Config combines all configuration fields for P2P subsystem. type Config struct { // ListenAddresses - Addresses to listen to on local NIC. @@ -29,8 +26,7 @@ type Config struct { // This is enabled by default for Bootstrappers. PeerExchange bool // ConnManager is a configuration tuple for ConnectionManager. - ConnManager connManagerConfig - RoutingTableRefreshPeriod time.Duration + ConnManager connManagerConfig // Allowlist for IPColocation PubSub parameter, a list of string CIDRs IPColocationWhitelist []string @@ -64,10 +60,9 @@ func DefaultConfig(tp node.Type) Config { "/ip4/127.0.0.1/tcp/2121", "/ip6/::/tcp/2121", }, - MutualPeers: []string{}, - PeerExchange: tp == node.Bridge || tp == node.Full, - ConnManager: defaultConnManagerConfig(tp), - RoutingTableRefreshPeriod: defaultRoutingRefreshPeriod, + MutualPeers: []string{}, + PeerExchange: tp == node.Bridge || tp == node.Full, + ConnManager: defaultConnManagerConfig(tp), } } @@ -83,15 +78,6 @@ func (cfg *Config) mutualPeers() (_ []peer.AddrInfo, err error) { return peer.AddrInfosFromP2pAddrs(maddrs...) } -// Validate performs basic validation of the config. 
-func (cfg *Config) Validate() error { - if cfg.RoutingTableRefreshPeriod <= 0 { - cfg.RoutingTableRefreshPeriod = defaultRoutingRefreshPeriod - log.Warnf("routingTableRefreshPeriod is not valid. restoring to default value: %d", cfg.RoutingTableRefreshPeriod) - } - return nil -} - // Upgrade updates the `ListenAddresses` and `NoAnnounceAddresses` to // include support for websocket connections. func (cfg *Config) Upgrade() { diff --git a/nodebuilder/p2p/metrics.go b/nodebuilder/p2p/metrics.go index 84916d63ae..0372ccc408 100644 --- a/nodebuilder/p2p/metrics.go +++ b/nodebuilder/p2p/metrics.go @@ -19,7 +19,6 @@ func WithMetrics() fx.Option { return fx.Options( fx.Provide(resourceManagerOpt(traceReporter)), fx.Provide(prometheusMetrics), - fx.Provide(enableBitswapMetrics), ) } diff --git a/nodebuilder/p2p/module.go b/nodebuilder/p2p/module.go index 59d9fa5054..9538ce03a5 100644 --- a/nodebuilder/p2p/module.go +++ b/nodebuilder/p2p/module.go @@ -14,9 +14,7 @@ var log = logging.Logger("module/p2p") // ConstructModule collects all the components and services related to p2p. 
func ConstructModule(tp node.Type, cfg *Config) fx.Option { // sanitize config values before constructing module - cfgErr := cfg.Validate() baseComponents := fx.Options( - fx.Error(cfgErr), fx.Supply(cfg), fx.Provide(Key), fx.Provide(id), @@ -26,10 +24,9 @@ func ConstructModule(tp node.Type, cfg *Config) fx.Option { fx.Provide(host), fx.Provide(routedHost), fx.Provide(pubSub), - fx.Provide(dataExchange), fx.Provide(ipld.NewBlockservice), fx.Provide(peerRouting), - fx.Provide(contentRouting), + fx.Provide(newDHT), fx.Provide(addrsFactory(cfg.AnnounceAddresses, cfg.NoAnnounceAddresses)), fx.Provide(metrics.NewBandwidthCounter), fx.Provide(newModule), @@ -43,14 +40,12 @@ func ConstructModule(tp node.Type, cfg *Config) fx.Option { return fx.Module( "p2p", baseComponents, - fx.Provide(blockstoreFromEDSStore), fx.Provide(infiniteResources), ) case node.Light: return fx.Module( "p2p", baseComponents, - fx.Provide(blockstoreFromDatastore), fx.Provide(autoscaleResources), ) default: diff --git a/nodebuilder/p2p/routing.go b/nodebuilder/p2p/routing.go index edbf624b08..53f5377524 100644 --- a/nodebuilder/p2p/routing.go +++ b/nodebuilder/p2p/routing.go @@ -6,70 +6,52 @@ import ( "github.com/ipfs/go-datastore" dht "github.com/libp2p/go-libp2p-kad-dht" - "github.com/libp2p/go-libp2p/core/protocol" "github.com/libp2p/go-libp2p/core/routing" "go.uber.org/fx" "github.com/celestiaorg/celestia-node/nodebuilder/node" + "github.com/celestiaorg/celestia-node/share/shwap/p2p/discovery" ) -// contentRouting constructs nil content routing, -// as for our use-case existing ContentRouting mechanisms, e.g DHT, are unsuitable -func contentRouting(r routing.PeerRouting) routing.ContentRouting { - return r.(*dht.IpfsDHT) -} - -// peerRouting provides constructor for PeerRouting over DHT. -// Basically, this provides a way to discover peer addresses by respecting public keys. 
-func peerRouting(cfg *Config, tp node.Type, params routingParams) (routing.PeerRouting, error) { - opts := []dht.Option{ - dht.BootstrapPeers(params.Peers...), - dht.ProtocolPrefix(protocol.ID(fmt.Sprintf("/celestia/%s", params.Net))), - dht.Datastore(params.DataStore), - dht.RoutingTableRefreshPeriod(cfg.RoutingTableRefreshPeriod), - } - - if isBootstrapper() { - opts = append(opts, - dht.BootstrapPeers(), // no bootstrappers for a bootstrapper ¯\_(ツ)_/¯ - ) - } - +func newDHT( + ctx context.Context, + lc fx.Lifecycle, + tp node.Type, + network Network, + bootsrappers Bootstrappers, + host HostBase, + dataStore datastore.Batching, +) (*dht.IpfsDHT, error) { + var mode dht.ModeOpt switch tp { case node.Light: - opts = append(opts, - dht.Mode(dht.ModeClient), - ) + mode = dht.ModeClient case node.Bridge, node.Full: - opts = append(opts, - dht.Mode(dht.ModeServer), - ) + mode = dht.ModeServer default: return nil, fmt.Errorf("unsupported node type: %s", tp) } - d, err := dht.New(params.Ctx, params.Host, opts...) 
+ // no bootstrappers for a bootstrapper ¯\_(ツ)_/¯ + // otherwise dht.Bootstrap(OnStart hook) will deadlock + if isBootstrapper() { + bootsrappers = nil + } + + dht, err := discovery.NewDHT(ctx, network.String(), bootsrappers, host, dataStore, mode) if err != nil { return nil, err } - params.Lc.Append(fx.Hook{ - OnStart: func(ctx context.Context) error { - return d.Bootstrap(ctx) - }, - OnStop: func(context.Context) error { - return d.Close() - }, + stopFn := func(context.Context) error { + return dht.Close() + } + lc.Append(fx.Hook{ + OnStart: dht.Bootstrap, + OnStop: stopFn, }) - return d, nil + return dht, nil } -type routingParams struct { - fx.In - - Ctx context.Context - Net Network - Peers Bootstrappers - Lc fx.Lifecycle - Host HostBase - DataStore datastore.Batching +func peerRouting(dht *dht.IpfsDHT) routing.PeerRouting { + return dht } diff --git a/nodebuilder/share/bitswap.go b/nodebuilder/share/bitswap.go new file mode 100644 index 0000000000..9316075dfe --- /dev/null +++ b/nodebuilder/share/bitswap.go @@ -0,0 +1,93 @@ +package share + +import ( + "context" + "fmt" + + "github.com/ipfs/boxo/blockstore" + "github.com/ipfs/boxo/exchange" + "github.com/ipfs/go-datastore" + ipfsmetrics "github.com/ipfs/go-metrics-interface" + ipfsprom "github.com/ipfs/go-metrics-prometheus" + hst "github.com/libp2p/go-libp2p/core/host" + "github.com/libp2p/go-libp2p/core/protocol" + "github.com/prometheus/client_golang/prometheus" + "go.uber.org/fx" + + "github.com/celestiaorg/celestia-node/nodebuilder/node" + "github.com/celestiaorg/celestia-node/nodebuilder/p2p" + "github.com/celestiaorg/celestia-node/share/shwap/p2p/bitswap" + "github.com/celestiaorg/celestia-node/store" +) + +// dataExchange constructs Exchange(Bitswap Composition) for Shwap +func dataExchange(tp node.Type, params bitSwapParams) exchange.SessionExchange { + prefix := protocolID(params.Net) + net := bitswap.NewNetwork(params.Host, prefix) + + if params.PromReg != nil { + // metrics scope is required for 
prometheus metrics and will be used as metrics name prefix + params.Ctx = ipfsmetrics.CtxScope(params.Ctx, "bitswap") + err := ipfsprom.Inject() + if err != nil { + return nil + } + } + + switch tp { + case node.Full, node.Bridge: + bs := bitswap.New(params.Ctx, net, params.Bs) + net.Start(bs.Client, bs.Server) + params.Lifecycle.Append(fx.Hook{ + OnStop: func(_ context.Context) (err error) { + net.Stop() + return bs.Close() + }, + }) + return bs + case node.Light: + cl := bitswap.NewClient(params.Ctx, net, params.Bs) + net.Start(cl) + params.Lifecycle.Append(fx.Hook{ + OnStop: func(_ context.Context) (err error) { + net.Stop() + return cl.Close() + }, + }) + return cl + default: + panic(fmt.Sprintf("unsupported node type: %v", tp)) + } +} + +func blockstoreFromDatastore(ds datastore.Batching) (blockstore.Blockstore, error) { + return blockstore.NewBlockstore(ds), nil +} + +func blockstoreFromEDSStore(store *store.Store, blockStoreCacheSize int) (blockstore.Blockstore, error) { + if blockStoreCacheSize == 0 { + // no cache, return plain blockstore + return &bitswap.Blockstore{Getter: store}, nil + } + withCache, err := store.WithCache("blockstore", blockStoreCacheSize) + if err != nil { + return nil, fmt.Errorf("create cached store for blockstore:%w", err) + } + bs := &bitswap.Blockstore{Getter: withCache} + return bs, nil +} + +type bitSwapParams struct { + fx.In + + Lifecycle fx.Lifecycle + Ctx context.Context + Net p2p.Network + Host hst.Host + Bs blockstore.Blockstore + PromReg prometheus.Registerer `optional:"true"` +} + +func protocolID(network p2p.Network) protocol.ID { + return protocol.ID(fmt.Sprintf("/celestia/%s", network)) +} diff --git a/nodebuilder/share/cmd/share.go b/nodebuilder/share/cmd/share.go index 58d6befecb..1eb21a045f 100644 --- a/nodebuilder/share/cmd/share.go +++ b/nodebuilder/share/cmd/share.go @@ -32,7 +32,7 @@ var Cmd = &cobra.Command{ var sharesAvailableCmd = &cobra.Command{ Use: "available", - Short: "Subjectively validates if Shares 
committed to the given Root are available on the Network.", + Short: "Subjectively validates if Shares committed to the given EDS are available on the Network.", Args: cobra.ExactArgs(1), RunE: func(cmd *cobra.Command, args []string) error { client, err := cmdnode.ParseClientFromCtx(cmd.Context()) diff --git a/nodebuilder/share/config.go b/nodebuilder/share/config.go index 1d984b6dca..6cf18332bc 100644 --- a/nodebuilder/share/config.go +++ b/nodebuilder/share/config.go @@ -5,17 +5,22 @@ import ( "github.com/celestiaorg/celestia-node/nodebuilder/node" "github.com/celestiaorg/celestia-node/share/availability/light" - "github.com/celestiaorg/celestia-node/share/eds" - "github.com/celestiaorg/celestia-node/share/p2p/discovery" - "github.com/celestiaorg/celestia-node/share/p2p/peers" - "github.com/celestiaorg/celestia-node/share/p2p/shrexeds" - "github.com/celestiaorg/celestia-node/share/p2p/shrexnd" + "github.com/celestiaorg/celestia-node/share/shwap/p2p/discovery" + "github.com/celestiaorg/celestia-node/share/shwap/p2p/shrex/peers" + "github.com/celestiaorg/celestia-node/share/shwap/p2p/shrex/shrexeds" + "github.com/celestiaorg/celestia-node/share/shwap/p2p/shrex/shrexnd" + "github.com/celestiaorg/celestia-node/store" +) + +const ( + defaultBlockstoreCacheSize = 128 ) // TODO: some params are pointers and other are not, Let's fix this. 
type Config struct { // EDSStoreParams sets eds store configuration parameters - EDSStoreParams *eds.Parameters + EDSStoreParams *store.Parameters + BlockStoreCacheSize uint UseShareExchange bool // ShrExEDSParams sets shrexeds client and server configuration parameters @@ -31,12 +36,13 @@ type Config struct { func DefaultConfig(tp node.Type) Config { cfg := Config{ - EDSStoreParams: eds.DefaultParameters(), - Discovery: discovery.DefaultParameters(), - ShrExEDSParams: shrexeds.DefaultParameters(), - ShrExNDParams: shrexnd.DefaultParameters(), - UseShareExchange: true, - PeerManagerParams: peers.DefaultParameters(), + EDSStoreParams: store.DefaultParameters(), + BlockStoreCacheSize: defaultBlockstoreCacheSize, + Discovery: discovery.DefaultParameters(), + ShrExEDSParams: shrexeds.DefaultParameters(), + ShrExNDParams: shrexnd.DefaultParameters(), + UseShareExchange: true, + PeerManagerParams: peers.DefaultParameters(), } if tp == node.Light { @@ -55,20 +61,23 @@ func (cfg *Config) Validate(tp node.Type) error { } if err := cfg.Discovery.Validate(); err != nil { - return fmt.Errorf("nodebuilder/share: %w", err) + return fmt.Errorf("discovery: %w", err) } if err := cfg.ShrExNDParams.Validate(); err != nil { - return fmt.Errorf("nodebuilder/share: %w", err) + return fmt.Errorf("shrexnd: %w", err) } if err := cfg.ShrExEDSParams.Validate(); err != nil { - return fmt.Errorf("nodebuilder/share: %w", err) + return fmt.Errorf("shrexeds: %w", err) } if err := cfg.PeerManagerParams.Validate(); err != nil { - return fmt.Errorf("nodebuilder/share: %w", err) + return fmt.Errorf("peer manager: %w", err) } + if err := cfg.EDSStoreParams.Validate(); err != nil { + return fmt.Errorf("eds store: %w", err) + } return nil } diff --git a/nodebuilder/share/constructors.go b/nodebuilder/share/constructors.go index 49789a8771..660117725d 100644 --- a/nodebuilder/share/constructors.go +++ b/nodebuilder/share/constructors.go @@ -1,90 +1,46 @@ package share import ( - "context" - "errors" - - 
"github.com/filecoin-project/dagstore" - "github.com/ipfs/boxo/blockservice" - - "github.com/celestiaorg/celestia-app/v2/pkg/da" - headerServ "github.com/celestiaorg/celestia-node/nodebuilder/header" "github.com/celestiaorg/celestia-node/share" - "github.com/celestiaorg/celestia-node/share/eds" - "github.com/celestiaorg/celestia-node/share/getters" - "github.com/celestiaorg/celestia-node/share/ipld" + "github.com/celestiaorg/celestia-node/share/shwap" + "github.com/celestiaorg/celestia-node/share/shwap/getters" + "github.com/celestiaorg/celestia-node/share/shwap/p2p/bitswap" + "github.com/celestiaorg/celestia-node/share/shwap/p2p/shrex/shrex_getter" + "github.com/celestiaorg/celestia-node/store" ) -func newShareModule(getter share.Getter, avail share.Availability, header headerServ.Module) Module { +func newShareModule(getter shwap.Getter, avail share.Availability, header headerServ.Module) Module { return &module{getter, avail, header} } -// ensureEmptyCARExists adds an empty EDS to the provided EDS store. -func ensureEmptyCARExists(ctx context.Context, store *eds.Store) error { - emptyEDS := share.EmptyExtendedDataSquare() - emptyDAH, err := da.NewDataAvailabilityHeader(emptyEDS) - if err != nil { - return err - } - - err = store.Put(ctx, emptyDAH.Hash(), emptyEDS) - if errors.Is(err, dagstore.ErrShardExists) { - return nil - } - return err -} - -// ensureEmptyEDSInBS checks if the given DAG contains an empty block data square. -// If it does not, it stores an empty block. This optimization exists to prevent -// redundant storing of empty block data so that it is only stored once and returned -// upon request for a block with an empty data square. 
-func ensureEmptyEDSInBS(ctx context.Context, bServ blockservice.BlockService) error { - _, err := ipld.AddShares(ctx, share.EmptyBlockShares(), bServ) - return err -} - func lightGetter( - shrexGetter *getters.ShrexGetter, - ipldGetter *getters.IPLDGetter, + shrexGetter *shrex_getter.Getter, + bitswapGetter *bitswap.Getter, cfg Config, -) share.Getter { - var cascade []share.Getter - if cfg.UseShareExchange { - cascade = append(cascade, shrexGetter) - } - cascade = append(cascade, ipldGetter) - return getters.NewCascadeGetter(cascade) -} - -// ShrexGetter is added to bridge nodes for the case that a shard is removed -// after detected shard corruption. This ensures the block is fetched and stored -// by shrex the next time the data is retrieved (meaning shard recovery is -// manual after corruption is detected). -func bridgeGetter( - storeGetter *getters.StoreGetter, - shrexGetter *getters.ShrexGetter, - cfg Config, -) share.Getter { - var cascade []share.Getter - cascade = append(cascade, storeGetter) +) shwap.Getter { + var cascade []shwap.Getter if cfg.UseShareExchange { cascade = append(cascade, shrexGetter) } + cascade = append(cascade, bitswapGetter) return getters.NewCascadeGetter(cascade) } -func fullGetter( - storeGetter *getters.StoreGetter, - shrexGetter *getters.ShrexGetter, - ipldGetter *getters.IPLDGetter, +// Getter is added to bridge nodes for the case where Bridge nodes are +// running in a pruned mode. This ensures the block can be retrieved from +// the network if it was pruned from the local store. 
+func bridgeAndFullGetter( + storeGetter *store.Getter, + shrexGetter *shrex_getter.Getter, + bitswapGetter *bitswap.Getter, cfg Config, -) share.Getter { - var cascade []share.Getter +) shwap.Getter { + var cascade []shwap.Getter cascade = append(cascade, storeGetter) if cfg.UseShareExchange { cascade = append(cascade, shrexGetter) } - cascade = append(cascade, ipldGetter) + cascade = append(cascade, bitswapGetter) return getters.NewCascadeGetter(cascade) } diff --git a/nodebuilder/share/mocks/api.go b/nodebuilder/share/mocks/api.go index c24a5dc771..682d5d8d48 100644 --- a/nodebuilder/share/mocks/api.go +++ b/nodebuilder/share/mocks/api.go @@ -9,10 +9,10 @@ import ( reflect "reflect" header "github.com/celestiaorg/celestia-node/header" - share "github.com/celestiaorg/celestia-node/share" + share "github.com/celestiaorg/celestia-node/nodebuilder/share" + share0 "github.com/celestiaorg/celestia-node/share" rsmt2d "github.com/celestiaorg/rsmt2d" gomock "github.com/golang/mock/gomock" - types "github.com/tendermint/tendermint/types" ) // MockModule is a mock of Module interface. @@ -54,13 +54,12 @@ func (mr *MockModuleMockRecorder) GetEDS(arg0, arg1 interface{}) *gomock.Call { } // GetRange mocks base method. -func (m *MockModule) GetRange(arg0 context.Context, arg1 uint64, arg2, arg3 int) ([][]byte, *types.ShareProof, error) { +func (m *MockModule) GetRange(arg0 context.Context, arg1 uint64, arg2, arg3 int) (*share.GetRangeResult, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "GetRange", arg0, arg1, arg2, arg3) - ret0, _ := ret[0].([][]byte) - ret1, _ := ret[1].(*types.ShareProof) - ret2, _ := ret[2].(error) - return ret0, ret1, ret2 + ret0, _ := ret[0].(*share.GetRangeResult) + ret1, _ := ret[1].(error) + return ret0, ret1 } // GetRange indicates an expected call of GetRange. @@ -85,7 +84,7 @@ func (mr *MockModuleMockRecorder) GetShare(arg0, arg1, arg2, arg3 interface{}) * } // GetSharesByNamespace mocks base method. 
-func (m *MockModule) GetSharesByNamespace(arg0 context.Context, arg1 *header.ExtendedHeader, arg2 share.Namespace) (share.NamespacedShares, error) { +func (m *MockModule) GetSharesByNamespace(arg0 context.Context, arg1 *header.ExtendedHeader, arg2 share0.Namespace) (share.NamespacedShares, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "GetSharesByNamespace", arg0, arg1, arg2) ret0, _ := ret[0].(share.NamespacedShares) diff --git a/nodebuilder/share/module.go b/nodebuilder/share/module.go index 5d0dbbd096..8b20c23585 100644 --- a/nodebuilder/share/module.go +++ b/nodebuilder/share/module.go @@ -2,7 +2,9 @@ package share import ( "context" + "fmt" + "github.com/ipfs/boxo/blockstore" "github.com/ipfs/go-datastore" "github.com/libp2p/go-libp2p/core/host" "go.uber.org/fx" @@ -13,50 +15,45 @@ import ( "github.com/celestiaorg/celestia-node/share" "github.com/celestiaorg/celestia-node/share/availability/full" "github.com/celestiaorg/celestia-node/share/availability/light" - "github.com/celestiaorg/celestia-node/share/eds" - "github.com/celestiaorg/celestia-node/share/getters" - "github.com/celestiaorg/celestia-node/share/p2p/peers" - "github.com/celestiaorg/celestia-node/share/p2p/shrexeds" - "github.com/celestiaorg/celestia-node/share/p2p/shrexnd" - "github.com/celestiaorg/celestia-node/share/p2p/shrexsub" + "github.com/celestiaorg/celestia-node/share/shwap" + "github.com/celestiaorg/celestia-node/share/shwap/p2p/bitswap" + "github.com/celestiaorg/celestia-node/share/shwap/p2p/shrex/peers" + "github.com/celestiaorg/celestia-node/share/shwap/p2p/shrex/shrex_getter" + "github.com/celestiaorg/celestia-node/share/shwap/p2p/shrex/shrexeds" + "github.com/celestiaorg/celestia-node/share/shwap/p2p/shrex/shrexnd" + "github.com/celestiaorg/celestia-node/share/shwap/p2p/shrex/shrexsub" + "github.com/celestiaorg/celestia-node/store" ) func ConstructModule(tp node.Type, cfg *Config, options ...fx.Option) fx.Option { // sanitize config values before constructing module - cfgErr := 
cfg.Validate(tp) + err := cfg.Validate(tp) + if err != nil { + return fx.Error(fmt.Errorf("nodebuilder/share: validate config: %w", err)) + } baseComponents := fx.Options( fx.Supply(*cfg), - fx.Error(cfgErr), fx.Options(options...), fx.Provide(newShareModule), availabilityComponents(tp, cfg), shrexComponents(tp, cfg), + bitswapComponents(tp, cfg), peerComponents(tp, cfg), ) switch tp { - case node.Bridge: - return fx.Module( - "share", - baseComponents, - edsStoreComponents(cfg), - fx.Provide(bridgeGetter), - ) - case node.Full: + case node.Bridge, node.Full: return fx.Module( "share", baseComponents, edsStoreComponents(cfg), - fx.Provide(getters.NewIPLDGetter), - fx.Provide(fullGetter), + fx.Provide(bridgeAndFullGetter), ) case node.Light: return fx.Module( "share", baseComponents, - fx.Invoke(ensureEmptyEDSInBS), - fx.Provide(getters.NewIPLDGetter), fx.Provide(lightGetter), ) default: @@ -64,6 +61,29 @@ func ConstructModule(tp node.Type, cfg *Config, options ...fx.Option) fx.Option } } +func bitswapComponents(tp node.Type, cfg *Config) fx.Option { + opts := fx.Options( + fx.Provide(dataExchange), + fx.Provide(bitswap.NewGetter), + ) + switch tp { + case node.Light: + return fx.Options( + opts, + fx.Provide(blockstoreFromDatastore), + ) + case node.Full, node.Bridge: + return fx.Options( + opts, + fx.Provide(func(store *store.Store) (blockstore.Blockstore, error) { + return blockstoreFromEDSStore(store, int(cfg.BlockStoreCacheSize)) + }), + ) + default: + panic("invalid node type") + } +} + func shrexComponents(tp node.Type, cfg *Config) fx.Option { opts := fx.Options( fx.Provide( @@ -92,8 +112,8 @@ func shrexComponents(tp node.Type, cfg *Config) fx.Option { edsClient *shrexeds.Client, ndClient *shrexnd.Client, managers map[string]*peers.Manager, - ) *getters.ShrexGetter { - return getters.NewShrexGetter( + ) *shrex_getter.Getter { + return shrex_getter.NewGetter( edsClient, ndClient, managers[fullNodesTag], @@ -101,10 +121,10 @@ func shrexComponents(tp node.Type, 
cfg *Config) fx.Option { lightprune.Window, ) }, - fx.OnStart(func(ctx context.Context, getter *getters.ShrexGetter) error { + fx.OnStart(func(ctx context.Context, getter *shrex_getter.Getter) error { return getter.Start(ctx) }), - fx.OnStop(func(ctx context.Context, getter *getters.ShrexGetter) error { + fx.OnStop(func(ctx context.Context, getter *shrex_getter.Getter) error { return getter.Stop(ctx) }), )), @@ -125,7 +145,7 @@ func shrexComponents(tp node.Type, cfg *Config) fx.Option { return fx.Options( opts, shrexServerComponents(cfg), - fx.Provide(getters.NewStoreGetter), + fx.Provide(store.NewGetter), fx.Provide(func(shrexSub *shrexsub.PubSub) shrexsub.BroadcastFn { return shrexSub.Broadcast }), @@ -134,7 +154,7 @@ func shrexComponents(tp node.Type, cfg *Config) fx.Option { return fx.Options( opts, shrexServerComponents(cfg), - fx.Provide(getters.NewStoreGetter), + fx.Provide(store.NewGetter), fx.Provide(func(shrexSub *shrexsub.PubSub) shrexsub.BroadcastFn { return shrexSub.Broadcast }), @@ -155,7 +175,7 @@ func shrexServerComponents(cfg *Config) fx.Option { return fx.Options( fx.Invoke(func(_ *shrexeds.Server, _ *shrexnd.Server) {}), fx.Provide(fx.Annotate( - func(host host.Host, store *eds.Store, network modp2p.Network) (*shrexeds.Server, error) { + func(host host.Host, store *store.Store, network modp2p.Network) (*shrexeds.Server, error) { cfg.ShrExEDSParams.WithNetworkID(network.String()) return shrexeds.NewServer(cfg.ShrExEDSParams, host, store) }, @@ -169,7 +189,7 @@ func shrexServerComponents(cfg *Config) fx.Option { fx.Provide(fx.Annotate( func( host host.Host, - store *eds.Store, + store *store.Store, network modp2p.Network, ) (*shrexnd.Server, error) { cfg.ShrExNDParams.WithNetworkID(network.String()) @@ -188,17 +208,10 @@ func shrexServerComponents(cfg *Config) fx.Option { func edsStoreComponents(cfg *Config) fx.Option { return fx.Options( fx.Provide(fx.Annotate( - func(path node.StorePath, ds datastore.Batching) (*eds.Store, error) { - return 
eds.NewStore(cfg.EDSStoreParams, string(path), ds) + func(path node.StorePath) (*store.Store, error) { + return store.NewStore(cfg.EDSStoreParams, string(path)) }, - fx.OnStart(func(ctx context.Context, store *eds.Store) error { - err := store.Start(ctx) - if err != nil { - return err - } - return ensureEmptyCARExists(ctx, store) - }), - fx.OnStop(func(ctx context.Context, store *eds.Store) error { + fx.OnStop(func(ctx context.Context, store *store.Store) error { return store.Stop(ctx) }), )), @@ -210,7 +223,7 @@ func availabilityComponents(tp node.Type, cfg *Config) fx.Option { case node.Light: return fx.Options( fx.Provide(fx.Annotate( - func(getter share.Getter, ds datastore.Batching) *light.ShareAvailability { + func(getter shwap.Getter, ds datastore.Batching) *light.ShareAvailability { return light.NewShareAvailability( getter, ds, diff --git a/nodebuilder/share/opts.go b/nodebuilder/share/opts.go index 9c122b7b0f..cfea26dbb4 100644 --- a/nodebuilder/share/opts.go +++ b/nodebuilder/share/opts.go @@ -3,12 +3,12 @@ package share import ( "errors" - "github.com/celestiaorg/celestia-node/share/eds" - "github.com/celestiaorg/celestia-node/share/getters" - disc "github.com/celestiaorg/celestia-node/share/p2p/discovery" - "github.com/celestiaorg/celestia-node/share/p2p/peers" - "github.com/celestiaorg/celestia-node/share/p2p/shrexeds" - "github.com/celestiaorg/celestia-node/share/p2p/shrexnd" + "github.com/celestiaorg/celestia-node/share/shwap/p2p/discovery" + "github.com/celestiaorg/celestia-node/share/shwap/p2p/shrex/peers" + "github.com/celestiaorg/celestia-node/share/shwap/p2p/shrex/shrex_getter" + "github.com/celestiaorg/celestia-node/share/shwap/p2p/shrex/shrexeds" + "github.com/celestiaorg/celestia-node/share/shwap/p2p/shrex/shrexnd" + "github.com/celestiaorg/celestia-node/store" ) // WithPeerManagerMetrics is a utility function to turn on peer manager metrics and that is @@ -23,7 +23,7 @@ func WithPeerManagerMetrics(managers map[string]*peers.Manager) error { 
// WithDiscoveryMetrics is a utility function to turn on discovery metrics and that is expected to // be "invoked" by the fx lifecycle. -func WithDiscoveryMetrics(discs []*disc.Discovery) error { +func WithDiscoveryMetrics(discs []*discovery.Discovery) error { var err error for _, disc := range discs { err = errors.Join(err, disc.WithMetrics()) @@ -49,10 +49,10 @@ func WithShrexServerMetrics(edsServer *shrexeds.Server, ndServer *shrexnd.Server return ndServer.WithMetrics() } -func WithShrexGetterMetrics(sg *getters.ShrexGetter) error { +func WithShrexGetterMetrics(sg *shrex_getter.Getter) error { return sg.WithMetrics() } -func WithStoreMetrics(s *eds.Store) error { +func WithStoreMetrics(s *store.Store) error { return s.WithMetrics() } diff --git a/nodebuilder/share/p2p_constructors.go b/nodebuilder/share/p2p_constructors.go index aae1a325aa..b02ea1a49e 100644 --- a/nodebuilder/share/p2p_constructors.go +++ b/nodebuilder/share/p2p_constructors.go @@ -1,8 +1,9 @@ package share import ( + dht "github.com/libp2p/go-libp2p-kad-dht" + p2pdisc "github.com/libp2p/go-libp2p/core/discovery" "github.com/libp2p/go-libp2p/core/host" - "github.com/libp2p/go-libp2p/core/routing" routingdisc "github.com/libp2p/go-libp2p/p2p/discovery/routing" "github.com/libp2p/go-libp2p/p2p/net/conngater" "go.uber.org/fx" @@ -13,9 +14,9 @@ import ( "github.com/celestiaorg/celestia-node/header" "github.com/celestiaorg/celestia-node/nodebuilder/node" modprune "github.com/celestiaorg/celestia-node/nodebuilder/pruner" - disc "github.com/celestiaorg/celestia-node/share/p2p/discovery" - "github.com/celestiaorg/celestia-node/share/p2p/peers" - "github.com/celestiaorg/celestia-node/share/p2p/shrexsub" + "github.com/celestiaorg/celestia-node/share/shwap/p2p/discovery" + "github.com/celestiaorg/celestia-node/share/shwap/p2p/shrex/peers" + "github.com/celestiaorg/celestia-node/share/shwap/p2p/shrex/shrexsub" ) const ( @@ -24,11 +25,16 @@ const ( // archivalNodesTag is the tag used to identify archival 
nodes in the // discovery service. archivalNodesTag = "archival" + + // discovery version is a prefix for all tags used in discovery. It is bumped when + // there are protocol breaking changes. + version = "v0.1.0" ) // TODO @renaynay: rename func peerComponents(tp node.Type, cfg *Config) fx.Option { return fx.Options( + fx.Provide(routingDiscovery), fullDiscoveryAndPeerManager(tp, cfg), archivalDiscoveryAndPeerManager(tp, cfg), ) @@ -42,14 +48,14 @@ func fullDiscoveryAndPeerManager(tp node.Type, cfg *Config) fx.Option { func( lc fx.Lifecycle, host host.Host, - r routing.ContentRouting, connGater *conngater.BasicConnectionGater, + disc p2pdisc.Discovery, shrexSub *shrexsub.PubSub, headerSub libhead.Subscriber[*header.ExtendedHeader], // we must ensure Syncer is started before PeerManager // so that Syncer registers header validator before PeerManager subscribes to headers _ *sync.Syncer[*header.ExtendedHeader], - ) (*peers.Manager, *disc.Discovery, error) { + ) (*peers.Manager, *discovery.Discovery, error) { var managerOpts []peers.Option if tp != node.Bridge { // BNs do not need the overhead of shrexsub peer pools as @@ -68,17 +74,18 @@ func fullDiscoveryAndPeerManager(tp node.Type, cfg *Config) fx.Option { return nil, nil, err } - discOpts := []disc.Option{disc.WithOnPeersUpdate(fullManager.UpdateNodePool)} + discOpts := []discovery.Option{discovery.WithOnPeersUpdate(fullManager.UpdateNodePool)} if tp != node.Light { // only FN and BNs should advertise to `full` topic - discOpts = append(discOpts, disc.WithAdvertise()) + discOpts = append(discOpts, discovery.WithAdvertise()) } - fullDisc, err := disc.NewDiscovery( + fullDisc, err := discovery.NewDiscovery( cfg.Discovery, host, - routingdisc.NewRoutingDiscovery(r), + disc, + version, fullNodesTag, discOpts..., ) @@ -100,12 +107,12 @@ func archivalDiscoveryAndPeerManager(tp node.Type, cfg *Config) fx.Option { func( lc fx.Lifecycle, pruneCfg *modprune.Config, - d *disc.Discovery, - manager *peers.Manager, + fullDisc 
*discovery.Discovery, + fullManager *peers.Manager, h host.Host, - r routing.ContentRouting, + disc p2pdisc.Discovery, gater *conngater.BasicConnectionGater, - ) (map[string]*peers.Manager, []*disc.Discovery, error) { + ) (map[string]*peers.Manager, []*discovery.Discovery, error) { archivalPeerManager, err := peers.NewManager( cfg.PeerManagerParams, h, @@ -116,16 +123,17 @@ func archivalDiscoveryAndPeerManager(tp node.Type, cfg *Config) fx.Option { return nil, nil, err } - discOpts := []disc.Option{disc.WithOnPeersUpdate(archivalPeerManager.UpdateNodePool)} + discOpts := []discovery.Option{discovery.WithOnPeersUpdate(archivalPeerManager.UpdateNodePool)} if (tp == node.Bridge || tp == node.Full) && !pruneCfg.EnableService { - discOpts = append(discOpts, disc.WithAdvertise()) + discOpts = append(discOpts, discovery.WithAdvertise()) } - archivalDisc, err := disc.NewDiscovery( + archivalDisc, err := discovery.NewDiscovery( cfg.Discovery, h, - routingdisc.NewRoutingDiscovery(r), + disc, + version, archivalNodesTag, discOpts..., ) @@ -137,7 +145,11 @@ func archivalDiscoveryAndPeerManager(tp node.Type, cfg *Config) fx.Option { OnStop: archivalDisc.Stop, }) - managers := map[string]*peers.Manager{fullNodesTag: manager, archivalNodesTag: archivalPeerManager} - return managers, []*disc.Discovery{d, archivalDisc}, nil + managers := map[string]*peers.Manager{fullNodesTag: fullManager, archivalNodesTag: archivalPeerManager} + return managers, []*discovery.Discovery{fullDisc, archivalDisc}, nil }) } + +func routingDiscovery(dht *dht.IpfsDHT) p2pdisc.Discovery { + return routingdisc.NewRoutingDiscovery(dht) +} diff --git a/nodebuilder/share/share.go b/nodebuilder/share/share.go index a2cef51170..b9d21953bf 100644 --- a/nodebuilder/share/share.go +++ b/nodebuilder/share/share.go @@ -5,12 +5,14 @@ import ( "github.com/tendermint/tendermint/types" + "github.com/celestiaorg/nmt" "github.com/celestiaorg/rsmt2d" "github.com/celestiaorg/celestia-node/header" headerServ 
"github.com/celestiaorg/celestia-node/nodebuilder/header" "github.com/celestiaorg/celestia-node/share" "github.com/celestiaorg/celestia-node/share/eds" + "github.com/celestiaorg/celestia-node/share/shwap" ) var _ Module = (*API)(nil) @@ -50,7 +52,7 @@ type Module interface { // Shares are returned in a row-by-row order if the namespace spans multiple rows. GetSharesByNamespace( ctx context.Context, header *header.ExtendedHeader, namespace share.Namespace, - ) (share.NamespacedShares, error) + ) (NamespacedShares, error) // GetRange gets a list of shares and their corresponding proof. GetRange(ctx context.Context, height uint64, start, end int) (*GetRangeResult, error) } @@ -73,7 +75,7 @@ type API struct { ctx context.Context, header *header.ExtendedHeader, namespace share.Namespace, - ) (share.NamespacedShares, error) `perm:"read"` + ) (NamespacedShares, error) `perm:"read"` GetRange func( ctx context.Context, height uint64, @@ -102,12 +104,12 @@ func (api *API) GetSharesByNamespace( ctx context.Context, header *header.ExtendedHeader, namespace share.Namespace, -) (share.NamespacedShares, error) { +) (NamespacedShares, error) { return api.Internal.GetSharesByNamespace(ctx, header, namespace) } type module struct { - share.Getter + shwap.Getter share.Availability hs headerServ.Module } @@ -125,6 +127,7 @@ func (m module) GetRange(ctx context.Context, height uint64, start, end int) (*G if err != nil { return nil, err } + proof, err := eds.ProveShares(extendedDataSquare, start, end) if err != nil { return nil, err @@ -134,3 +137,46 @@ func (m module) GetRange(ctx context.Context, height uint64, start, end int) (*G proof, }, nil } + +func (m module) GetSharesByNamespace( + ctx context.Context, + header *header.ExtendedHeader, + namespace share.Namespace, +) (NamespacedShares, error) { + nd, err := m.Getter.GetSharesByNamespace(ctx, header, namespace) + if err != nil { + return nil, err + } + return convertToNamespacedShares(nd), nil +} + +// NamespacedShares represents 
all shares with proofs within a specific namespace of an EDS. +// This is a copy of the share.NamespacedShares type, that is used to avoid breaking changes +// in the API. +type NamespacedShares []NamespacedRow + +// NamespacedRow represents all shares with proofs within a specific namespace of a single EDS row. +type NamespacedRow struct { + Shares []share.Share `json:"shares"` + Proof *nmt.Proof `json:"proof"` +} + +// Flatten returns the concatenated slice of all NamespacedRow shares. +func (ns NamespacedShares) Flatten() []share.Share { + var shares []share.Share + for _, row := range ns { + shares = append(shares, row.Shares...) + } + return shares +} + +func convertToNamespacedShares(nd shwap.NamespaceData) NamespacedShares { + ns := make(NamespacedShares, 0, len(nd)) + for _, row := range nd { + ns = append(ns, NamespacedRow{ + Shares: row.Shares, + Proof: row.Proof, + }) + } + return ns +} diff --git a/nodebuilder/share/share_test.go b/nodebuilder/share/share_test.go deleted file mode 100644 index db170709db..0000000000 --- a/nodebuilder/share/share_test.go +++ /dev/null @@ -1,43 +0,0 @@ -package share - -import ( - "context" - "testing" - - "github.com/ipfs/go-datastore" - ds_sync "github.com/ipfs/go-datastore/sync" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/celestiaorg/celestia-node/share" - "github.com/celestiaorg/celestia-node/share/eds" -) - -func Test_EmptyCARExists(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - t.Cleanup(cancel) - - ds := ds_sync.MutexWrap(datastore.NewMapDatastore()) - edsStore, err := eds.NewStore(eds.DefaultParameters(), t.TempDir(), ds) - require.NoError(t, err) - err = edsStore.Start(ctx) - require.NoError(t, err) - - eds := share.EmptyExtendedDataSquare() - dah, err := share.NewRoot(eds) - require.NoError(t, err) - - // add empty EDS to store - err = ensureEmptyCARExists(ctx, edsStore) - assert.NoError(t, err) - - // assert that the empty car 
exists - has, err := edsStore.Has(ctx, dah.Hash()) - assert.True(t, has) - assert.NoError(t, err) - - // assert that the empty car is, in fact, empty - emptyEds, err := edsStore.Get(ctx, dah.Hash()) - assert.Equal(t, eds.Flattened(), emptyEds.Flattened()) - assert.NoError(t, err) -} diff --git a/nodebuilder/state/mocks/api.go b/nodebuilder/state/mocks/api.go index a4fb8d8cec..6b75b48a52 100644 --- a/nodebuilder/state/mocks/api.go +++ b/nodebuilder/state/mocks/api.go @@ -10,10 +10,10 @@ import ( math "cosmossdk.io/math" state "github.com/celestiaorg/celestia-node/state" + blob "github.com/celestiaorg/go-square/blob" types "github.com/cosmos/cosmos-sdk/types" types0 "github.com/cosmos/cosmos-sdk/x/staking/types" gomock "github.com/golang/mock/gomock" - types1 "github.com/tendermint/tendermint/proto/tendermint/types" ) // MockModule is a mock of Module interface. @@ -205,7 +205,7 @@ func (mr *MockModuleMockRecorder) RevokeGrantFee(arg0, arg1, arg2 interface{}) * } // SubmitPayForBlob mocks base method. 
-func (m *MockModule) SubmitPayForBlob(arg0 context.Context, arg1 []*types1.Blob, arg2 *state.TxConfig) (*types.TxResponse, error) { +func (m *MockModule) SubmitPayForBlob(arg0 context.Context, arg1 []*blob.Blob, arg2 *state.TxConfig) (*types.TxResponse, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "SubmitPayForBlob", arg0, arg1, arg2) ret0, _ := ret[0].(*types.TxResponse) diff --git a/nodebuilder/store_test.go b/nodebuilder/store_test.go index 45a9c3960a..2b7cdd23de 100644 --- a/nodebuilder/store_test.go +++ b/nodebuilder/store_test.go @@ -3,28 +3,16 @@ package nodebuilder import ( - "context" "fmt" "strconv" "testing" - "time" "github.com/gofrs/flock" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "github.com/celestiaorg/celestia-app/v2/pkg/da" - "github.com/celestiaorg/celestia-app/v2/pkg/wrapper" - "github.com/celestiaorg/nmt" - "github.com/celestiaorg/rsmt2d" - "github.com/celestiaorg/celestia-node/nodebuilder/node" "github.com/celestiaorg/celestia-node/nodebuilder/p2p" - "github.com/celestiaorg/celestia-node/share" - "github.com/celestiaorg/celestia-node/share/eds" - "github.com/celestiaorg/celestia-node/share/eds/edstest" - "github.com/celestiaorg/celestia-node/share/ipld" - "github.com/celestiaorg/celestia-node/share/sharetest" ) func TestRepo(t *testing.T) { @@ -68,101 +56,6 @@ func TestRepo(t *testing.T) { } } -func BenchmarkStore(b *testing.B) { - ctx, cancel := context.WithCancel(context.Background()) - b.Cleanup(cancel) - - // BenchmarkStore/bench_read_128-10 14 78970661 ns/op (~70ms) - b.Run("bench put 128", func(b *testing.B) { - dir := b.TempDir() - err := Init(*DefaultConfig(node.Full), dir, node.Full) - require.NoError(b, err) - - store := newStore(ctx, b, eds.DefaultParameters(), dir) - size := 128 - b.Run("enabled eds proof caching", func(b *testing.B) { - b.StopTimer() - b.ResetTimer() - for i := 0; i < b.N; i++ { - adder := ipld.NewProofsAdder(size * 2) - shares := sharetest.RandShares(b, size*size) - eds, err 
:= rsmt2d.ComputeExtendedDataSquare( - shares, - share.DefaultRSMT2DCodec(), - wrapper.NewConstructor(uint64(size), - nmt.NodeVisitor(adder.VisitFn())), - ) - require.NoError(b, err) - dah, err := da.NewDataAvailabilityHeader(eds) - require.NoError(b, err) - ctx := ipld.CtxWithProofsAdder(ctx, adder) - - b.StartTimer() - err = store.edsStore.Put(ctx, dah.Hash(), eds) - b.StopTimer() - require.NoError(b, err) - } - }) - - b.Run("disabled eds proof caching", func(b *testing.B) { - b.ResetTimer() - b.StopTimer() - for i := 0; i < b.N; i++ { - eds := edstest.RandEDS(b, size) - dah, err := da.NewDataAvailabilityHeader(eds) - require.NoError(b, err) - - b.StartTimer() - err = store.edsStore.Put(ctx, dah.Hash(), eds) - b.StopTimer() - require.NoError(b, err) - } - }) - }) -} - -func TestStoreRestart(t *testing.T) { - const ( - blocks = 5 - size = 32 - ) - ctx, cancel := context.WithTimeout(context.Background(), time.Second*5) - t.Cleanup(cancel) - - dir := t.TempDir() - err := Init(*DefaultConfig(node.Full), dir, node.Full) - require.NoError(t, err) - - store := newStore(ctx, t, eds.DefaultParameters(), dir) - - hashes := make([][]byte, blocks) - for i := range hashes { - edss := edstest.RandEDS(t, size) - require.NoError(t, err) - dah, err := da.NewDataAvailabilityHeader(edss) - require.NoError(t, err) - err = store.edsStore.Put(ctx, dah.Hash(), edss) - require.NoError(t, err) - - // store hashes for read loop later - hashes[i] = dah.Hash() - } - - // restart store - store.stop(ctx, t) - store = newStore(ctx, t, eds.DefaultParameters(), dir) - - for _, h := range hashes { - edsReader, err := store.edsStore.GetCAR(ctx, h) - require.NoError(t, err) - odsReader, err := eds.ODSReader(edsReader) - require.NoError(t, err) - _, err = eds.ReadEDS(ctx, odsReader, h) - require.NoError(t, err) - require.NoError(t, edsReader.Close()) - } -} - func TestDiscoverOpened(t *testing.T) { t.Run("single open store", func(t *testing.T) { _, dir := initAndOpenStore(t, node.Full) @@ -262,28 
+155,3 @@ func initAndOpenStore(t *testing.T, tp node.Type) (store Store, dir string) { require.NoError(t, err) return store, dir } - -type store struct { - s Store - edsStore *eds.Store -} - -func newStore(ctx context.Context, t testing.TB, params *eds.Parameters, dir string) store { - s, err := OpenStore(dir, nil) - require.NoError(t, err) - ds, err := s.Datastore() - require.NoError(t, err) - edsStore, err := eds.NewStore(params, dir, ds) - require.NoError(t, err) - err = edsStore.Start(ctx) - require.NoError(t, err) - return store{ - s: s, - edsStore: edsStore, - } -} - -func (s *store) stop(ctx context.Context, t *testing.T) { - require.NoError(t, s.edsStore.Stop(ctx)) - require.NoError(t, s.s.Close()) -} diff --git a/nodebuilder/tests/fraud_test.go b/nodebuilder/tests/fraud_test.go index c5d1cc7e0f..524b306ac0 100644 --- a/nodebuilder/tests/fraud_test.go +++ b/nodebuilder/tests/fraud_test.go @@ -7,8 +7,6 @@ import ( "testing" "time" - "github.com/ipfs/go-datastore" - ds_sync "github.com/ipfs/go-datastore/sync" "github.com/libp2p/go-libp2p/core/host" "github.com/libp2p/go-libp2p/core/peer" "github.com/stretchr/testify/require" @@ -23,8 +21,8 @@ import ( "github.com/celestiaorg/celestia-node/nodebuilder/core" "github.com/celestiaorg/celestia-node/nodebuilder/node" "github.com/celestiaorg/celestia-node/nodebuilder/tests/swamp" - "github.com/celestiaorg/celestia-node/share/eds" "github.com/celestiaorg/celestia-node/share/eds/byzantine" + "github.com/celestiaorg/celestia-node/store" ) /* @@ -46,7 +44,7 @@ Another note: this test disables share exchange to speed up test results. 9. Try to start a Full Node(FN) that contains a BEFP in its store. 
*/ func TestFraudProofHandling(t *testing.T) { - t.Skip("skipping to please the CI and shwap skips it anyway") + t.Skip("unsupported temporary") ctx, cancel := context.WithTimeout(context.Background(), swamp.DefaultTestTimeout) t.Cleanup(cancel) @@ -62,11 +60,9 @@ func TestFraudProofHandling(t *testing.T) { set, val := sw.Validators(t) fMaker := headerfraud.NewFraudMaker(t, 10, []types.PrivValidator{val}, set) - storeCfg := eds.DefaultParameters() - ds := ds_sync.MutexWrap(datastore.NewMapDatastore()) - edsStore, err := eds.NewStore(storeCfg, t.TempDir(), ds) + storeCfg := store.DefaultParameters() + edsStore, err := store.NewStore(storeCfg, t.TempDir()) require.NoError(t, err) - require.NoError(t, edsStore.Start(ctx)) t.Cleanup(func() { _ = edsStore.Stop(ctx) }) diff --git a/nodebuilder/tests/helpers_test.go b/nodebuilder/tests/helpers_test.go index 978b66553d..87179a121e 100644 --- a/nodebuilder/tests/helpers_test.go +++ b/nodebuilder/tests/helpers_test.go @@ -30,6 +30,5 @@ func getAdminClient(ctx context.Context, nd *nodebuilder.Node, t *testing.T) *cl } func setTimeInterval(cfg *nodebuilder.Config, interval time.Duration) { - cfg.P2P.RoutingTableRefreshPeriod = interval cfg.Share.Discovery.AdvertiseInterval = interval } diff --git a/nodebuilder/tests/nd_test.go b/nodebuilder/tests/nd_test.go index d82f405a0c..960062bc9e 100644 --- a/nodebuilder/tests/nd_test.go +++ b/nodebuilder/tests/nd_test.go @@ -17,9 +17,11 @@ import ( "github.com/celestiaorg/celestia-node/nodebuilder/p2p" "github.com/celestiaorg/celestia-node/nodebuilder/tests/swamp" "github.com/celestiaorg/celestia-node/share" - "github.com/celestiaorg/celestia-node/share/eds" - "github.com/celestiaorg/celestia-node/share/getters" - "github.com/celestiaorg/celestia-node/share/p2p/shrexnd" + "github.com/celestiaorg/celestia-node/share/shwap" + "github.com/celestiaorg/celestia-node/share/shwap/getters" + "github.com/celestiaorg/celestia-node/share/shwap/p2p/shrex/shrex_getter" + 
"github.com/celestiaorg/celestia-node/share/shwap/p2p/shrex/shrexnd" + "github.com/celestiaorg/celestia-node/store" ) func TestShrexNDFromLights(t *testing.T) { @@ -177,7 +179,7 @@ func replaceNDServer(cfg *nodebuilder.Config, handler network.StreamHandler) fx. return fx.Decorate(fx.Annotate( func( host host.Host, - store *eds.Store, + store *store.Store, network p2p.Network, ) (*shrexnd.Server, error) { cfg.Share.ShrExNDParams.WithNetworkID(network.String()) @@ -198,12 +200,12 @@ func replaceShareGetter() fx.Option { return fx.Decorate(fx.Annotate( func( host host.Host, - store *eds.Store, - storeGetter *getters.StoreGetter, - shrexGetter *getters.ShrexGetter, + store *store.Store, + storeGetter *store.Getter, + shrexGetter *shrex_getter.Getter, network p2p.Network, - ) share.Getter { - cascade := make([]share.Getter, 0, 2) + ) shwap.Getter { + cascade := make([]shwap.Getter, 0, 2) cascade = append(cascade, storeGetter) cascade = append(cascade, shrexGetter) return getters.NewCascadeGetter(cascade) diff --git a/nodebuilder/tests/prune_test.go b/nodebuilder/tests/prune_test.go index 720f6397ef..77fbc61132 100644 --- a/nodebuilder/tests/prune_test.go +++ b/nodebuilder/tests/prune_test.go @@ -21,10 +21,10 @@ import ( "github.com/celestiaorg/celestia-node/nodebuilder/tests/swamp" "github.com/celestiaorg/celestia-node/pruner" "github.com/celestiaorg/celestia-node/share" - "github.com/celestiaorg/celestia-node/share/getters" - "github.com/celestiaorg/celestia-node/share/p2p/peers" - "github.com/celestiaorg/celestia-node/share/p2p/shrexeds" - "github.com/celestiaorg/celestia-node/share/p2p/shrexnd" + "github.com/celestiaorg/celestia-node/share/shwap/p2p/shrex/peers" + "github.com/celestiaorg/celestia-node/share/shwap/p2p/shrex/shrex_getter" + "github.com/celestiaorg/celestia-node/share/shwap/p2p/shrex/shrexeds" + "github.com/celestiaorg/celestia-node/share/shwap/p2p/shrex/shrexnd" ) // TestArchivalBlobSync tests whether a LN is able to sync historical blobs from @@ 
-73,15 +73,15 @@ func TestArchivalBlobSync(t *testing.T) { edsClient *shrexeds.Client, ndClient *shrexnd.Client, managers map[string]*peers.Manager, - ) *getters.ShrexGetter { - return getters.NewShrexGetter( + ) *shrex_getter.Getter { + return shrex_getter.NewGetter( edsClient, ndClient, managers["full"], managers["archival"], testAvailWindow, ) - }, new(getters.ShrexGetter)), + }, new(shrex_getter.Getter)), ) // stop the archival BN to force LN to have to discover @@ -120,7 +120,7 @@ func TestArchivalBlobSync(t *testing.T) { eh, err := archivalFN.HeaderServ.GetByHeight(ctx, uint64(i)) require.NoError(t, err) - if bytes.Equal(eh.DataHash, share.EmptyRoot().Hash()) { + if bytes.Equal(eh.DataHash, share.EmptyEDSRoots().Hash()) { i++ continue } @@ -149,7 +149,7 @@ func TestArchivalBlobSync(t *testing.T) { // with the historical blobs for _, pruned := range pruningFulls { for _, b := range archivalBlobs { - has, err := pruned.EDSStore.Has(ctx, b.root) + has, err := pruned.EDSStore.HasByHeight(ctx, b.height) require.NoError(t, err) assert.False(t, has) } diff --git a/nodebuilder/tests/reconstruct_test.go b/nodebuilder/tests/reconstruct_test.go index 98789cbcc1..70bcca05be 100644 --- a/nodebuilder/tests/reconstruct_test.go +++ b/nodebuilder/tests/reconstruct_test.go @@ -36,6 +36,7 @@ Steps: 5. Check that a FN can retrieve shares from 1 to 20 blocks */ func TestFullReconstructFromBridge(t *testing.T) { + t.Skip() const ( blocks = 20 bsize = 16 @@ -86,6 +87,7 @@ Test-Case: Full Node reconstructs blocks from each other, after unsuccessfully s block from LN subnetworks. Analog to TestShareAvailable_DisconnectedFullNodes. */ func TestFullReconstructFromFulls(t *testing.T) { + t.Skip() if testing.Short() { t.Skip() } @@ -256,6 +258,7 @@ Steps: 9. 
Check that the FN can retrieve shares from 1 to 20 blocks */ func TestFullReconstructFromLights(t *testing.T) { + t.Skip() if testing.Short() { t.Skip() } diff --git a/nodebuilder/tests/swamp/swamp.go b/nodebuilder/tests/swamp/swamp.go index e601ba5a13..d11c994229 100644 --- a/nodebuilder/tests/swamp/swamp.go +++ b/nodebuilder/tests/swamp/swamp.go @@ -9,8 +9,6 @@ import ( "testing" "time" - ds "github.com/ipfs/go-datastore" - ds_sync "github.com/ipfs/go-datastore/sync" "github.com/libp2p/go-libp2p/core/host" "github.com/libp2p/go-libp2p/core/peer" mocknet "github.com/libp2p/go-libp2p/p2p/net/mock" @@ -33,7 +31,7 @@ import ( "github.com/celestiaorg/celestia-node/nodebuilder/node" "github.com/celestiaorg/celestia-node/nodebuilder/p2p" "github.com/celestiaorg/celestia-node/nodebuilder/state" - "github.com/celestiaorg/celestia-node/share/eds" + "github.com/celestiaorg/celestia-node/store" ) var blackholeIP6 = net.ParseIP("100::") @@ -178,8 +176,7 @@ func (s *Swamp) setupGenesis() { // ensure core has surpassed genesis block s.WaitTillHeight(ctx, 2) - ds := ds_sync.MutexWrap(ds.NewMapDatastore()) - store, err := eds.NewStore(eds.DefaultParameters(), s.t.TempDir(), ds) + store, err := store.NewStore(store.DefaultParameters(), s.t.TempDir()) require.NoError(s.t, err) ex, err := core.NewExchange( diff --git a/pruner/full/pruner.go b/pruner/full/pruner.go index 49967b5050..5b3f409b41 100644 --- a/pruner/full/pruner.go +++ b/pruner/full/pruner.go @@ -2,38 +2,30 @@ package full import ( "context" - "errors" - "github.com/filecoin-project/dagstore" logging "github.com/ipfs/go-log/v2" "github.com/celestiaorg/celestia-node/header" - "github.com/celestiaorg/celestia-node/share" - "github.com/celestiaorg/celestia-node/share/eds" + "github.com/celestiaorg/celestia-node/store" ) var log = logging.Logger("pruner/full") type Pruner struct { - store *eds.Store + store *store.Store } -func NewPruner(store *eds.Store) *Pruner { +func NewPruner(store *store.Store) *Pruner { return 
&Pruner{ store: store, } } func (p *Pruner) Prune(ctx context.Context, eh *header.ExtendedHeader) error { - // short circuit on empty roots - if eh.DAH.Equals(share.EmptyRoot()) { - return nil - } - log.Debugf("pruning header %s", eh.DAH.Hash()) - err := p.store.Remove(ctx, eh.DAH.Hash()) - if err != nil && !errors.Is(err, dagstore.ErrShardUnknown) { + err := p.store.RemoveODSQ4(ctx, eh.Height(), eh.DAH.Hash()) + if err != nil { return err } return nil diff --git a/pruner/light/pruner.go b/pruner/light/pruner.go index e6c8a6601b..5739cd8cee 100644 --- a/pruner/light/pruner.go +++ b/pruner/light/pruner.go @@ -24,7 +24,7 @@ func NewPruner(bstore blockstore.Blockstore, ds datastore.Batching) pruner.Prune func (p *Pruner) Prune(ctx context.Context, h *header.ExtendedHeader) error { dah := h.DAH - if share.DataHash(dah.Hash()).IsEmptyRoot() { + if share.DataHash(dah.Hash()).IsEmptyEDS() { return nil } @@ -41,6 +41,6 @@ func (p *Pruner) Prune(ctx context.Context, h *header.ExtendedHeader) error { return p.ds.Delete(ctx, rootKey(dah)) } -func rootKey(root *share.Root) datastore.Key { +func rootKey(root *share.AxisRoots) datastore.Key { return datastore.NewKey(root.String()) } diff --git a/share/availability.go b/share/availability.go index 84ae08bc53..3373a62276 100644 --- a/share/availability.go +++ b/share/availability.go @@ -4,29 +4,12 @@ import ( "context" "errors" - "github.com/celestiaorg/celestia-app/v2/pkg/da" - "github.com/celestiaorg/rsmt2d" - "github.com/celestiaorg/celestia-node/header" ) // ErrNotAvailable is returned whenever DA sampling fails. var ErrNotAvailable = errors.New("share: data not available") -// Root represents root commitment to multiple Shares. -// In practice, it is a commitment to all the Data in a square. -type Root = da.DataAvailabilityHeader - -// NewRoot generates Root(DataAvailabilityHeader) using the -// provided extended data square. 
-func NewRoot(eds *rsmt2d.ExtendedDataSquare) (*Root, error) { - dah, err := da.NewDataAvailabilityHeader(eds) - if err != nil { - return nil, err - } - return &dah, nil -} - // Availability defines interface for validation of Shares' availability. // //go:generate mockgen -destination=availability/mocks/availability.go -package=mocks . Availability diff --git a/share/availability/full/availability.go b/share/availability/full/availability.go index 573819dc9f..91550849c8 100644 --- a/share/availability/full/availability.go +++ b/share/availability/full/availability.go @@ -5,14 +5,15 @@ import ( "errors" "fmt" - "github.com/filecoin-project/dagstore" logging "github.com/ipfs/go-log/v2" "github.com/celestiaorg/celestia-node/header" + "github.com/celestiaorg/celestia-node/pruner" + "github.com/celestiaorg/celestia-node/pruner/full" "github.com/celestiaorg/celestia-node/share" - "github.com/celestiaorg/celestia-node/share/eds" "github.com/celestiaorg/celestia-node/share/eds/byzantine" - "github.com/celestiaorg/celestia-node/share/ipld" + "github.com/celestiaorg/celestia-node/share/shwap" + "github.com/celestiaorg/celestia-node/store" ) var log = logging.Logger("share/full") @@ -21,14 +22,14 @@ var log = logging.Logger("share/full") // recovery technique. It is considered "full" because it is required // to download enough shares to fully reconstruct the data square. type ShareAvailability struct { - store *eds.Store - getter share.Getter + store *store.Store + getter shwap.Getter } // NewShareAvailability creates a new full ShareAvailability. func NewShareAvailability( - store *eds.Store, - getter share.Getter, + store *store.Store, + getter shwap.Getter, ) *ShareAvailability { return &ShareAvailability{ store: store, @@ -40,13 +41,17 @@ func NewShareAvailability( // enough Shares from the network. 
func (fa *ShareAvailability) SharesAvailable(ctx context.Context, header *header.ExtendedHeader) error { dah := header.DAH - // short-circuit if the given root is minimum DAH of an empty data square, to avoid datastore hit - if share.DataHash(dah.Hash()).IsEmptyRoot() { + // if the data square is empty, we can safely link the header height in the store to an empty EDS. + if share.DataHash(dah.Hash()).IsEmptyEDS() { + err := fa.store.PutODSQ4(ctx, dah, header.Height(), share.EmptyEDS()) + if err != nil { + return fmt.Errorf("put empty EDS: %w", err) + } return nil } // we assume the caller of this method has already performed basic validation on the - // given dah/root. If for some reason this has not happened, the node should panic. + // given roots. If for some reason this has not happened, the node should panic. if err := dah.ValidateBasic(); err != nil { log.Errorw("Availability validation cannot be performed on a malformed DataAvailabilityHeader", "err", err) @@ -54,14 +59,10 @@ func (fa *ShareAvailability) SharesAvailable(ctx context.Context, header *header } // a hack to avoid loading the whole EDS in mem if we store it already. 
- if ok, _ := fa.store.Has(ctx, dah.Hash()); ok { + if ok, _ := fa.store.HasByHeight(ctx, header.Height()); ok { return nil } - adder := ipld.NewProofsAdder(len(dah.RowRoots)) - ctx = ipld.CtxWithProofsAdder(ctx, adder) - defer adder.Purge() - eds, err := fa.getter.GetEDS(ctx, header) if err != nil { if errors.Is(err, context.Canceled) { @@ -69,14 +70,20 @@ func (fa *ShareAvailability) SharesAvailable(ctx context.Context, header *header } log.Errorw("availability validation failed", "root", dah.String(), "err", err.Error()) var byzantineErr *byzantine.ErrByzantine - if errors.Is(err, share.ErrNotFound) || errors.Is(err, context.DeadlineExceeded) && !errors.As(err, &byzantineErr) { + if errors.Is(err, shwap.ErrNotFound) || errors.Is(err, context.DeadlineExceeded) && !errors.As(err, &byzantineErr) { return share.ErrNotAvailable } return err } - err = fa.store.Put(ctx, dah.Hash(), eds) - if err != nil && !errors.Is(err, dagstore.ErrShardExists) { + // archival nodes should not store Q4 outside the availability window. 
+ if pruner.IsWithinAvailabilityWindow(header.Time(), full.Window) { + err = fa.store.PutODSQ4(ctx, dah, header.Height(), eds) + } else { + err = fa.store.PutODS(ctx, dah, header.Height(), eds) + } + + if err != nil { return fmt.Errorf("full availability: failed to store eds: %w", err) } return nil diff --git a/share/availability/full/availability_test.go b/share/availability/full/availability_test.go index 400acfa087..95f8bda533 100644 --- a/share/availability/full/availability_test.go +++ b/share/availability/full/availability_test.go @@ -3,77 +3,94 @@ package full import ( "context" "testing" + "time" "github.com/golang/mock/gomock" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "github.com/celestiaorg/celestia-app/v2/pkg/da" - "github.com/celestiaorg/celestia-node/header/headertest" "github.com/celestiaorg/celestia-node/share" - availability_test "github.com/celestiaorg/celestia-node/share/availability/test" "github.com/celestiaorg/celestia-node/share/eds/edstest" - "github.com/celestiaorg/celestia-node/share/mocks" + "github.com/celestiaorg/celestia-node/share/shwap" + "github.com/celestiaorg/celestia-node/share/shwap/getters/mock" + "github.com/celestiaorg/celestia-node/store" ) -func TestShareAvailableOverMocknet_Full(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) +func TestSharesAvailable(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), time.Second) defer cancel() - net := availability_test.NewTestDAGNet(ctx, t) - _, root := RandNode(net, 32) + // RandServiceWithSquare creates a NewShareAvailability inside, so we can test it + eds := edstest.RandEDS(t, 16) + roots, err := share.NewAxisRoots(eds) + require.NoError(t, err) + eh := headertest.RandExtendedHeaderWithRoot(t, roots) - eh := headertest.RandExtendedHeaderWithRoot(t, root) - nd := Node(net) - net.ConnectAll() + getter := mock.NewMockGetter(gomock.NewController(t)) + getter.EXPECT().GetEDS(gomock.Any(), 
eh).Return(eds, nil) - err := nd.SharesAvailable(ctx, eh) - assert.NoError(t, err) + store, err := store.NewStore(store.DefaultParameters(), t.TempDir()) + require.NoError(t, err) + avail := NewShareAvailability(store, getter) + err = avail.SharesAvailable(ctx, eh) + require.NoError(t, err) + + // Check if the store has the root + has, err := store.HasByHash(ctx, roots.Hash()) + require.NoError(t, err) + require.True(t, has) + + // Check if the store has the root linked to the height + has, err = store.HasByHeight(ctx, eh.Height()) + require.NoError(t, err) + require.True(t, has) } -func TestSharesAvailable_Full(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) +func TestSharesAvailable_StoredEds(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), time.Second) defer cancel() - // RandServiceWithSquare creates a NewShareAvailability inside, so we can test it - getter, dah := GetterWithRandSquare(t, 16) + eds := edstest.RandEDS(t, 4) + roots, err := share.NewAxisRoots(eds) + require.NoError(t, err) + eh := headertest.RandExtendedHeaderWithRoot(t, roots) + require.NoError(t, err) - eh := headertest.RandExtendedHeaderWithRoot(t, dah) - avail := TestAvailability(t, getter) - err := avail.SharesAvailable(ctx, eh) - assert.NoError(t, err) -} + store, err := store.NewStore(store.DefaultParameters(), t.TempDir()) + require.NoError(t, err) + avail := NewShareAvailability(store, nil) -func TestSharesAvailable_StoresToEDSStore(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() + err = store.PutODSQ4(ctx, roots, eh.Height(), eds) + require.NoError(t, err) - // RandServiceWithSquare creates a NewShareAvailability inside, so we can test it - getter, dah := GetterWithRandSquare(t, 16) - eh := headertest.RandExtendedHeaderWithRoot(t, dah) - avail := TestAvailability(t, getter) - err := avail.SharesAvailable(ctx, eh) - assert.NoError(t, err) - - has, err := avail.store.Has(ctx, dah.Hash()) - 
assert.NoError(t, err) - assert.True(t, has) + has, err := store.HasByHeight(ctx, eh.Height()) + require.NoError(t, err) + require.True(t, has) + + err = avail.SharesAvailable(ctx, eh) + require.NoError(t, err) + + has, err = store.HasByHeight(ctx, eh.Height()) + require.NoError(t, err) + require.True(t, has) } -func TestSharesAvailable_Full_ErrNotAvailable(t *testing.T) { +func TestSharesAvailable_ErrNotAvailable(t *testing.T) { ctrl := gomock.NewController(t) - getter := mocks.NewMockGetter(ctrl) - ctx, cancel := context.WithCancel(context.Background()) + getter := mock.NewMockGetter(ctrl) + ctx, cancel := context.WithTimeout(context.Background(), time.Second) defer cancel() eds := edstest.RandEDS(t, 4) - dah, err := da.NewDataAvailabilityHeader(eds) - eh := headertest.RandExtendedHeaderWithRoot(t, &dah) + roots, err := share.NewAxisRoots(eds) + eh := headertest.RandExtendedHeaderWithRoot(t, roots) + require.NoError(t, err) + + store, err := store.NewStore(store.DefaultParameters(), t.TempDir()) require.NoError(t, err) - avail := TestAvailability(t, getter) + avail := NewShareAvailability(store, getter) - errors := []error{share.ErrNotFound, context.DeadlineExceeded} + errors := []error{shwap.ErrNotFound, context.DeadlineExceeded} for _, getterErr := range errors { getter.EXPECT().GetEDS(gomock.Any(), gomock.Any()).Return(nil, getterErr) err := avail.SharesAvailable(ctx, eh) diff --git a/share/availability/full/reconstruction_test.go b/share/availability/full/reconstruction_test.go index 31edb3b6d9..179a458d36 100644 --- a/share/availability/full/reconstruction_test.go +++ b/share/availability/full/reconstruction_test.go @@ -1,284 +1,284 @@ -//go:build !race - +// //go:build !race package full -import ( - "context" - "sync" - "testing" - "time" - - "github.com/stretchr/testify/require" - "golang.org/x/sync/errgroup" - - "github.com/celestiaorg/celestia-node/header/headertest" - "github.com/celestiaorg/celestia-node/share" - 
"github.com/celestiaorg/celestia-node/share/availability/light" - availability_test "github.com/celestiaorg/celestia-node/share/availability/test" - "github.com/celestiaorg/celestia-node/share/eds" -) - -func init() { - eds.RetrieveQuadrantTimeout = time.Millisecond * 100 // to speed up tests -} - -// TestShareAvailable_OneFullNode asserts that a full node can ensure -// data is available (reconstruct data square) while being connected to -// light nodes only. -func TestShareAvailable_OneFullNode(t *testing.T) { - // NOTE: Numbers are taken from the original 'Fraud and Data Availability Proofs' paper - light.DefaultSampleAmount = 20 // s - const ( - origSquareSize = 16 // k - lightNodes = 69 // c - ) - - ctx, cancel := context.WithTimeout(context.Background(), 1*time.Minute) - defer cancel() - - net := availability_test.NewTestDAGNet(ctx, t) - source, root := RandNode(net, origSquareSize) // make a source node, a.k.a bridge - eh := headertest.RandExtendedHeader(t) - eh.DAH = root - full := Node(net) // make a full availability service which reconstructs data - - // ensure there is no connection between source and full nodes - // so that full reconstructs from the light nodes only - net.Disconnect(source.ID(), full.ID()) - - errg, errCtx := errgroup.WithContext(ctx) - errg.Go(func() error { - return full.SharesAvailable(errCtx, eh) - }) - - lights := make([]*availability_test.TestNode, lightNodes) - for i := 0; i < len(lights); i++ { - lights[i] = light.Node(net) - go func(i int) { - err := lights[i].SharesAvailable(ctx, eh) - if err != nil { - t.Log("light errors:", err) - } - }(i) - } - - for i := 0; i < len(lights); i++ { - net.Connect(lights[i].ID(), source.ID()) - } - - for i := 0; i < len(lights); i++ { - net.Connect(lights[i].ID(), full.ID()) - } - - err := errg.Wait() - require.NoError(t, err) -} - -// TestShareAvailable_ConnectedFullNodes asserts that two connected full nodes -// can ensure data availability via two isolated light node subnetworks. 
Full -// nodes start their availability process first, then light node start -// availability process and connect to full node and only after light node -// connect to the source node which has the data. After light node connect to the -// source, full node must be able to finish the availability process started in -// the beginning. -func TestShareAvailable_ConnectedFullNodes(t *testing.T) { - // NOTE: Numbers are taken from the original 'Fraud and Data Availability Proofs' paper - light.DefaultSampleAmount = 20 // s - const ( - origSquareSize = 16 // k - lightNodes = 60 // c - ) - - ctx, cancel := context.WithTimeout(context.Background(), time.Second*30) - defer cancel() - - net := availability_test.NewTestDAGNet(ctx, t) - source, root := RandNode(net, origSquareSize) - eh := headertest.RandExtendedHeader(t) - eh.DAH = root - - // create two full nodes and ensure they are disconnected - full1 := Node(net) - full2 := Node(net) - - // pre-connect fulls - net.Connect(full1.ID(), full2.ID()) - // ensure fulls and source are not connected - // so that fulls take data from light nodes only - net.Disconnect(full1.ID(), source.ID()) - net.Disconnect(full2.ID(), source.ID()) - - // start reconstruction for fulls - errg, errCtx := errgroup.WithContext(ctx) - errg.Go(func() error { - return full1.SharesAvailable(errCtx, eh) - }) - errg.Go(func() error { - return full2.SharesAvailable(errCtx, eh) - }) - - // create light nodes and start sampling for them immediately - lights1, lights2 := make( - []*availability_test.TestNode, lightNodes/2), - make([]*availability_test.TestNode, lightNodes/2) - for i := 0; i < len(lights1); i++ { - lights1[i] = light.Node(net) - go func(i int) { - err := lights1[i].SharesAvailable(ctx, eh) - if err != nil { - t.Log("light1 errors:", err) - } - }(i) - - lights2[i] = light.Node(net) - go func(i int) { - err := lights2[i].SharesAvailable(ctx, eh) - if err != nil { - t.Log("light2 errors:", err) - } - }(i) - } - - // shape topology - for i := 0; 
i < len(lights1); i++ { - // ensure lights1 are only connected to full1 - net.Connect(lights1[i].ID(), full1.ID()) - net.Disconnect(lights1[i].ID(), full2.ID()) - // ensure lights2 are only connected to full2 - net.Connect(lights2[i].ID(), full2.ID()) - net.Disconnect(lights2[i].ID(), full1.ID()) - } - - // start connection lights with sources - for i := 0; i < len(lights1); i++ { - net.Connect(lights1[i].ID(), source.ID()) - net.Connect(lights2[i].ID(), source.ID()) - } - - err := errg.Wait() - require.NoError(t, err) -} - -// TestShareAvailable_DisconnectedFullNodes asserts that two disconnected full -// nodes cannot ensure data is available (reconstruct data square) while being -// connected to isolated light nodes subnetworks, which do not have enough nodes -// to reconstruct the data, but once ShareAvailability nodes connect, they can -// collectively reconstruct it. -// -//nolint:dupword -func TestShareAvailable_DisconnectedFullNodes(t *testing.T) { - // S - Source - // L - Light Node - // F - Full Node - // ── - connection - // - // Topology: - // NOTE: There are more Light Nodes in practice - // ┌─┬─┬─S─┬─┬─┐ - // │ │ │ │ │ │ - // │ │ │ │ │ │ - // │ │ │ │ │ │ - // L L L L L L - // │ │ │ │ │ │ - // └─┴─┤ ├─┴─┘ - // F└───┘F - // - - // NOTE: Numbers are taken from the original 'Fraud and Data Availability Proofs' paper - light.DefaultSampleAmount = 20 // s - const ( - origSquareSize = 16 // k - lightNodes = 32 // c - total number of nodes on two subnetworks - ) - - ctx, cancel := context.WithTimeout(context.Background(), time.Second*60) - defer cancel() - - net := availability_test.NewTestDAGNet(ctx, t) - source, root := RandNode(net, origSquareSize) - eh := headertest.RandExtendedHeader(t) - eh.DAH = root - - // create light nodes and start sampling for them immediately - lights1, lights2 := make( - []*availability_test.TestNode, lightNodes/2), - make([]*availability_test.TestNode, lightNodes/2) - - var wg sync.WaitGroup - wg.Add(lightNodes) - for i := 0; i 
< len(lights1); i++ { - lights1[i] = light.Node(net) - go func(i int) { - defer wg.Done() - err := lights1[i].SharesAvailable(ctx, eh) - if err != nil { - t.Log("light1 errors:", err) - } - }(i) - - lights2[i] = light.Node(net) - go func(i int) { - defer wg.Done() - err := lights2[i].SharesAvailable(ctx, eh) - if err != nil { - t.Log("light2 errors:", err) - } - }(i) - } - - // create two full nodes and ensure they are disconnected - full1 := Node(net) - full2 := Node(net) - net.Disconnect(full1.ID(), full2.ID()) - - // ensure fulls and source are not connected - // so that fulls take data from light nodes only - net.Disconnect(full1.ID(), source.ID()) - net.Disconnect(full2.ID(), source.ID()) - - // shape topology - for i := 0; i < len(lights1); i++ { - // ensure lights1 are only connected to source and full1 - net.Connect(lights1[i].ID(), source.ID()) - net.Connect(lights1[i].ID(), full1.ID()) - net.Disconnect(lights1[i].ID(), full2.ID()) - // ensure lights2 are only connected to source and full2 - net.Connect(lights2[i].ID(), source.ID()) - net.Connect(lights2[i].ID(), full2.ID()) - net.Disconnect(lights2[i].ID(), full1.ID()) - } - - // start reconstruction for fulls that should fail - ctxErr, cancelErr := context.WithTimeout(ctx, time.Second*5) - errg, errCtx := errgroup.WithContext(ctxErr) - errg.Go(func() error { - return full1.SharesAvailable(errCtx, eh) - }) - errg.Go(func() error { - return full2.SharesAvailable(errCtx, eh) - }) - - // check that any of the fulls cannot reconstruct on their own - err := errg.Wait() - require.ErrorIs(t, err, share.ErrNotAvailable) - cancelErr() - - // but after they connect - net.Connect(full1.ID(), full2.ID()) - - // with clean caches from the previous try - full1.ClearStorage() - full2.ClearStorage() - - // they both should be able to reconstruct the block - errg, bctx := errgroup.WithContext(ctx) - errg.Go(func() error { - return full1.SharesAvailable(bctx, eh) - }) - errg.Go(func() error { - return 
full2.SharesAvailable(bctx, eh) - }) - require.NoError(t, errg.Wait()) - // wait for all routines to finish before exit, in case there are any errors to log - wg.Wait() -} +// +// import ( +// "context" +// "sync" +// "testing" +// "time" +// +// "github.com/stretchr/testify/require" +// "golang.org/x/sync/errgroup" +// +// "github.com/celestiaorg/celestia-node/header/headertest" +// "github.com/celestiaorg/celestia-node/share" +// "github.com/celestiaorg/celestia-node/share/availability/light" +// availability_test "github.com/celestiaorg/celestia-node/share/availability/test" +// "github.com/celestiaorg/celestia-node/share/eds" +//) +// +// func init() { +// eds.RetrieveQuadrantTimeout = time.Millisecond * 100 // to speed up tests +//} +// +//// TestShareAvailable_OneFullNode asserts that a full node can ensure +//// data is available (reconstruct data square) while being connected to +//// light nodes only. +// func TestShareAvailable_OneFullNode(t *testing.T) { +// // NOTE: Numbers are taken from the original 'Fraud and Data Availability Proofs' paper +// light.DefaultSampleAmount = 20 // s +// const ( +// origSquareSize = 16 // k +// lightNodes = 69 // c +// ) +// +// ctx, cancel := context.WithTimeout(context.Background(), 1*time.Minute) +// defer cancel() +// +// net := availability_test.NewTestDAGNet(ctx, t) +// source, root := RandNode(net, origSquareSize) // make a source node, a.k.a bridge +// eh := headertest.RandExtendedHeader(t) +// eh.DAH = root +// full := Node(net) // make a full availability service which reconstructs data +// +// // ensure there is no connection between source and full nodes +// // so that full reconstructs from the light nodes only +// net.Disconnect(source.ID(), full.ID()) +// +// errg, errCtx := errgroup.WithContext(ctx) +// errg.Go(func() error { +// return full.SharesAvailable(errCtx, eh) +// }) +// +// lights := make([]*availability_test.TestNode, lightNodes) +// for i := 0; i < len(lights); i++ { +// lights[i] = 
light.Node(net) +// go func(i int) { +// err := lights[i].SharesAvailable(ctx, eh) +// if err != nil { +// t.Log("light errors:", err) +// } +// }(i) +// } +// +// for i := 0; i < len(lights); i++ { +// net.Connect(lights[i].ID(), source.ID()) +// } +// +// for i := 0; i < len(lights); i++ { +// net.Connect(lights[i].ID(), full.ID()) +// } +// +// err := errg.Wait() +// require.NoError(t, err) +//} +// +//// TestShareAvailable_ConnectedFullNodes asserts that two connected full nodes +//// can ensure data availability via two isolated light node subnetworks. Full +//// nodes start their availability process first, then light node start +//// availability process and connect to full node and only after light node +//// connect to the source node which has the data. After light node connect to the +//// source, full node must be able to finish the availability process started in +//// the beginning. +// func TestShareAvailable_ConnectedFullNodes(t *testing.T) { +// // NOTE: Numbers are taken from the original 'Fraud and Data Availability Proofs' paper +// light.DefaultSampleAmount = 20 // s +// const ( +// origSquareSize = 16 // k +// lightNodes = 60 // c +// ) +// +// ctx, cancel := context.WithTimeout(context.Background(), time.Second*30) +// defer cancel() +// +// net := availability_test.NewTestDAGNet(ctx, t) +// source, root := RandNode(net, origSquareSize) +// eh := headertest.RandExtendedHeader(t) +// eh.DAH = root +// +// // create two full nodes and ensure they are disconnected +// full1 := Node(net) +// full2 := Node(net) +// +// // pre-connect fulls +// net.Connect(full1.ID(), full2.ID()) +// // ensure fulls and source are not connected +// // so that fulls take data from light nodes only +// net.Disconnect(full1.ID(), source.ID()) +// net.Disconnect(full2.ID(), source.ID()) +// +// // start reconstruction for fulls +// errg, errCtx := errgroup.WithContext(ctx) +// errg.Go(func() error { +// return full1.SharesAvailable(errCtx, eh) +// }) +// errg.Go(func() 
error { +// return full2.SharesAvailable(errCtx, eh) +// }) +// +// // create light nodes and start sampling for them immediately +// lights1, lights2 := make( +// []*availability_test.TestNode, lightNodes/2), +// make([]*availability_test.TestNode, lightNodes/2) +// for i := 0; i < len(lights1); i++ { +// lights1[i] = light.Node(net) +// go func(i int) { +// err := lights1[i].SharesAvailable(ctx, eh) +// if err != nil { +// t.Log("light1 errors:", err) +// } +// }(i) +// +// lights2[i] = light.Node(net) +// go func(i int) { +// err := lights2[i].SharesAvailable(ctx, eh) +// if err != nil { +// t.Log("light2 errors:", err) +// } +// }(i) +// } +// +// // shape topology +// for i := 0; i < len(lights1); i++ { +// // ensure lights1 are only connected to full1 +// net.Connect(lights1[i].ID(), full1.ID()) +// net.Disconnect(lights1[i].ID(), full2.ID()) +// // ensure lights2 are only connected to full2 +// net.Connect(lights2[i].ID(), full2.ID()) +// net.Disconnect(lights2[i].ID(), full1.ID()) +// } +// +// // start connection lights with sources +// for i := 0; i < len(lights1); i++ { +// net.Connect(lights1[i].ID(), source.ID()) +// net.Connect(lights2[i].ID(), source.ID()) +// } +// +// err := errg.Wait() +// require.NoError(t, err) +//} +// +//// TestShareAvailable_DisconnectedFullNodes asserts that two disconnected full +//// nodes cannot ensure data is available (reconstruct data square) while being +//// connected to isolated light nodes subnetworks, which do not have enough nodes +//// to reconstruct the data, but once ShareAvailability nodes connect, they can +//// collectively reconstruct it. 
+//// +////nolint:dupword +// func TestShareAvailable_DisconnectedFullNodes(t *testing.T) { +// // S - Source +// // L - Light Node +// // F - Full Node +// // ── - connection +// // +// // Topology: +// // NOTE: There are more Light Nodes in practice +// // ┌─┬─┬─S─┬─┬─┐ +// // │ │ │ │ │ │ +// // │ │ │ │ │ │ +// // │ │ │ │ │ │ +// // L L L L L L +// // │ │ │ │ │ │ +// // └─┴─┤ ├─┴─┘ +// // F└───┘F +// // +// +// // NOTE: Numbers are taken from the original 'Fraud and Data Availability Proofs' paper +// light.DefaultSampleAmount = 20 // s +// const ( +// origSquareSize = 16 // k +// lightNodes = 32 // c - total number of nodes on two subnetworks +// ) +// +// ctx, cancel := context.WithTimeout(context.Background(), time.Second*60) +// defer cancel() +// +// net := availability_test.NewTestDAGNet(ctx, t) +// source, root := RandNode(net, origSquareSize) +// eh := headertest.RandExtendedHeader(t) +// eh.DAH = root +// +// // create light nodes and start sampling for them immediately +// lights1, lights2 := make( +// []*availability_test.TestNode, lightNodes/2), +// make([]*availability_test.TestNode, lightNodes/2) +// +// var wg sync.WaitGroup +// wg.Add(lightNodes) +// for i := 0; i < len(lights1); i++ { +// lights1[i] = light.Node(net) +// go func(i int) { +// defer wg.Done() +// err := lights1[i].SharesAvailable(ctx, eh) +// if err != nil { +// t.Log("light1 errors:", err) +// } +// }(i) +// +// lights2[i] = light.Node(net) +// go func(i int) { +// defer wg.Done() +// err := lights2[i].SharesAvailable(ctx, eh) +// if err != nil { +// t.Log("light2 errors:", err) +// } +// }(i) +// } +// +// // create two full nodes and ensure they are disconnected +// full1 := Node(net) +// full2 := Node(net) +// net.Disconnect(full1.ID(), full2.ID()) +// +// // ensure fulls and source are not connected +// // so that fulls take data from light nodes only +// net.Disconnect(full1.ID(), source.ID()) +// net.Disconnect(full2.ID(), source.ID()) +// +// // shape topology +// for i := 
0; i < len(lights1); i++ { +// // ensure lights1 are only connected to source and full1 +// net.Connect(lights1[i].ID(), source.ID()) +// net.Connect(lights1[i].ID(), full1.ID()) +// net.Disconnect(lights1[i].ID(), full2.ID()) +// // ensure lights2 are only connected to source and full2 +// net.Connect(lights2[i].ID(), source.ID()) +// net.Connect(lights2[i].ID(), full2.ID()) +// net.Disconnect(lights2[i].ID(), full1.ID()) +// } +// +// // start reconstruction for fulls that should fail +// ctxErr, cancelErr := context.WithTimeout(ctx, time.Second*5) +// errg, errCtx := errgroup.WithContext(ctxErr) +// errg.Go(func() error { +// return full1.SharesAvailable(errCtx, eh) +// }) +// errg.Go(func() error { +// return full2.SharesAvailable(errCtx, eh) +// }) +// +// // check that any of the fulls cannot reconstruct on their own +// err := errg.Wait() +// require.ErrorIs(t, err, share.ErrNotAvailable) +// cancelErr() +// +// // but after they connect +// net.Connect(full1.ID(), full2.ID()) +// +// // with clean caches from the previous try +// full1.ClearStorage() +// full2.ClearStorage() +// +// // they both should be able to reconstruct the block +// errg, bctx := errgroup.WithContext(ctx) +// errg.Go(func() error { +// return full1.SharesAvailable(bctx, eh) +// }) +// errg.Go(func() error { +// return full2.SharesAvailable(bctx, eh) +// }) +// require.NoError(t, errg.Wait()) +// // wait for all routines to finish before exit, in case there are any errors to log +// wg.Wait() +//} diff --git a/share/availability/full/testing.go b/share/availability/full/testing.go index 7379c83441..9ebe3f1c18 100644 --- a/share/availability/full/testing.go +++ b/share/availability/full/testing.go @@ -1,56 +1,57 @@ package full -import ( - "context" - "testing" - "time" - - "github.com/ipfs/go-datastore" - "github.com/stretchr/testify/require" - - "github.com/celestiaorg/celestia-node/share" - availability_test "github.com/celestiaorg/celestia-node/share/availability/test" - 
"github.com/celestiaorg/celestia-node/share/eds" - "github.com/celestiaorg/celestia-node/share/getters" - "github.com/celestiaorg/celestia-node/share/ipld" - "github.com/celestiaorg/celestia-node/share/p2p/discovery" -) - -// GetterWithRandSquare provides a share.Getter filled with 'n' NMT -// trees of 'n' random shares, essentially storing a whole square. -func GetterWithRandSquare(t *testing.T, n int) (share.Getter, *share.Root) { - bServ := ipld.NewMemBlockservice() - getter := getters.NewIPLDGetter(bServ) - return getter, availability_test.RandFillBS(t, n, bServ) -} - -// RandNode creates a Full Node filled with a random block of the given size. -func RandNode(dn *availability_test.TestDagNet, squareSize int) (*availability_test.TestNode, *share.Root) { - nd := Node(dn) - return nd, availability_test.RandFillBS(dn.T, squareSize, nd.BlockService) -} - -// Node creates a new empty Full Node. -func Node(dn *availability_test.TestDagNet) *availability_test.TestNode { - nd := dn.NewTestNode() - nd.Getter = getters.NewIPLDGetter(nd.BlockService) - nd.Availability = TestAvailability(dn.T, nd.Getter) - return nd -} - -func TestAvailability(t *testing.T, getter share.Getter) *ShareAvailability { - params := discovery.DefaultParameters() - params.AdvertiseInterval = time.Second - params.PeersLimit = 10 - - store, err := eds.NewStore(eds.DefaultParameters(), t.TempDir(), datastore.NewMapDatastore()) - require.NoError(t, err) - err = store.Start(context.Background()) - require.NoError(t, err) - - t.Cleanup(func() { - err = store.Stop(context.Background()) - require.NoError(t, err) - }) - return NewShareAvailability(store, getter) -} +// +// import ( +// "context" +// "testing" +// "time" +// +// "github.com/ipfs/go-datastore" +// "github.com/stretchr/testify/require" +// +// "github.com/celestiaorg/celestia-node/share" +// availability_test "github.com/celestiaorg/celestia-node/share/availability/test" +// "github.com/celestiaorg/celestia-node/share/eds" +// 
"github.com/celestiaorg/celestia-node/share/getters" +// "github.com/celestiaorg/celestia-node/share/ipld" +// "github.com/celestiaorg/celestia-node/share/p2p/discovery" +//) +// +//// GetterWithRandSquare provides a share.Getter filled with 'n' NMT +//// trees of 'n' random shares, essentially storing a whole square. +// func GetterWithRandSquare(t *testing.T, n int) (share.Getter, *share.AxisRoots) { +// bServ := ipld.NewMemBlockservice() +// getter := getters.NewIPLDGetter(bServ) +// return getter, availability_test.RandFillBS(t, n, bServ) +//} +// +//// RandNode creates a Full Node filled with a random block of the given size. +// func RandNode(dn *availability_test.TestDagNet, squareSize int) (*availability_test.TestNode, *share.AxisRoots) { +// nd := Node(dn) +// return nd, availability_test.RandFillBS(dn.T, squareSize, nd.BlockService) +//} +// +//// Node creates a new empty Full Node. +// func Node(dn *availability_test.TestDagNet) *availability_test.TestNode { +// nd := dn.NewTestNode() +// nd.Getter = getters.NewIPLDGetter(nd.BlockService) +// nd.Availability = TestAvailability(dn.T, nd.Getter) +// return nd +//} +// +// func TestAvailability(t *testing.T, getter share.Getter) *ShareAvailability { +// params := discovery.DefaultParameters() +// params.AdvertiseInterval = time.Second +// params.PeersLimit = 10 +// +// store, err := eds.NewStore(eds.DefaultParameters(), t.TempDir(), datastore.NewMapDatastore()) +// require.NoError(t, err) +// err = store.Start(context.Background()) +// require.NoError(t, err) +// +// t.Cleanup(func() { +// err = store.Stop(context.Background()) +// require.NoError(t, err) +// }) +// return NewShareAvailability(store, getter) +//} diff --git a/share/availability/light/availability.go b/share/availability/light/availability.go index b188a33c14..5b1e144de0 100644 --- a/share/availability/light/availability.go +++ b/share/availability/light/availability.go @@ -12,7 +12,7 @@ import ( "github.com/celestiaorg/celestia-node/header" 
"github.com/celestiaorg/celestia-node/share" - "github.com/celestiaorg/celestia-node/share/getters" + "github.com/celestiaorg/celestia-node/share/shwap" ) var ( @@ -26,7 +26,7 @@ var ( // its availability. It is assumed that there are a lot of lightAvailability instances // on the network doing sampling over the same Root to collectively verify its availability. type ShareAvailability struct { - getter share.Getter + getter shwap.Getter params Parameters // TODO(@Wondertan): Once we come to parallelized DASer, this lock becomes a contention point @@ -38,7 +38,7 @@ type ShareAvailability struct { // NewShareAvailability creates a new light Availability. func NewShareAvailability( - getter share.Getter, + getter shwap.Getter, ds datastore.Batching, opts ...Option, ) *ShareAvailability { @@ -61,8 +61,8 @@ func NewShareAvailability( // ExtendedHeader. This way SharesAvailable subjectively verifies that Shares are available. func (la *ShareAvailability) SharesAvailable(ctx context.Context, header *header.ExtendedHeader) error { dah := header.DAH - // short-circuit if the given root is minimum DAH of an empty data square - if share.DataHash(dah.Hash()).IsEmptyRoot() { + // short-circuit if the given root is an empty data square + if share.DataHash(dah.Hash()).IsEmptyEDS() { return nil } @@ -100,10 +100,6 @@ func (la *ShareAvailability) SharesAvailable(ctx context.Context, header *header return err } - // indicate to the share.Getter that a blockservice session should be created. This - // functionality is optional and must be supported by the used share.Getter. 
- ctx = getters.WithSession(ctx) - var ( failedSamplesLock sync.Mutex failedSamples []Sample @@ -153,7 +149,7 @@ func (la *ShareAvailability) SharesAvailable(ctx context.Context, header *header return nil } -func rootKey(root *share.Root) datastore.Key { +func rootKey(root *share.AxisRoots) datastore.Key { return datastore.NewKey(root.String()) } diff --git a/share/availability/light/availability_test.go b/share/availability/light/availability_test.go index 2f7b7a6cf9..b8ec9db784 100644 --- a/share/availability/light/availability_test.go +++ b/share/availability/light/availability_test.go @@ -3,28 +3,47 @@ package light import ( "context" _ "embed" - "strconv" + "sync" "testing" + "github.com/golang/mock/gomock" + "github.com/ipfs/go-datastore" "github.com/stretchr/testify/require" + "github.com/celestiaorg/rsmt2d" + + "github.com/celestiaorg/celestia-node/header" "github.com/celestiaorg/celestia-node/header/headertest" "github.com/celestiaorg/celestia-node/share" - availability_test "github.com/celestiaorg/celestia-node/share/availability/test" - "github.com/celestiaorg/celestia-node/share/ipld" - "github.com/celestiaorg/celestia-node/share/sharetest" + "github.com/celestiaorg/celestia-node/share/eds/edstest" + "github.com/celestiaorg/celestia-node/share/shwap" + "github.com/celestiaorg/celestia-node/share/shwap/getters/mock" + "github.com/celestiaorg/celestia-node/share/shwap/p2p/shrex" ) func TestSharesAvailableCaches(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - getter, eh := GetterWithRandSquare(t, 16) - dah := eh.DAH - avail := TestAvailability(getter) - - // cache doesn't have dah yet - has, err := avail.ds.Has(ctx, rootKey(dah)) + eds := edstest.RandEDS(t, 16) + roots, err := share.NewAxisRoots(eds) + require.NoError(t, err) + eh := headertest.RandExtendedHeaderWithRoot(t, roots) + + getter := mock.NewMockGetter(gomock.NewController(t)) + getter.EXPECT(). + GetShare(gomock.Any(), eh, gomock.Any(), gomock.Any()). 
+ DoAndReturn( + func(_ context.Context, _ *header.ExtendedHeader, row, col int) (share.Share, error) { + return eds.GetCell(uint(row), uint(col)), nil + }). + AnyTimes() + + ds := datastore.NewMapDatastore() + avail := NewShareAvailability(getter, ds) + + // cache doesn't have eds yet + has, err := avail.ds.Has(ctx, rootKey(roots)) require.NoError(t, err) require.False(t, has) @@ -32,7 +51,7 @@ func TestSharesAvailableCaches(t *testing.T) { require.NoError(t, err) // is now stored success result - result, err := avail.ds.Get(ctx, rootKey(dah)) + result, err := avail.ds.Get(ctx, rootKey(roots)) require.NoError(t, err) failed, err := decodeSamples(result) require.NoError(t, err) @@ -43,20 +62,26 @@ func TestSharesAvailableHitsCache(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - getter, _ := GetterWithRandSquare(t, 16) - avail := TestAvailability(getter) + // create getter that always return ErrNotFound + getter := mock.NewMockGetter(gomock.NewController(t)) + getter.EXPECT(). + GetShare(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()). + Return(nil, shrex.ErrNotFound). 
+ AnyTimes() + + ds := datastore.NewMapDatastore() + avail := NewShareAvailability(getter, ds) - // create new dah, that is not available by getter - bServ := ipld.NewMemBlockservice() - dah := availability_test.RandFillBS(t, 16, bServ) - eh := headertest.RandExtendedHeaderWithRoot(t, dah) + // generate random header + roots := edstest.RandomAxisRoots(t, 16) + eh := headertest.RandExtendedHeaderWithRoot(t, roots) - // blockstore doesn't actually have the dah + // store doesn't actually have the eds err := avail.SharesAvailable(ctx, eh) require.ErrorIs(t, err, share.ErrNotAvailable) // put success result in cache - err = avail.ds.Put(ctx, rootKey(dah), []byte{}) + err = avail.ds.Put(ctx, rootKey(roots), []byte{}) require.NoError(t, err) // should hit cache after putting @@ -68,10 +93,12 @@ func TestSharesAvailableEmptyRoot(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - getter, _ := GetterWithRandSquare(t, 16) - avail := TestAvailability(getter) + getter := mock.NewMockGetter(gomock.NewController(t)) + ds := datastore.NewMapDatastore() + avail := NewShareAvailability(getter, ds) - eh := headertest.RandExtendedHeaderWithRoot(t, share.EmptyRoot()) + // request for empty eds + eh := headertest.RandExtendedHeaderWithRoot(t, share.EmptyEDSRoots()) err := avail.SharesAvailable(ctx, eh) require.NoError(t, err) } @@ -80,20 +107,26 @@ func TestSharesAvailableFailed(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - getter, _ := GetterWithRandSquare(t, 16) - avail := TestAvailability(getter) + getter := mock.NewMockGetter(gomock.NewController(t)) + ds := datastore.NewMapDatastore() + avail := NewShareAvailability(getter, ds) - // create new dah, that is not available by getter - bServ := ipld.NewMemBlockservice() - dah := availability_test.RandFillBS(t, 16, bServ) - eh := headertest.RandExtendedHeaderWithRoot(t, dah) + // create new eds, that is not available by getter + eds := edstest.RandEDS(t, 
16) + roots, err := share.NewAxisRoots(eds) + require.NoError(t, err) + eh := headertest.RandExtendedHeaderWithRoot(t, roots) - // blockstore doesn't actually have the dah, so it should fail - err := avail.SharesAvailable(ctx, eh) + // getter doesn't have the eds, so it should fail + getter.EXPECT(). + GetShare(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()). + Return(nil, shrex.ErrNotFound). + AnyTimes() + err = avail.SharesAvailable(ctx, eh) require.ErrorIs(t, err, share.ErrNotAvailable) // cache should have failed results now - result, err := avail.ds.Get(ctx, rootKey(dah)) + result, err := avail.ds.Get(ctx, rootKey(roots)) require.NoError(t, err) failed, err := decodeSamples(result) @@ -116,142 +149,45 @@ func TestSharesAvailableFailed(t *testing.T) { require.Empty(t, onceGetter.available) } -func TestShareAvailableOverMocknet_Light(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - net := availability_test.NewTestDAGNet(ctx, t) - _, root := RandNode(net, 16) - eh := headertest.RandExtendedHeader(t) - eh.DAH = root - nd := Node(net) - net.ConnectAll() - - err := nd.SharesAvailable(ctx, eh) - require.NoError(t, err) +type onceGetter struct { + *sync.Mutex + available map[Sample]struct{} } -func TestGetShare(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - n := 16 - getter, eh := GetterWithRandSquare(t, n) - - for i := range make([]bool, n) { - for j := range make([]bool, n) { - sh, err := getter.GetShare(ctx, eh, i, j) - require.NotNil(t, sh) - require.NoError(t, err) - } +func newOnceGetter() onceGetter { + return onceGetter{ + Mutex: &sync.Mutex{}, + available: make(map[Sample]struct{}), } } -func TestService_GetSharesByNamespace(t *testing.T) { - tests := []struct { - squareSize int - expectedShareCount int - }{ - {squareSize: 4, expectedShareCount: 2}, - {squareSize: 16, expectedShareCount: 2}, - {squareSize: 128, expectedShareCount: 2}, - } - - for _, tt := 
range tests { - t.Run("size: "+strconv.Itoa(tt.squareSize), func(t *testing.T) { - getter, bServ := EmptyGetter() - totalShares := tt.squareSize * tt.squareSize - randShares := sharetest.RandShares(t, totalShares) - idx1 := (totalShares - 1) / 2 - idx2 := totalShares / 2 - if tt.expectedShareCount > 1 { - // make it so that two rows have the same namespace - copy(share.GetNamespace(randShares[idx2]), share.GetNamespace(randShares[idx1])) - } - root := availability_test.FillBS(t, bServ, randShares) - eh := headertest.RandExtendedHeader(t) - eh.DAH = root - randNamespace := share.GetNamespace(randShares[idx1]) - - shares, err := getter.GetSharesByNamespace(context.Background(), eh, randNamespace) - require.NoError(t, err) - require.NoError(t, shares.Verify(root, randNamespace)) - flattened := shares.Flatten() - require.Len(t, flattened, tt.expectedShareCount) - for _, value := range flattened { - require.Equal(t, randNamespace, share.GetNamespace(value)) - } - if tt.expectedShareCount > 1 { - // idx1 is always smaller than idx2 - require.Equal(t, randShares[idx1], flattened[0]) - require.Equal(t, randShares[idx2], flattened[1]) - } - }) - t.Run("last two rows of a 4x4 square that have the same namespace have valid NMT proofs", func(t *testing.T) { - squareSize := 4 - totalShares := squareSize * squareSize - getter, bServ := EmptyGetter() - randShares := sharetest.RandShares(t, totalShares) - lastNID := share.GetNamespace(randShares[totalShares-1]) - for i := totalShares / 2; i < totalShares; i++ { - copy(share.GetNamespace(randShares[i]), lastNID) - } - root := availability_test.FillBS(t, bServ, randShares) - eh := headertest.RandExtendedHeader(t) - eh.DAH = root - - shares, err := getter.GetSharesByNamespace(context.Background(), eh, lastNID) - require.NoError(t, err) - require.NoError(t, shares.Verify(root, lastNID)) - }) +func (m onceGetter) AddSamples(samples []Sample) { + m.Lock() + defer m.Unlock() + for _, s := range samples { + m.available[s] = struct{}{} } } 
-func TestGetShares(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - n := 16 - getter, eh := GetterWithRandSquare(t, n) - - eds, err := getter.GetEDS(ctx, eh) - require.NoError(t, err) - gotDAH, err := share.NewRoot(eds) - require.NoError(t, err) - - require.True(t, eh.DAH.Equals(gotDAH)) +func (m onceGetter) GetShare(_ context.Context, _ *header.ExtendedHeader, row, col int) (share.Share, error) { + m.Lock() + defer m.Unlock() + s := Sample{Row: uint16(row), Col: uint16(col)} + if _, ok := m.available[s]; ok { + delete(m.available, s) + return share.Share{}, nil + } + return share.Share{}, share.ErrNotAvailable } -func TestService_GetSharesByNamespaceNotFound(t *testing.T) { - getter, eh := GetterWithRandSquare(t, 1) - eh.DAH.RowRoots = nil - - emptyShares, err := getter.GetSharesByNamespace(context.Background(), eh, sharetest.RandV0Namespace()) - require.NoError(t, err) - require.Empty(t, emptyShares.Flatten()) +func (m onceGetter) GetEDS(_ context.Context, _ *header.ExtendedHeader) (*rsmt2d.ExtendedDataSquare, error) { + panic("not implemented") } -func BenchmarkService_GetSharesByNamespace(b *testing.B) { - tests := []struct { - amountShares int - }{ - {amountShares: 4}, - {amountShares: 16}, - {amountShares: 128}, - } - - for _, tt := range tests { - b.Run(strconv.Itoa(tt.amountShares), func(b *testing.B) { - t := &testing.T{} - getter, eh := GetterWithRandSquare(t, tt.amountShares) - root := eh.DAH - randNamespace := root.RowRoots[(len(root.RowRoots)-1)/2][:share.NamespaceSize] - root.RowRoots[(len(root.RowRoots) / 2)] = root.RowRoots[(len(root.RowRoots)-1)/2] - b.ResetTimer() - for i := 0; i < b.N; i++ { - _, err := getter.GetSharesByNamespace(context.Background(), eh, randNamespace) - require.NoError(t, err) - } - }) - } +func (m onceGetter) GetSharesByNamespace( + _ context.Context, + _ *header.ExtendedHeader, + _ share.Namespace, +) (shwap.NamespaceData, error) { + panic("not implemented") } diff --git 
a/share/availability/light/testing.go b/share/availability/light/testing.go index b6251b4fbd..a7ef3248c4 100644 --- a/share/availability/light/testing.go +++ b/share/availability/light/testing.go @@ -1,107 +1,108 @@ package light -import ( - "context" - "sync" - "testing" - - "github.com/ipfs/boxo/blockservice" - "github.com/ipfs/go-datastore" - - "github.com/celestiaorg/rsmt2d" - - "github.com/celestiaorg/celestia-node/header" - "github.com/celestiaorg/celestia-node/header/headertest" - "github.com/celestiaorg/celestia-node/share" - availability_test "github.com/celestiaorg/celestia-node/share/availability/test" - "github.com/celestiaorg/celestia-node/share/getters" - "github.com/celestiaorg/celestia-node/share/ipld" -) - -// GetterWithRandSquare provides a share.Getter filled with 'n' NMT trees of 'n' random shares, -// essentially storing a whole square. -func GetterWithRandSquare(t *testing.T, n int) (share.Getter, *header.ExtendedHeader) { - bServ := ipld.NewMemBlockservice() - getter := getters.NewIPLDGetter(bServ) - root := availability_test.RandFillBS(t, n, bServ) - eh := headertest.RandExtendedHeader(t) - eh.DAH = root - - return getter, eh -} - -// EmptyGetter provides an unfilled share.Getter with corresponding blockservice.BlockService than -// can be filled by the test. -func EmptyGetter() (share.Getter, blockservice.BlockService) { - bServ := ipld.NewMemBlockservice() - getter := getters.NewIPLDGetter(bServ) - return getter, bServ -} - -// RandNode creates a Light Node filled with a random block of the given size. -func RandNode(dn *availability_test.TestDagNet, squareSize int) (*availability_test.TestNode, *share.Root) { - nd := Node(dn) - return nd, availability_test.RandFillBS(dn.T, squareSize, nd.BlockService) -} - -// Node creates a new empty Light Node. 
-func Node(dn *availability_test.TestDagNet) *availability_test.TestNode { - nd := dn.NewTestNode() - nd.Getter = getters.NewIPLDGetter(nd.BlockService) - nd.Availability = TestAvailability(nd.Getter) - return nd -} - -func TestAvailability(getter share.Getter) *ShareAvailability { - ds := datastore.NewMapDatastore() - return NewShareAvailability(getter, ds) -} - -func SubNetNode(sn *availability_test.SubNet) *availability_test.TestNode { - nd := Node(sn.TestDagNet) - sn.AddNode(nd) - return nd -} - -type onceGetter struct { - *sync.Mutex - available map[Sample]struct{} -} - -func newOnceGetter() onceGetter { - return onceGetter{ - Mutex: &sync.Mutex{}, - available: make(map[Sample]struct{}), - } -} - -func (m onceGetter) AddSamples(samples []Sample) { - m.Lock() - defer m.Unlock() - for _, s := range samples { - m.available[s] = struct{}{} - } -} - -func (m onceGetter) GetShare(_ context.Context, _ *header.ExtendedHeader, row, col int) (share.Share, error) { - m.Lock() - defer m.Unlock() - s := Sample{Row: uint16(row), Col: uint16(col)} - if _, ok := m.available[s]; ok { - delete(m.available, s) - return share.Share{}, nil - } - return share.Share{}, share.ErrNotAvailable -} - -func (m onceGetter) GetEDS(_ context.Context, _ *header.ExtendedHeader) (*rsmt2d.ExtendedDataSquare, error) { - panic("not implemented") -} - -func (m onceGetter) GetSharesByNamespace( - _ context.Context, - _ *header.ExtendedHeader, - _ share.Namespace, -) (share.NamespacedShares, error) { - panic("not implemented") -} +// +// import ( +// "context" +// "sync" +// "testing" +// +// "github.com/ipfs/boxo/blockservice" +// "github.com/ipfs/go-datastore" +// +// "github.com/celestiaorg/rsmt2d" +// +// "github.com/celestiaorg/celestia-node/header" +// "github.com/celestiaorg/celestia-node/header/headertest" +// "github.com/celestiaorg/celestia-node/share" +// availability_test "github.com/celestiaorg/celestia-node/share/availability/test" +// 
"github.com/celestiaorg/celestia-node/share/getters" +// "github.com/celestiaorg/celestia-node/share/ipld" +//) +// +//// GetterWithRandSquare provides a share.Getter filled with 'n' NMT trees of 'n' random shares, +//// essentially storing a whole square. +// func GetterWithRandSquare(t *testing.T, n int) (share.Getter, *header.ExtendedHeader) { +// bServ := ipld.NewMemBlockservice() +// getter := getters.NewIPLDGetter(bServ) +// root := availability_test.RandFillBS(t, n, bServ) +// eh := headertest.RandExtendedHeader(t) +// eh.DAH = root +// +// return getter, eh +//} +// +//// EmptyGetter provides an unfilled share.Getter with corresponding blockservice.BlockService than +//// can be filled by the test. +// func EmptyGetter() (share.Getter, blockservice.BlockService) { +// bServ := ipld.NewMemBlockservice() +// getter := getters.NewIPLDGetter(bServ) +// return getter, bServ +//} +// +//// RandNode creates a Light Node filled with a random block of the given size. +// func RandNode(dn *availability_test.TestDagNet, squareSize int) (*availability_test.TestNode, *share.AxisRoots) { +// nd := Node(dn) +// return nd, availability_test.RandFillBS(dn.T, squareSize, nd.BlockService) +//} +// +//// Node creates a new empty Light Node. 
+// func Node(dn *availability_test.TestDagNet) *availability_test.TestNode { +// nd := dn.NewTestNode() +// nd.Getter = getters.NewIPLDGetter(nd.BlockService) +// nd.Availability = TestAvailability(nd.Getter) +// return nd +//} +// +// func TestAvailability(getter share.Getter) *ShareAvailability { +// ds := datastore.NewMapDatastore() +// return NewShareAvailability(getter, ds) +//} +// +// func SubNetNode(sn *availability_test.SubNet) *availability_test.TestNode { +// nd := Node(sn.TestDagNet) +// sn.AddNode(nd) +// return nd +//} +// +// type onceGetter struct { +// *sync.Mutex +// available map[Sample]struct{} +//} +// +// func newOnceGetter() onceGetter { +// return onceGetter{ +// Mutex: &sync.Mutex{}, +// available: make(map[Sample]struct{}), +// } +//} +// +// func (m onceGetter) AddSamples(samples []Sample) { +// m.Lock() +// defer m.Unlock() +// for _, s := range samples { +// m.available[s] = struct{}{} +// } +//} +// +// func (m onceGetter) GetShare(_ context.Context, _ *header.ExtendedHeader, row, col int) (share.Share, error) { +// m.Lock() +// defer m.Unlock() +// s := Sample{Row: uint16(row), Col: uint16(col)} +// if _, ok := m.available[s]; ok { +// delete(m.available, s) +// return share.Share{}, nil +// } +// return share.Share{}, share.ErrNotAvailable +//} +// +// func (m onceGetter) GetEDS(_ context.Context, _ *header.ExtendedHeader) (*rsmt2d.ExtendedDataSquare, error) { +// panic("not implemented") +//} +// +// func (m onceGetter) GetSharesByNamespace( +// _ context.Context, +// _ *header.ExtendedHeader, +// _ share.Namespace, +// ) (share.NamespacedShares, error) { +// panic("not implemented") +// } diff --git a/share/availability/test/corrupt_data.go b/share/availability/test/corrupt_data.go deleted file mode 100644 index 1ff553f8b3..0000000000 --- a/share/availability/test/corrupt_data.go +++ /dev/null @@ -1,130 +0,0 @@ -package availability_test - -import ( - "context" - "crypto/rand" - "fmt" - mrand "math/rand" - "testing" - - 
"github.com/ipfs/boxo/blockstore" - blocks "github.com/ipfs/go-block-format" - "github.com/ipfs/go-cid" - ds "github.com/ipfs/go-datastore" - dssync "github.com/ipfs/go-datastore/sync" -) - -var _ blockstore.Blockstore = (*FraudulentBlockstore)(nil) - -// CorruptBlock is a block where the cid doesn't match the data. It fulfills the blocks.Block -// interface. -type CorruptBlock struct { - cid cid.Cid - data []byte -} - -func (b *CorruptBlock) RawData() []byte { - return b.data -} - -func (b *CorruptBlock) Cid() cid.Cid { - return b.cid -} - -func (b *CorruptBlock) String() string { - return fmt.Sprintf("[Block %s]", b.Cid()) -} - -func (b *CorruptBlock) Loggable() map[string]interface{} { - return map[string]interface{}{ - "block": b.Cid().String(), - } -} - -func NewCorruptBlock(data []byte, fakeCID cid.Cid) *CorruptBlock { - return &CorruptBlock{ - fakeCID, - data, - } -} - -// FraudulentBlockstore is a mock blockstore.Blockstore that saves both corrupted and original data -// for every block it receives. If FraudulentBlockstore.Attacking is true, it will serve the -// corrupted data on requests. 
-type FraudulentBlockstore struct { - ds.Datastore - Attacking bool -} - -func (fb FraudulentBlockstore) Has(context.Context, cid.Cid) (bool, error) { - return false, nil -} - -func (fb FraudulentBlockstore) Get(ctx context.Context, cid cid.Cid) (blocks.Block, error) { - key := cid.String() - if fb.Attacking { - key = "corrupt_get" + key - } - - data, err := fb.Datastore.Get(ctx, ds.NewKey(key)) - if err != nil { - return nil, err - } - return NewCorruptBlock(data, cid), nil -} - -func (fb FraudulentBlockstore) GetSize(ctx context.Context, cid cid.Cid) (int, error) { - key := cid.String() - if fb.Attacking { - key = "corrupt_size" + key - } - - return fb.Datastore.GetSize(ctx, ds.NewKey(key)) -} - -func (fb FraudulentBlockstore) Put(ctx context.Context, block blocks.Block) error { - err := fb.Datastore.Put(ctx, ds.NewKey(block.Cid().String()), block.RawData()) - if err != nil { - return err - } - - // create data that doesn't match the CID with arbitrary lengths between 1 and - // len(block.RawData())*2 - corrupted := make([]byte, 1+mrand.Int()%(len(block.RawData())*2-1)) //nolint:gosec - _, _ = rand.Read(corrupted) - return fb.Datastore.Put(ctx, ds.NewKey("corrupt"+block.Cid().String()), corrupted) -} - -func (fb FraudulentBlockstore) PutMany(ctx context.Context, blocks []blocks.Block) error { - for _, b := range blocks { - err := fb.Put(ctx, b) - if err != nil { - return err - } - } - return nil -} - -func (fb FraudulentBlockstore) DeleteBlock(context.Context, cid.Cid) error { - panic("implement me") -} - -func (fb FraudulentBlockstore) AllKeysChan(context.Context) (<-chan cid.Cid, error) { - panic("implement me") -} - -func (fb FraudulentBlockstore) HashOnRead(bool) { - panic("implement me") -} - -// MockNode creates a TestNode that uses a FraudulentBlockstore to simulate serving corrupted data. 
-func MockNode(t *testing.T, net *TestDagNet) (*TestNode, *FraudulentBlockstore) { - t.Helper() - dstore := dssync.MutexWrap(ds.NewMapDatastore()) - mockBS := &FraudulentBlockstore{ - Datastore: dstore, - Attacking: false, - } - provider := net.NewTestNodeWithBlockstore(dstore, mockBS) - return provider, mockBS -} diff --git a/share/availability/test/testing.go b/share/availability/test/testing.go index 64e8d23bb7..6a8f1906ca 100644 --- a/share/availability/test/testing.go +++ b/share/availability/test/testing.go @@ -20,26 +20,27 @@ import ( "github.com/celestiaorg/celestia-node/share" "github.com/celestiaorg/celestia-node/share/ipld" "github.com/celestiaorg/celestia-node/share/sharetest" + "github.com/celestiaorg/celestia-node/share/shwap" ) // RandFillBS fills the given BlockService with a random block of a given size. -func RandFillBS(t *testing.T, n int, bServ blockservice.BlockService) *share.Root { +func RandFillBS(t *testing.T, n int, bServ blockservice.BlockService) *share.AxisRoots { shares := sharetest.RandShares(t, n*n) return FillBS(t, bServ, shares) } // FillBS fills the given BlockService with the given shares. -func FillBS(t *testing.T, bServ blockservice.BlockService, shares []share.Share) *share.Root { +func FillBS(t *testing.T, bServ blockservice.BlockService, shares []share.Share) *share.AxisRoots { eds, err := ipld.AddShares(context.TODO(), shares, bServ) require.NoError(t, err) - dah, err := share.NewRoot(eds) + roots, err := share.NewAxisRoots(eds) require.NoError(t, err) - return dah + return roots } type TestNode struct { net *TestDagNet - share.Getter + shwap.Getter share.Availability blockservice.BlockService host.Host diff --git a/share/doc.go b/share/doc.go index 97229932a7..6c6426472f 100644 --- a/share/doc.go +++ b/share/doc.go @@ -5,7 +5,7 @@ block data. 
Though this package contains several useful methods for getting specific shares and/or sampling them at random, a particularly useful method is GetSharesByNamespace which retrieves all shares of block data of the given Namespace from the block associated with the given -DataAvailabilityHeader (DAH, but referred to as Root within this package). +DataAvailabilityHeader (DAH, but referred to as AxisRoots within this package). This package also contains declaration of the Availability interface. Implementations of the interface (light, full) are located in the availability sub-folder. diff --git a/share/eds/accessor.go b/share/eds/accessor.go new file mode 100644 index 0000000000..ca0b475bb2 --- /dev/null +++ b/share/eds/accessor.go @@ -0,0 +1,57 @@ +package eds + +import ( + "context" + "io" + + "github.com/celestiaorg/rsmt2d" + + "github.com/celestiaorg/celestia-node/share" + "github.com/celestiaorg/celestia-node/share/shwap" +) + +// EmptyAccessor is an accessor of an empty EDS block. +var EmptyAccessor = &Rsmt2D{ExtendedDataSquare: share.EmptyEDS()} + +// Accessor is an interface for accessing extended data square data. +type Accessor interface { + // Size returns square size of the Accessor. + Size(ctx context.Context) int + // DataHash returns data hash of the Accessor. + DataHash(ctx context.Context) (share.DataHash, error) + // AxisRoots returns share.AxisRoots (DataAvailabilityHeader) of the Accessor. + AxisRoots(ctx context.Context) (*share.AxisRoots, error) + // Sample returns share and corresponding proof for row and column indices. Implementation can + // choose which axis to use for proof. Chosen axis for proof should be indicated in the returned + // Sample. + Sample(ctx context.Context, rowIdx, colIdx int) (shwap.Sample, error) + // AxisHalf returns half of shares axis of the given type and index. Side is determined by + // implementation. Implementations should indicate the side in the returned AxisHalf. 
+ AxisHalf(ctx context.Context, axisType rsmt2d.Axis, axisIdx int) (AxisHalf, error) + // RowNamespaceData returns data for the given namespace and row index. + RowNamespaceData(ctx context.Context, namespace share.Namespace, rowIdx int) (shwap.RowNamespaceData, error) + // Shares returns data (ODS) shares extracted from the Accessor. + Shares(ctx context.Context) ([]share.Share, error) +} + +// AccessorStreamer is an interface that groups Accessor and Streamer interfaces. +type AccessorStreamer interface { + Accessor + Streamer +} + +type Streamer interface { + // Reader returns binary reader for the shares. It should read the shares from the + // ODS part of the square row by row. + Reader() (io.Reader, error) + io.Closer +} + +type accessorStreamer struct { + Accessor + Streamer +} + +func AccessorAndStreamer(a Accessor, s Streamer) AccessorStreamer { + return &accessorStreamer{a, s} +} diff --git a/share/eds/adapters.go b/share/eds/adapters.go deleted file mode 100644 index 8bf2340d91..0000000000 --- a/share/eds/adapters.go +++ /dev/null @@ -1,66 +0,0 @@ -package eds - -import ( - "context" - "sync" - - "github.com/filecoin-project/dagstore" - "github.com/ipfs/boxo/blockservice" - blocks "github.com/ipfs/go-block-format" - "github.com/ipfs/go-cid" -) - -var _ blockservice.BlockGetter = (*BlockGetter)(nil) - -// NewBlockGetter creates new blockservice.BlockGetter adapter from dagstore.ReadBlockstore -func NewBlockGetter(store dagstore.ReadBlockstore) *BlockGetter { - return &BlockGetter{store: store} -} - -// BlockGetter is an adapter for dagstore.ReadBlockstore to implement blockservice.BlockGetter -// interface. -type BlockGetter struct { - store dagstore.ReadBlockstore -} - -// GetBlock gets the requested block by the given CID. 
-func (bg *BlockGetter) GetBlock(ctx context.Context, cid cid.Cid) (blocks.Block, error) { - return bg.store.Get(ctx, cid) -} - -// GetBlocks does a batch request for the given cids, returning blocks as -// they are found, in no particular order. -// -// It implements blockservice.BlockGetter interface, that requires: -// It may not be able to find all requested blocks (or the context may -// be canceled). In that case, it will close the channel early. It is up -// to the consumer to detect this situation and keep track which blocks -// it has received and which it hasn't. -func (bg *BlockGetter) GetBlocks(ctx context.Context, cids []cid.Cid) <-chan blocks.Block { - bCh := make(chan blocks.Block) - - go func() { - var wg sync.WaitGroup - wg.Add(len(cids)) - for _, c := range cids { - go func(cid cid.Cid) { - defer wg.Done() - block, err := bg.store.Get(ctx, cid) - if err != nil { - log.Debugw("getblocks: error getting block by cid", "cid", cid, "error", err) - return - } - - select { - case bCh <- block: - case <-ctx.Done(): - return - } - }(c) - } - wg.Wait() - close(bCh) - }() - - return bCh -} diff --git a/share/eds/adapters_test.go b/share/eds/adapters_test.go deleted file mode 100644 index 70165b81c8..0000000000 --- a/share/eds/adapters_test.go +++ /dev/null @@ -1,148 +0,0 @@ -package eds - -import ( - "context" - "errors" - mrand "math/rand" - "sort" - "testing" - "time" - - blocks "github.com/ipfs/go-block-format" - "github.com/ipfs/go-cid" - "github.com/stretchr/testify/require" - - "github.com/celestiaorg/celestia-node/share/ipld" -) - -func TestBlockGetter_GetBlocks(t *testing.T) { - t.Run("happy path", func(t *testing.T) { - cids := randCIDs(t, 32) - // sort cids in asc order - sort.Slice(cids, func(i, j int) bool { - return cids[i].String() < cids[j].String() - }) - - bg := &BlockGetter{store: rbsMock{}} - blocksCh := bg.GetBlocks(context.Background(), cids) - - // collect blocks from channel - blocks := make([]blocks.Block, 0, len(cids)) - for block := 
range blocksCh { - blocks = append(blocks, block) - } - - // sort blocks in cid asc order - sort.Slice(blocks, func(i, j int) bool { - return blocks[i].Cid().String() < blocks[j].Cid().String() - }) - - // validate results - require.Equal(t, len(cids), len(blocks)) - for i, block := range blocks { - require.Equal(t, cids[i].String(), block.Cid().String()) - } - }) - t.Run("retrieval error", func(t *testing.T) { - cids := randCIDs(t, 32) - - // split cids into failed and succeeded - failedLen := mrand.Intn(len(cids)-1) + 1 - failed := make(map[cid.Cid]struct{}, failedLen) - succeeded := make([]cid.Cid, 0, len(cids)-failedLen) - for i, cid := range cids { - if i < failedLen { - failed[cid] = struct{}{} - continue - } - succeeded = append(succeeded, cid) - } - - // sort succeeded cids in asc order - sort.Slice(succeeded, func(i, j int) bool { - return succeeded[i].String() < succeeded[j].String() - }) - - bg := &BlockGetter{store: rbsMock{failed: failed}} - blocksCh := bg.GetBlocks(context.Background(), cids) - - // collect blocks from channel - blocks := make([]blocks.Block, 0, len(cids)) - for block := range blocksCh { - blocks = append(blocks, block) - } - - // sort blocks in cid asc order - sort.Slice(blocks, func(i, j int) bool { - return blocks[i].Cid().String() < blocks[j].Cid().String() - }) - - // validate results - require.Equal(t, len(succeeded), len(blocks)) - for i, block := range blocks { - require.Equal(t, succeeded[i].String(), block.Cid().String()) - } - }) - t.Run("retrieval timeout", func(t *testing.T) { - cids := randCIDs(t, 128) - - bg := &BlockGetter{ - store: rbsMock{}, - } - - // cancel the context before any blocks are collected - ctx, cancel := context.WithCancel(context.Background()) - cancel() - - blocksCh := bg.GetBlocks(ctx, cids) - - // pretend nobody is reading from blocksCh after context is canceled - time.Sleep(50 * time.Millisecond) - - // blocksCh should be closed indicating GetBlocks exited - select { - case _, ok := <-blocksCh: - 
require.False(t, ok) - default: - t.Error("channel is not closed on canceled context") - } - }) -} - -// rbsMock is a dagstore.ReadBlockstore mock -type rbsMock struct { - failed map[cid.Cid]struct{} -} - -func (r rbsMock) Has(context.Context, cid.Cid) (bool, error) { - panic("implement me") -} - -func (r rbsMock) Get(_ context.Context, cid cid.Cid) (blocks.Block, error) { - // return error for failed items - if _, ok := r.failed[cid]; ok { - return nil, errors.New("not found") - } - - return blocks.NewBlockWithCid(nil, cid) -} - -func (r rbsMock) GetSize(context.Context, cid.Cid) (int, error) { - panic("implement me") -} - -func (r rbsMock) AllKeysChan(context.Context) (<-chan cid.Cid, error) { - panic("implement me") -} - -func (r rbsMock) HashOnRead(bool) { - panic("implement me") -} - -func randCIDs(t *testing.T, n int) []cid.Cid { - cids := make([]cid.Cid, n) - for i := range cids { - cids[i] = ipld.RandNamespacedCID(t) - } - return cids -} diff --git a/share/eds/axis_half.go b/share/eds/axis_half.go new file mode 100644 index 0000000000..6b48676fe2 --- /dev/null +++ b/share/eds/axis_half.go @@ -0,0 +1,69 @@ +package eds + +import ( + "fmt" + + "github.com/celestiaorg/celestia-node/share" + "github.com/celestiaorg/celestia-node/share/shwap" +) + +var codec = share.DefaultRSMT2DCodec() + +// AxisHalf represents a half of data for a row or column in the EDS. +type AxisHalf struct { + Shares []share.Share + // IsParity indicates whether the half is parity or data. + IsParity bool +} + +// ToRow converts the AxisHalf to a shwap.Row. +func (a AxisHalf) ToRow() shwap.Row { + side := shwap.Left + if a.IsParity { + side = shwap.Right + } + return shwap.NewRow(a.Shares, side) +} + +// Extended returns full axis shares from half axis shares. +func (a AxisHalf) Extended() ([]share.Share, error) { + if a.IsParity { + return reconstructShares(a.Shares) + } + return extendShares(a.Shares) +} + +// extendShares constructs full axis shares from original half axis shares. 
+func extendShares(original []share.Share) ([]share.Share, error) { + if len(original) == 0 { + return nil, fmt.Errorf("original shares are empty") + } + + parity, err := codec.Encode(original) + if err != nil { + return nil, fmt.Errorf("encoding: %w", err) + } + + sqLen := len(original) * 2 + shares := make([]share.Share, sqLen) + copy(shares, original) + copy(shares[sqLen/2:], parity) + return shares, nil +} + +func reconstructShares(parity []share.Share) ([]share.Share, error) { + if len(parity) == 0 { + return nil, fmt.Errorf("parity shares are empty") + } + + sqLen := len(parity) * 2 + shares := make([]share.Share, sqLen) + for i := sqLen / 2; i < sqLen; i++ { + shares[i] = parity[i-sqLen/2] + } + _, err := codec.Decode(shares) + if err != nil { + return nil, fmt.Errorf("reconstructing: %w", err) + } + return shares, nil +} diff --git a/share/eds/axis_half_test.go b/share/eds/axis_half_test.go new file mode 100644 index 0000000000..752add5acd --- /dev/null +++ b/share/eds/axis_half_test.go @@ -0,0 +1,32 @@ +package eds + +import ( + "testing" + + "github.com/stretchr/testify/require" + + "github.com/celestiaorg/celestia-node/share/sharetest" +) + +func TestExtendAxisHalf(t *testing.T) { + shares := sharetest.RandShares(t, 16) + + original := AxisHalf{ + Shares: shares, + IsParity: false, + } + + extended, err := original.Extended() + require.NoError(t, err) + require.Len(t, extended, len(shares)*2) + + parity := AxisHalf{ + Shares: extended[len(shares):], + IsParity: true, + } + + parityExtended, err := parity.Extended() + require.NoError(t, err) + + require.Equal(t, extended, parityExtended) +} diff --git a/share/eds/blockstore.go b/share/eds/blockstore.go deleted file mode 100644 index 5ae109bf29..0000000000 --- a/share/eds/blockstore.go +++ /dev/null @@ -1,156 +0,0 @@ -package eds - -import ( - "context" - "errors" - "fmt" - - bstore "github.com/ipfs/boxo/blockstore" - "github.com/ipfs/boxo/datastore/dshelp" - blocks "github.com/ipfs/go-block-format" - 
"github.com/ipfs/go-cid" - "github.com/ipfs/go-datastore" - "github.com/ipfs/go-datastore/namespace" - ipld "github.com/ipfs/go-ipld-format" - - share_ipld "github.com/celestiaorg/celestia-node/share/ipld" -) - -var _ bstore.Blockstore = (*blockstore)(nil) - -var ( - blockstoreCacheKey = datastore.NewKey("bs-cache") - errUnsupportedOperation = errors.New("unsupported operation") -) - -// blockstore implements the store.Blockstore interface on an EDSStore. -// The lru cache approach is heavily inspired by the existing implementation upstream. -// We simplified the design to not support multiple shards per key, call GetSize directly on the -// underlying RO blockstore, and do not throw errors on Put/PutMany. Also, we do not abstract away -// the blockstore operations. -// -// The intuition here is that each CAR file is its own blockstore, so we need this top level -// implementation to allow for the blockstore operations to be routed to the underlying stores. -type blockstore struct { - store *Store - ds datastore.Batching -} - -func newBlockstore(store *Store, ds datastore.Batching) *blockstore { - return &blockstore{ - store: store, - ds: namespace.Wrap(ds, blockstoreCacheKey), - } -} - -func (bs *blockstore) Has(ctx context.Context, cid cid.Cid) (bool, error) { - keys, err := bs.store.dgstr.ShardsContainingMultihash(ctx, cid.Hash()) - if errors.Is(err, ErrNotFound) || errors.Is(err, ErrNotFoundInIndex) { - // key wasn't found in top level blockstore, but could be in datastore while being reconstructed - dsHas, dsErr := bs.ds.Has(ctx, dshelp.MultihashToDsKey(cid.Hash())) - if dsErr != nil { - return false, nil //nolint:nilerr // return false if error - } - return dsHas, nil - } - if err != nil { - return false, err - } - - return len(keys) > 0, nil -} - -func (bs *blockstore) Get(ctx context.Context, cid cid.Cid) (blocks.Block, error) { - blockstr, err := bs.getReadOnlyBlockstore(ctx, cid) - if err == nil { - defer closeAndLog("blockstore", blockstr) - return 
blockstr.Get(ctx, cid) - } - - if errors.Is(err, ErrNotFound) || errors.Is(err, ErrNotFoundInIndex) { - k := dshelp.MultihashToDsKey(cid.Hash()) - blockData, err := bs.ds.Get(ctx, k) - if err == nil { - return blocks.NewBlockWithCid(blockData, cid) - } - // nmt's GetNode expects an ipld.ErrNotFound when a cid is not found. - return nil, ipld.ErrNotFound{Cid: cid} - } - - log.Debugf("failed to get blockstore for cid %s: %s", cid, err) - return nil, err -} - -func (bs *blockstore) GetSize(context.Context, cid.Cid) (int, error) { - // For now we return a fixed result, which is a max of possible values (see above). - // Motivation behind such behavior is described here: - // https://github.com/celestiaorg/celestia-node/issues/3630 - return share_ipld.LeafNodeSize, nil -} - -func (bs *blockstore) DeleteBlock(ctx context.Context, cid cid.Cid) error { - k := dshelp.MultihashToDsKey(cid.Hash()) - return bs.ds.Delete(ctx, k) -} - -func (bs *blockstore) Put(ctx context.Context, blk blocks.Block) error { - k := dshelp.MultihashToDsKey(blk.Cid().Hash()) - // note: we leave duplicate resolution to the underlying datastore - return bs.ds.Put(ctx, k, blk.RawData()) -} - -func (bs *blockstore) PutMany(ctx context.Context, blocks []blocks.Block) error { - if len(blocks) == 1 { - // performance fast-path - return bs.Put(ctx, blocks[0]) - } - - t, err := bs.ds.Batch(ctx) - if err != nil { - return err - } - for _, b := range blocks { - k := dshelp.MultihashToDsKey(b.Cid().Hash()) - err = t.Put(ctx, k, b.RawData()) - if err != nil { - return err - } - } - return t.Commit(ctx) -} - -// AllKeysChan is a noop on the EDS blockstore because the keys are not stored in a single CAR file. -func (bs *blockstore) AllKeysChan(context.Context) (<-chan cid.Cid, error) { - return nil, errUnsupportedOperation -} - -// HashOnRead is a noop on the EDS blockstore but an error cannot be returned due to the method -// signature from the blockstore interface. 
-func (bs *blockstore) HashOnRead(bool) { - log.Warnf("HashOnRead is a noop on the EDS blockstore") -} - -// getReadOnlyBlockstore finds the underlying blockstore of the shard that contains the given CID. -func (bs *blockstore) getReadOnlyBlockstore(ctx context.Context, cid cid.Cid) (*BlockstoreCloser, error) { - keys, err := bs.store.dgstr.ShardsContainingMultihash(ctx, cid.Hash()) - if errors.Is(err, datastore.ErrNotFound) || errors.Is(err, ErrNotFoundInIndex) { - return nil, ErrNotFound - } - if err != nil { - return nil, fmt.Errorf("failed to find shards containing multihash: %w", err) - } - - // check if either cache contains an accessor - shardKey := keys[0] - accessor, err := bs.store.cache.Load().Get(shardKey) - if err == nil { - return blockstoreCloser(accessor) - } - - // load accessor to the blockstore cache and use it as blockstoreCloser - accessor, err = bs.store.cache.Load().Second().GetOrLoad(ctx, shardKey, bs.store.getAccessor) - if err != nil { - return nil, fmt.Errorf("failed to get accessor for shard %s: %w", shardKey, err) - } - return blockstoreCloser(accessor) -} diff --git a/share/eds/blockstore_test.go b/share/eds/blockstore_test.go deleted file mode 100644 index d9dbf7ed30..0000000000 --- a/share/eds/blockstore_test.go +++ /dev/null @@ -1,81 +0,0 @@ -package eds - -import ( - "context" - "io" - "testing" - - "github.com/filecoin-project/dagstore" - ipld "github.com/ipfs/go-ipld-format" - "github.com/ipld/go-car" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - ipld2 "github.com/celestiaorg/celestia-node/share/ipld" -) - -// TestBlockstore_Operations tests Has, Get, and GetSize on the top level eds.Store blockstore. -// It verifies that these operations are valid and successful on all blocks stored in a CAR file. 
-func TestBlockstore_Operations(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - t.Cleanup(cancel) - - edsStore, err := newStore(t) - require.NoError(t, err) - err = edsStore.Start(ctx) - require.NoError(t, err) - - eds, dah := randomEDS(t) - err = edsStore.Put(ctx, dah.Hash(), eds) - require.NoError(t, err) - - r, err := edsStore.GetCAR(ctx, dah.Hash()) - require.NoError(t, err) - carReader, err := car.NewCarReader(r) - require.NoError(t, err) - - topLevelBS := edsStore.Blockstore() - carBS, err := edsStore.CARBlockstore(ctx, dah.Hash()) - require.NoError(t, err) - defer func() { - require.NoError(t, carBS.Close()) - }() - - root, err := edsStore.GetDAH(ctx, dah.Hash()) - require.NoError(t, err) - require.True(t, dah.Equals(root)) - - blockstores := []dagstore.ReadBlockstore{topLevelBS, carBS} - - for { - next, err := carReader.Next() - if err != nil { - require.ErrorIs(t, err, io.EOF) - break - } - blockCid := next.Cid() - randomCid := ipld2.RandNamespacedCID(t) - - for _, bs := range blockstores { - // test GetSize - has, err := bs.Has(ctx, blockCid) - require.NoError(t, err, "blockstore.Has could not find root CID") - require.True(t, has) - - // test GetSize - block, err := bs.Get(ctx, blockCid) - assert.NoError(t, err, "blockstore.Get could not get a leaf CID") - assert.Equal(t, block.Cid(), blockCid) - assert.Equal(t, block.RawData(), next.RawData()) - - // test Get (cid not found) - _, err = bs.Get(ctx, randomCid) - require.ErrorAs(t, err, &ipld.ErrNotFound{Cid: randomCid}) - - // test GetSize - size, err := bs.GetSize(ctx, blockCid) - assert.NotZerof(t, size, "blocksize.GetSize reported a root block from blockstore was empty") - assert.NoError(t, err) - } - } -} diff --git a/share/eds/byzantine/bad_encoding_test.go b/share/eds/byzantine/bad_encoding_test.go index 59ac24ad55..f4dbe84e31 100644 --- a/share/eds/byzantine/bad_encoding_test.go +++ b/share/eds/byzantine/bad_encoding_test.go @@ -13,7 +13,6 @@ import ( 
"github.com/stretchr/testify/require" core "github.com/tendermint/tendermint/types" - "github.com/celestiaorg/celestia-app/v2/pkg/da" "github.com/celestiaorg/celestia-app/v2/test/util/malicious" "github.com/celestiaorg/nmt" "github.com/celestiaorg/rsmt2d" @@ -31,16 +30,16 @@ func TestBEFP_Validate(t *testing.T) { bServ := ipld.NewMemBlockservice() square := edstest.RandByzantineEDS(t, 16) - dah, err := da.NewDataAvailabilityHeader(square) + roots, err := share.NewAxisRoots(square) require.NoError(t, err) err = ipld.ImportEDS(ctx, square, bServ) require.NoError(t, err) var errRsmt2d *rsmt2d.ErrByzantineData - err = square.Repair(dah.RowRoots, dah.ColumnRoots) + err = square.Repair(roots.RowRoots, roots.ColumnRoots) require.ErrorAs(t, err, &errRsmt2d) - byzantine := NewErrByzantine(ctx, bServ.Blockstore(), &dah, errRsmt2d) + byzantine := NewErrByzantine(ctx, bServ.Blockstore(), roots, errRsmt2d) var errByz *ErrByzantine require.ErrorAs(t, byzantine, &errByz) @@ -55,7 +54,7 @@ func TestBEFP_Validate(t *testing.T) { { name: "valid BEFP", prepareFn: func() error { - return proof.Validate(&header.ExtendedHeader{DAH: &dah}) + return proof.Validate(&header.ExtendedHeader{DAH: roots}) }, expectedResult: func(err error) { require.NoError(t, err) @@ -65,12 +64,12 @@ func TestBEFP_Validate(t *testing.T) { name: "invalid BEFP for valid header", prepareFn: func() error { validSquare := edstest.RandEDS(t, 2) - validDah, err := da.NewDataAvailabilityHeader(validSquare) + validRoots, err := share.NewAxisRoots(validSquare) require.NoError(t, err) err = ipld.ImportEDS(ctx, validSquare, bServ) require.NoError(t, err) validShares := validSquare.Flattened() - errInvalidByz := NewErrByzantine(ctx, bServ.Blockstore(), &validDah, + errInvalidByz := NewErrByzantine(ctx, bServ.Blockstore(), validRoots, &rsmt2d.ErrByzantineData{ Axis: rsmt2d.Row, Index: 0, @@ -80,7 +79,7 @@ func TestBEFP_Validate(t *testing.T) { var errInvalid *ErrByzantine require.ErrorAs(t, errInvalidByz, &errInvalid) 
invalidBefp := CreateBadEncodingProof([]byte("hash"), 0, errInvalid) - return invalidBefp.Validate(&header.ExtendedHeader{DAH: &validDah}) + return invalidBefp.Validate(&header.ExtendedHeader{DAH: validRoots}) }, expectedResult: func(err error) { require.ErrorIs(t, err, errNMTTreeRootsMatch) @@ -93,7 +92,7 @@ func TestBEFP_Validate(t *testing.T) { sh := sharetest.RandShares(t, 2) nmtProof := nmt.NewInclusionProof(0, 1, nil, false) befp.Shares[0] = &ShareWithProof{sh[0], &nmtProof, rsmt2d.Row} - return proof.Validate(&header.ExtendedHeader{DAH: &dah}) + return proof.Validate(&header.ExtendedHeader{DAH: roots}) }, expectedResult: func(err error) { require.ErrorIs(t, err, errIncorrectShare) @@ -103,7 +102,7 @@ func TestBEFP_Validate(t *testing.T) { name: "invalid amount of shares", prepareFn: func() error { befp.Shares = befp.Shares[0 : len(befp.Shares)/2] - return proof.Validate(&header.ExtendedHeader{DAH: &dah}) + return proof.Validate(&header.ExtendedHeader{DAH: roots}) }, expectedResult: func(err error) { require.ErrorIs(t, err, errIncorrectAmountOfShares) @@ -113,7 +112,7 @@ func TestBEFP_Validate(t *testing.T) { name: "not enough shares to recompute the root", prepareFn: func() error { befp.Shares[0] = nil - return proof.Validate(&header.ExtendedHeader{DAH: &dah}) + return proof.Validate(&header.ExtendedHeader{DAH: roots}) }, expectedResult: func(err error) { require.ErrorIs(t, err, errIncorrectAmountOfShares) @@ -123,7 +122,7 @@ func TestBEFP_Validate(t *testing.T) { name: "index out of bounds", prepareFn: func() error { befp.Index = 100 - return proof.Validate(&header.ExtendedHeader{DAH: &dah}) + return proof.Validate(&header.ExtendedHeader{DAH: roots}) }, expectedResult: func(err error) { require.ErrorIs(t, err, errIncorrectIndex) @@ -136,7 +135,7 @@ func TestBEFP_Validate(t *testing.T) { RawHeader: core.Header{ Height: 42, }, - DAH: &dah, + DAH: roots, }) }, expectedResult: func(err error) { @@ -166,14 +165,14 @@ func TestIncorrectBadEncodingFraudProof(t 
*testing.T) { eds, err := ipld.AddShares(ctx, shares, bServ) require.NoError(t, err) - dah, err := share.NewRoot(eds) + roots, err := share.NewAxisRoots(eds) require.NoError(t, err) // get an arbitrary row rowIdx := squareSize / 2 shareProofs := make([]*ShareWithProof, 0, eds.Width()) for i := range shareProofs { - proof, err := GetShareWithProof(ctx, bServ, dah, shares[i], rsmt2d.Row, rowIdx, i) + proof, err := GetShareWithProof(ctx, bServ, roots, shares[i], rsmt2d.Row, rowIdx, i) require.NoError(t, err) shareProofs = append(shareProofs, proof) } @@ -189,7 +188,7 @@ func TestIncorrectBadEncodingFraudProof(t *testing.T) { RawHeader: core.Header{ Height: 420, }, - DAH: dah, + DAH: roots, Commit: &core.Commit{ BlockID: core.BlockID{ Hash: []byte("made up hash"), @@ -221,22 +220,22 @@ func TestBEFP_ValidateOutOfOrderShares(t *testing.T) { ) require.NoError(t, err, "failure to recompute the extended data square") - dah, err := da.NewDataAvailabilityHeader(eds) + roots, err := share.NewAxisRoots(eds) require.NoError(t, err) var errRsmt2d *rsmt2d.ErrByzantineData - err = eds.Repair(dah.RowRoots, dah.ColumnRoots) + err = eds.Repair(roots.RowRoots, roots.ColumnRoots) require.ErrorAs(t, err, &errRsmt2d) err = batchAddr.Commit() require.NoError(t, err) - byzantine := NewErrByzantine(ctx, bServ.Blockstore(), &dah, errRsmt2d) + byzantine := NewErrByzantine(ctx, bServ.Blockstore(), roots, errRsmt2d) var errByz *ErrByzantine require.ErrorAs(t, byzantine, &errByz) befp := CreateBadEncodingProof([]byte("hash"), 0, errByz) - err = befp.Validate(&header.ExtendedHeader{DAH: &dah}) + err = befp.Validate(&header.ExtendedHeader{DAH: roots}) require.NoError(t, err) } diff --git a/share/eds/byzantine/byzantine.go b/share/eds/byzantine/byzantine.go index 44307138c5..8f7b7c3a3b 100644 --- a/share/eds/byzantine/byzantine.go +++ b/share/eds/byzantine/byzantine.go @@ -6,9 +6,9 @@ import ( "github.com/ipfs/boxo/blockstore" - "github.com/celestiaorg/celestia-app/v2/pkg/da" 
"github.com/celestiaorg/rsmt2d" + "github.com/celestiaorg/celestia-node/share" "github.com/celestiaorg/celestia-node/share/ipld" ) @@ -32,7 +32,7 @@ func (e *ErrByzantine) Error() string { func NewErrByzantine( ctx context.Context, bStore blockstore.Blockstore, - dah *da.DataAvailabilityHeader, + roots *share.AxisRoots, errByz *rsmt2d.ErrByzantineData, ) error { sharesWithProof := make([]*ShareWithProof, len(errByz.Shares)) @@ -42,7 +42,7 @@ func NewErrByzantine( if len(share) == 0 { continue } - swp, err := GetShareWithProof(ctx, bGetter, dah, share, errByz.Axis, int(errByz.Index), index) + swp, err := GetShareWithProof(ctx, bGetter, roots, share, errByz.Axis, int(errByz.Index), index) if err != nil { log.Warn("requesting proof failed", "errByz", errByz, @@ -53,12 +53,12 @@ func NewErrByzantine( sharesWithProof[index] = swp // it is enough to collect half of the shares to construct the befp - if count++; count >= len(dah.RowRoots)/2 { + if count++; count >= len(roots.RowRoots)/2 { break } } - if count < len(dah.RowRoots)/2 { + if count < len(roots.RowRoots)/2 { return fmt.Errorf("failed to collect proof") } diff --git a/share/eds/byzantine/share_proof.go b/share/eds/byzantine/share_proof.go index d064656830..4dd2c082b6 100644 --- a/share/eds/byzantine/share_proof.go +++ b/share/eds/byzantine/share_proof.go @@ -3,10 +3,8 @@ package byzantine import ( "context" "errors" - "math" "github.com/ipfs/boxo/blockservice" - "github.com/ipfs/go-cid" logging "github.com/ipfs/go-log/v2" "github.com/celestiaorg/nmt" @@ -31,16 +29,16 @@ type ShareWithProof struct { } // Validate validates inclusion of the share under the given root CID. 
-func (s *ShareWithProof) Validate(dah *share.Root, axisType rsmt2d.Axis, axisIdx, shrIdx int) bool { +func (s *ShareWithProof) Validate(roots *share.AxisRoots, axisType rsmt2d.Axis, axisIdx, shrIdx int) bool { var rootHash []byte switch axisType { case rsmt2d.Row: - rootHash = rootHashForCoordinates(dah, s.Axis, shrIdx, axisIdx) + rootHash = rootHashForCoordinates(roots, s.Axis, shrIdx, axisIdx) case rsmt2d.Col: - rootHash = rootHashForCoordinates(dah, s.Axis, axisIdx, shrIdx) + rootHash = rootHashForCoordinates(roots, s.Axis, axisIdx, shrIdx) } - edsSize := len(dah.RowRoots) + edsSize := len(roots.RowRoots) isParity := shrIdx >= edsSize/2 || axisIdx >= edsSize/2 namespace := share.ParitySharesNamespace if !isParity { @@ -77,33 +75,31 @@ func (s *ShareWithProof) ShareWithProofToProto() *pb.Share { func GetShareWithProof( ctx context.Context, bGetter blockservice.BlockGetter, - dah *share.Root, + roots *share.AxisRoots, share share.Share, axisType rsmt2d.Axis, axisIdx, shrIdx int, ) (*ShareWithProof, error) { if axisType == rsmt2d.Col { axisIdx, shrIdx, axisType = shrIdx, axisIdx, rsmt2d.Row } - width := len(dah.RowRoots) + width := len(roots.RowRoots) // try row proofs - root := dah.RowRoots[axisIdx] - rootCid := ipld.MustCidFromNamespacedSha256(root) - proof, err := getProofsAt(ctx, bGetter, rootCid, shrIdx, width) + root := roots.RowRoots[axisIdx] + proof, err := ipld.GetProof(ctx, bGetter, root, shrIdx, width) if err == nil { shareWithProof := &ShareWithProof{ Share: share, Proof: &proof, Axis: rsmt2d.Row, } - if shareWithProof.Validate(dah, axisType, axisIdx, shrIdx) { + if shareWithProof.Validate(roots, axisType, axisIdx, shrIdx) { return shareWithProof, nil } } // try column proofs - root = dah.ColumnRoots[shrIdx] - rootCid = ipld.MustCidFromNamespacedSha256(root) - proof, err = getProofsAt(ctx, bGetter, rootCid, axisIdx, width) + root = roots.ColumnRoots[shrIdx] + proof, err = ipld.GetProof(ctx, bGetter, root, axisIdx, width) if err != nil { return nil, err 
} @@ -112,34 +108,12 @@ func GetShareWithProof( Proof: &proof, Axis: rsmt2d.Col, } - if shareWithProof.Validate(dah, axisType, axisIdx, shrIdx) { + if shareWithProof.Validate(roots, axisType, axisIdx, shrIdx) { return shareWithProof, nil } return nil, errors.New("failed to collect proof") } -func getProofsAt( - ctx context.Context, - bGetter blockservice.BlockGetter, - root cid.Cid, - index, - total int, -) (nmt.Proof, error) { - proofPath := make([]cid.Cid, 0, int(math.Sqrt(float64(total)))) - proofPath, err := ipld.GetProof(ctx, bGetter, root, proofPath, index, total) - if err != nil { - return nmt.Proof{}, err - } - - rangeProofs := make([][]byte, 0, len(proofPath)) - for i := len(proofPath) - 1; i >= 0; i-- { - node := ipld.NamespacedSha256FromCID(proofPath[i]) - rangeProofs = append(rangeProofs, node) - } - - return nmt.NewInclusionProof(index, index+1, rangeProofs, true), nil -} - func ProtoToShare(protoShares []*pb.Share) []*ShareWithProof { shares := make([]*ShareWithProof, len(protoShares)) for i, share := range protoShares { @@ -165,7 +139,7 @@ func ProtoToProof(protoProof *nmt_pb.Proof) nmt.Proof { ) } -func rootHashForCoordinates(r *share.Root, axisType rsmt2d.Axis, x, y int) []byte { +func rootHashForCoordinates(r *share.AxisRoots, axisType rsmt2d.Axis, x, y int) []byte { if axisType == rsmt2d.Row { return r.RowRoots[y] } diff --git a/share/eds/byzantine/share_proof_test.go b/share/eds/byzantine/share_proof_test.go deleted file mode 100644 index 2b5cb57bb9..0000000000 --- a/share/eds/byzantine/share_proof_test.go +++ /dev/null @@ -1,63 +0,0 @@ -package byzantine - -import ( - "context" - "testing" - "time" - - "github.com/stretchr/testify/require" - - "github.com/celestiaorg/celestia-app/v2/pkg/da" - "github.com/celestiaorg/rsmt2d" - - "github.com/celestiaorg/celestia-node/share" - "github.com/celestiaorg/celestia-node/share/ipld" - "github.com/celestiaorg/celestia-node/share/sharetest" -) - -func TestGetProof(t *testing.T) { - const width = 8 - - ctx, 
cancel := context.WithTimeout(context.Background(), time.Second*2) - defer cancel() - bServ := ipld.NewMemBlockservice() - - shares := sharetest.RandShares(t, width*width) - in, err := ipld.AddShares(ctx, shares, bServ) - require.NoError(t, err) - - dah, err := da.NewDataAvailabilityHeader(in) - require.NoError(t, err) - - for _, proofType := range []rsmt2d.Axis{rsmt2d.Row, rsmt2d.Col} { - var roots [][]byte - switch proofType { - case rsmt2d.Row: - roots = dah.RowRoots - case rsmt2d.Col: - roots = dah.ColumnRoots - } - for axisIdx := 0; axisIdx < width*2; axisIdx++ { - rootCid := ipld.MustCidFromNamespacedSha256(roots[axisIdx]) - for shrIdx := 0; shrIdx < width*2; shrIdx++ { - proof, err := getProofsAt(ctx, bServ, rootCid, shrIdx, int(in.Width())) - require.NoError(t, err) - node, err := ipld.GetLeaf(ctx, bServ, rootCid, shrIdx, int(in.Width())) - require.NoError(t, err) - inclusion := &ShareWithProof{ - Share: share.GetData(node.RawData()), - Proof: &proof, - Axis: proofType, - } - require.True(t, inclusion.Validate(&dah, proofType, axisIdx, shrIdx)) - // swap axis indexes to test if validation still works against the orthogonal coordinate - switch proofType { - case rsmt2d.Row: - require.True(t, inclusion.Validate(&dah, rsmt2d.Col, shrIdx, axisIdx)) - case rsmt2d.Col: - require.True(t, inclusion.Validate(&dah, rsmt2d.Row, shrIdx, axisIdx)) - } - } - } - } -} diff --git a/share/eds/cache/accessor_cache.go b/share/eds/cache/accessor_cache.go deleted file mode 100644 index e7ac043426..0000000000 --- a/share/eds/cache/accessor_cache.go +++ /dev/null @@ -1,267 +0,0 @@ -package cache - -import ( - "context" - "errors" - "fmt" - "io" - "sync" - "sync/atomic" - "time" - - "github.com/filecoin-project/dagstore" - "github.com/filecoin-project/dagstore/shard" - lru "github.com/hashicorp/golang-lru/v2" -) - -const defaultCloseTimeout = time.Minute - -var _ Cache = (*AccessorCache)(nil) - -// AccessorCache implements the Cache interface using an LRU cache backend. 
-type AccessorCache struct { - // The name is a prefix that will be used for cache metrics if they are enabled. - name string - // stripedLocks prevents simultaneous RW access to the blockstore cache for a shard. Instead - // of using only one lock or one lock per key, we stripe the shard keys across 256 locks. 256 is - // chosen because it 0-255 is the range of values we get looking at the last byte of the key. - stripedLocks [256]sync.Mutex - // Caches the blockstore for a given shard for shard read affinity, i.e., further reads will likely - // be from the same shard. Maps (shard key -> blockstore). - cache *lru.Cache[shard.Key, *accessorWithBlockstore] - - metrics *metrics -} - -// accessorWithBlockstore is the value that we store in the blockstore Cache. It implements the -// Accessor interface. -type accessorWithBlockstore struct { - sync.RWMutex - shardAccessor Accessor - // The blockstore is stored separately because each access to the blockstore over the shard - // accessor reopens the underlying CAR. - bs dagstore.ReadBlockstore - - done chan struct{} - refs atomic.Int32 - isClosed bool -} - -// Blockstore implements the Blockstore of the Accessor interface. It creates the blockstore on the -// first request and reuses the created instance for all subsequent requests. -func (s *accessorWithBlockstore) Blockstore() (dagstore.ReadBlockstore, error) { - s.Lock() - defer s.Unlock() - var err error - if s.bs == nil { - s.bs, err = s.shardAccessor.Blockstore() - } - return s.bs, err -} - -// Reader returns a new copy of the reader to read data. 
-func (s *accessorWithBlockstore) Reader() io.Reader { - return s.shardAccessor.Reader() -} - -func (s *accessorWithBlockstore) addRef() error { - s.Lock() - defer s.Unlock() - if s.isClosed { - // item is already closed and soon will be removed after all refs are released - return errCacheMiss - } - if s.refs.Add(1) == 1 { - // there were no refs previously and done channel was closed, reopen it by recreating - s.done = make(chan struct{}) - } - return nil -} - -func (s *accessorWithBlockstore) removeRef() { - s.Lock() - defer s.Unlock() - if s.refs.Add(-1) <= 0 { - close(s.done) - } -} - -func (s *accessorWithBlockstore) close() error { - s.Lock() - if s.isClosed { - s.Unlock() - // accessor will be closed by another goroutine - return nil - } - s.isClosed = true - done := s.done - s.Unlock() - - // wait until all references are released or timeout is reached. If timeout is reached, log an - // error and close the accessor forcefully. - select { - case <-done: - case <-time.After(defaultCloseTimeout): - log.Errorf("closing accessor, some readers didn't close the accessor within timeout,"+ - " amount left: %v", s.refs.Load()) - } - if err := s.shardAccessor.Close(); err != nil { - return fmt.Errorf("closing accessor: %w", err) - } - return nil -} - -func NewAccessorCache(name string, cacheSize int) (*AccessorCache, error) { - bc := &AccessorCache{ - name: name, - } - // Instantiate the blockstore Cache. - bslru, err := lru.NewWithEvict[shard.Key, *accessorWithBlockstore](cacheSize, bc.evictFn()) - if err != nil { - return nil, fmt.Errorf("failed to instantiate blockstore cache: %w", err) - } - bc.cache = bslru - return bc, nil -} - -// evictFn will be invoked when an item is evicted from the cache. 
-func (bc *AccessorCache) evictFn() func(shard.Key, *accessorWithBlockstore) { - return func(_ shard.Key, abs *accessorWithBlockstore) { - // we can release accessor from cache early, while it is being closed in parallel routine - go func() { - err := abs.close() - if err != nil { - bc.metrics.observeEvicted(true) - log.Errorf("couldn't close accessor after cache eviction: %s", err) - return - } - bc.metrics.observeEvicted(false) - }() - } -} - -// Get retrieves the Accessor for a given shard key from the Cache. If the Accessor is not in -// the Cache, it returns an errCacheMiss. -func (bc *AccessorCache) Get(key shard.Key) (Accessor, error) { - lk := &bc.stripedLocks[shardKeyToStriped(key)] - lk.Lock() - defer lk.Unlock() - - accessor, err := bc.get(key) - if err != nil { - bc.metrics.observeGet(false) - return nil, err - } - bc.metrics.observeGet(true) - return newRefCloser(accessor) -} - -func (bc *AccessorCache) get(key shard.Key) (*accessorWithBlockstore, error) { - abs, ok := bc.cache.Get(key) - if !ok { - return nil, errCacheMiss - } - return abs, nil -} - -// GetOrLoad attempts to get an item from the cache, and if not found, invokes -// the provided loader function to load it. 
-func (bc *AccessorCache) GetOrLoad( - ctx context.Context, - key shard.Key, - loader func(context.Context, shard.Key) (Accessor, error), -) (Accessor, error) { - lk := &bc.stripedLocks[shardKeyToStriped(key)] - lk.Lock() - defer lk.Unlock() - - abs, err := bc.get(key) - if err == nil { - // return accessor, only of it is not closed yet - accessorWithRef, err := newRefCloser(abs) - if err == nil { - bc.metrics.observeGet(true) - return accessorWithRef, nil - } - } - - // accessor not found in cache, so load new one using loader - accessor, err := loader(ctx, key) - if err != nil { - return nil, fmt.Errorf("unable to load accessor: %w", err) - } - - abs = &accessorWithBlockstore{ - shardAccessor: accessor, - } - - // Create a new accessor first to increment the reference count in it, so it cannot get evicted - // from the inner lru cache before it is used. - accessorWithRef, err := newRefCloser(abs) - if err != nil { - return nil, err - } - bc.cache.Add(key, abs) - return accessorWithRef, nil -} - -// Remove removes the Accessor for a given key from the cache. -func (bc *AccessorCache) Remove(key shard.Key) error { - lk := &bc.stripedLocks[shardKeyToStriped(key)] - lk.Lock() - accessor, err := bc.get(key) - lk.Unlock() - if errors.Is(err, errCacheMiss) { - // item is not in cache - return nil - } - if err = accessor.close(); err != nil { - return err - } - // The cache will call evictFn on removal, where accessor close will be called. - bc.cache.Remove(key) - return nil -} - -// EnableMetrics enables metrics for the cache. 
-func (bc *AccessorCache) EnableMetrics() (CloseMetricsFn, error) { - var err error - bc.metrics, err = newMetrics(bc) - if err != nil { - return nil, err - } - return bc.metrics.close, err -} - -// refCloser manages references to accessor from provided reader and removes the ref, when the -// Close is called -type refCloser struct { - *accessorWithBlockstore - closeFn func() -} - -// newRefCloser creates new refCloser -func newRefCloser(abs *accessorWithBlockstore) (*refCloser, error) { - if err := abs.addRef(); err != nil { - return nil, err - } - - var closeOnce sync.Once - return &refCloser{ - accessorWithBlockstore: abs, - closeFn: func() { - closeOnce.Do(abs.removeRef) - }, - }, nil -} - -func (c *refCloser) Close() error { - c.closeFn() - return nil -} - -// shardKeyToStriped returns the index of the lock to use for a given shard key. We use the last -// byte of the shard key as the pseudo-random index. -func shardKeyToStriped(sk shard.Key) byte { - return sk.String()[len(sk.String())-1] -} diff --git a/share/eds/cache/cache.go b/share/eds/cache/cache.go deleted file mode 100644 index ff38faafba..0000000000 --- a/share/eds/cache/cache.go +++ /dev/null @@ -1,49 +0,0 @@ -package cache - -import ( - "context" - "errors" - "io" - - "github.com/filecoin-project/dagstore" - "github.com/filecoin-project/dagstore/shard" - logging "github.com/ipfs/go-log/v2" - "go.opentelemetry.io/otel" -) - -var ( - log = logging.Logger("share/eds/cache") - meter = otel.Meter("eds_store_cache") -) - -var errCacheMiss = errors.New("accessor not found in blockstore cache") - -type CloseMetricsFn func() error - -// Cache is an interface that defines the basic Cache operations. -type Cache interface { - // Get retrieves an item from the Cache. - Get(shard.Key) (Accessor, error) - - // GetOrLoad attempts to get an item from the Cache and, if not found, invokes - // the provided loader function to load it into the Cache. 
- GetOrLoad( - ctx context.Context, - key shard.Key, - loader func(context.Context, shard.Key) (Accessor, error), - ) (Accessor, error) - - // Remove removes an item from Cache. - Remove(shard.Key) error - - // EnableMetrics enables metrics in Cache - EnableMetrics() (CloseMetricsFn, error) -} - -// Accessor is a interface type returned by cache, that allows to read raw data by reader or create -// readblockstore -type Accessor interface { - Blockstore() (dagstore.ReadBlockstore, error) - Reader() io.Reader - io.Closer -} diff --git a/share/eds/cache/doublecache.go b/share/eds/cache/doublecache.go deleted file mode 100644 index a7f2a4871e..0000000000 --- a/share/eds/cache/doublecache.go +++ /dev/null @@ -1,62 +0,0 @@ -package cache - -import ( - "errors" - - "github.com/filecoin-project/dagstore/shard" -) - -// DoubleCache represents a Cache that looks into multiple caches one by one. -type DoubleCache struct { - first, second Cache -} - -// NewDoubleCache creates a new DoubleCache with the provided caches. -func NewDoubleCache(first, second Cache) *DoubleCache { - return &DoubleCache{ - first: first, - second: second, - } -} - -// Get looks for an item in all the caches one by one and returns the Cache found item. 
-func (mc *DoubleCache) Get(key shard.Key) (Accessor, error) { - ac, err := mc.first.Get(key) - if err == nil { - return ac, nil - } - return mc.second.Get(key) -} - -// Remove removes an item from all underlying caches -func (mc *DoubleCache) Remove(key shard.Key) error { - err1 := mc.first.Remove(key) - err2 := mc.second.Remove(key) - return errors.Join(err1, err2) -} - -func (mc *DoubleCache) First() Cache { - return mc.first -} - -func (mc *DoubleCache) Second() Cache { - return mc.second -} - -func (mc *DoubleCache) EnableMetrics() (CloseMetricsFn, error) { - firstCloser, err := mc.first.EnableMetrics() - if err != nil { - return nil, err - } - secondCloser, err := mc.second.EnableMetrics() - if err != nil { - return nil, err - } - - return func() error { - if err := errors.Join(firstCloser(), secondCloser()); err != nil { - log.Warnw("failed to close metrics", "err", err) - } - return nil - }, nil -} diff --git a/share/eds/cache/noop.go b/share/eds/cache/noop.go deleted file mode 100644 index 5d7444054e..0000000000 --- a/share/eds/cache/noop.go +++ /dev/null @@ -1,50 +0,0 @@ -package cache - -import ( - "context" - "io" - - "github.com/filecoin-project/dagstore" - "github.com/filecoin-project/dagstore/shard" -) - -var _ Cache = (*NoopCache)(nil) - -// NoopCache implements noop version of Cache interface -type NoopCache struct{} - -func (n NoopCache) Get(shard.Key) (Accessor, error) { - return nil, errCacheMiss -} - -func (n NoopCache) GetOrLoad( - context.Context, shard.Key, - func(context.Context, shard.Key) (Accessor, error), -) (Accessor, error) { - return NoopAccessor{}, nil -} - -func (n NoopCache) Remove(shard.Key) error { - return nil -} - -func (n NoopCache) EnableMetrics() (CloseMetricsFn, error) { - return func() error { return nil }, nil -} - -var _ Accessor = (*NoopAccessor)(nil) - -// NoopAccessor implements noop version of Accessor interface -type NoopAccessor struct{} - -func (n NoopAccessor) Blockstore() (dagstore.ReadBlockstore, error) { - 
return nil, nil //nolint:nilnil -} - -func (n NoopAccessor) Reader() io.Reader { - return nil -} - -func (n NoopAccessor) Close() error { - return nil -} diff --git a/share/eds/close_once.go b/share/eds/close_once.go new file mode 100644 index 0000000000..2150ff7232 --- /dev/null +++ b/share/eds/close_once.go @@ -0,0 +1,100 @@ +package eds + +import ( + "context" + "errors" + "io" + "sync/atomic" + + "github.com/celestiaorg/rsmt2d" + + "github.com/celestiaorg/celestia-node/share" + "github.com/celestiaorg/celestia-node/share/shwap" +) + +var _ AccessorStreamer = (*closeOnce)(nil) + +var errAccessorClosed = errors.New("accessor is closed") + +type closeOnce struct { + f AccessorStreamer + closed atomic.Bool +} + +func WithClosedOnce(f AccessorStreamer) AccessorStreamer { + return &closeOnce{f: f} +} + +func (c *closeOnce) Close() error { + if c.closed.Swap(true) { + return nil + } + err := c.f.Close() + // release reference to the accessor to allow GC to collect all resources associated with it + c.f = nil + return err +} + +func (c *closeOnce) Size(ctx context.Context) int { + if c.closed.Load() { + return 0 + } + return c.f.Size(ctx) +} + +func (c *closeOnce) DataHash(ctx context.Context) (share.DataHash, error) { + if c.closed.Load() { + return nil, errAccessorClosed + } + return c.f.DataHash(ctx) +} + +func (c *closeOnce) AxisRoots(ctx context.Context) (*share.AxisRoots, error) { + if c.closed.Load() { + return nil, errAccessorClosed + } + return c.f.AxisRoots(ctx) +} + +func (c *closeOnce) Sample(ctx context.Context, rowIdx, colIdx int) (shwap.Sample, error) { + if c.closed.Load() { + return shwap.Sample{}, errAccessorClosed + } + return c.f.Sample(ctx, rowIdx, colIdx) +} + +func (c *closeOnce) AxisHalf( + ctx context.Context, + axisType rsmt2d.Axis, + axisIdx int, +) (AxisHalf, error) { + if c.closed.Load() { + return AxisHalf{}, errAccessorClosed + } + return c.f.AxisHalf(ctx, axisType, axisIdx) +} + +func (c *closeOnce) RowNamespaceData( + ctx 
context.Context, + namespace share.Namespace, + rowIdx int, +) (shwap.RowNamespaceData, error) { + if c.closed.Load() { + return shwap.RowNamespaceData{}, errAccessorClosed + } + return c.f.RowNamespaceData(ctx, namespace, rowIdx) +} + +func (c *closeOnce) Shares(ctx context.Context) ([]share.Share, error) { + if c.closed.Load() { + return nil, errAccessorClosed + } + return c.f.Shares(ctx) +} + +func (c *closeOnce) Reader() (io.Reader, error) { + if c.closed.Load() { + return nil, errAccessorClosed + } + return c.f.Reader() +} diff --git a/share/eds/close_once_test.go b/share/eds/close_once_test.go new file mode 100644 index 0000000000..c31d9ba099 --- /dev/null +++ b/share/eds/close_once_test.go @@ -0,0 +1,88 @@ +package eds + +import ( + "context" + "io" + "testing" + "testing/iotest" + + "github.com/stretchr/testify/require" + + "github.com/celestiaorg/rsmt2d" + + "github.com/celestiaorg/celestia-node/share" + "github.com/celestiaorg/celestia-node/share/shwap" +) + +func TestWithClosedOnce(t *testing.T) { + ctx := context.Background() + stub := &stubEdsAccessorCloser{} + closedOnce := WithClosedOnce(stub) + + _, err := closedOnce.Sample(ctx, 0, 0) + require.NoError(t, err) + _, err = closedOnce.AxisHalf(ctx, rsmt2d.Row, 0) + require.NoError(t, err) + _, err = closedOnce.RowNamespaceData(ctx, share.Namespace{}, 0) + require.NoError(t, err) + _, err = closedOnce.Shares(ctx) + require.NoError(t, err) + + require.NoError(t, closedOnce.Close()) + require.True(t, stub.closed) + + // Ensure that the underlying file is not accessible after closing + _, err = closedOnce.Sample(ctx, 0, 0) + require.ErrorIs(t, err, errAccessorClosed) + _, err = closedOnce.AxisHalf(ctx, rsmt2d.Row, 0) + require.ErrorIs(t, err, errAccessorClosed) + _, err = closedOnce.RowNamespaceData(ctx, share.Namespace{}, 0) + require.ErrorIs(t, err, errAccessorClosed) + _, err = closedOnce.Shares(ctx) + require.ErrorIs(t, err, errAccessorClosed) +} + +type stubEdsAccessorCloser struct { + closed bool +} 
+ +func (s *stubEdsAccessorCloser) Size(context.Context) int { + return 0 +} + +func (s *stubEdsAccessorCloser) DataHash(context.Context) (share.DataHash, error) { + return share.DataHash{}, nil +} + +func (s *stubEdsAccessorCloser) AxisRoots(context.Context) (*share.AxisRoots, error) { + return &share.AxisRoots{}, nil +} + +func (s *stubEdsAccessorCloser) Sample(context.Context, int, int) (shwap.Sample, error) { + return shwap.Sample{}, nil +} + +func (s *stubEdsAccessorCloser) AxisHalf(context.Context, rsmt2d.Axis, int) (AxisHalf, error) { + return AxisHalf{}, nil +} + +func (s *stubEdsAccessorCloser) RowNamespaceData( + context.Context, + share.Namespace, + int, +) (shwap.RowNamespaceData, error) { + return shwap.RowNamespaceData{}, nil +} + +func (s *stubEdsAccessorCloser) Shares(context.Context) ([]share.Share, error) { + return nil, nil +} + +func (s *stubEdsAccessorCloser) Reader() (io.Reader, error) { + return iotest.ErrReader(nil), nil +} + +func (s *stubEdsAccessorCloser) Close() error { + s.closed = true + return nil +} diff --git a/share/eds/eds.go b/share/eds/eds.go deleted file mode 100644 index 03753bd44c..0000000000 --- a/share/eds/eds.go +++ /dev/null @@ -1,342 +0,0 @@ -package eds - -import ( - "bytes" - "context" - "errors" - "fmt" - "io" - "math" - - "github.com/ipfs/go-cid" - "github.com/ipld/go-car" - "github.com/ipld/go-car/util" - "github.com/tendermint/tendermint/crypto/merkle" - corebytes "github.com/tendermint/tendermint/libs/bytes" - coretypes "github.com/tendermint/tendermint/proto/tendermint/types" - "github.com/tendermint/tendermint/types" - - pkgproof "github.com/celestiaorg/celestia-app/v2/pkg/proof" - "github.com/celestiaorg/celestia-app/v2/pkg/wrapper" - "github.com/celestiaorg/go-square/shares" - "github.com/celestiaorg/nmt" - "github.com/celestiaorg/rsmt2d" - - "github.com/celestiaorg/celestia-node/libs/utils" - "github.com/celestiaorg/celestia-node/share" - "github.com/celestiaorg/celestia-node/share/ipld" -) - -var 
ErrEmptySquare = errors.New("share: importing empty data") - -// WriteEDS writes the entire EDS into the given io.Writer as CARv1 file. -// This includes all shares in quadrant order, followed by all inner nodes of the NMT tree. -// Order: [ Carv1Header | Q1 | Q2 | Q3 | Q4 | inner nodes ] -// For more information about the header: https://ipld.io/specs/transport/car/carv1/#header -func WriteEDS(ctx context.Context, eds *rsmt2d.ExtendedDataSquare, w io.Writer) (err error) { - ctx, span := tracer.Start(ctx, "write-eds") - defer func() { - utils.SetStatusAndEnd(span, err) - }() - - // Creates and writes Carv1Header. Roots are the eds Row + Col roots - err = writeHeader(eds, w) - if err != nil { - return fmt.Errorf("share: writing carv1 header: %w", err) - } - // Iterates over shares in quadrant order via eds.GetCell - err = writeQuadrants(eds, w) - if err != nil { - return fmt.Errorf("share: writing shares: %w", err) - } - - // Iterates over proofs and writes them to the CAR - err = writeProofs(ctx, eds, w) - if err != nil { - return fmt.Errorf("share: writing proofs: %w", err) - } - return nil -} - -// writeHeader creates a CarV1 header using the EDS's Row and Column roots as the list of DAG roots. -func writeHeader(eds *rsmt2d.ExtendedDataSquare, w io.Writer) error { - rootCids, err := rootsToCids(eds) - if err != nil { - return fmt.Errorf("getting root cids: %w", err) - } - - return car.WriteHeader(&car.CarHeader{ - Roots: rootCids, - Version: 1, - }, w) -} - -// writeQuadrants reorders the shares to quadrant order and writes them to the CARv1 file. 
-func writeQuadrants(eds *rsmt2d.ExtendedDataSquare, w io.Writer) error { - hasher := nmt.NewNmtHasher(share.NewSHA256Hasher(), share.NamespaceSize, ipld.NMTIgnoreMaxNamespace) - shares := quadrantOrder(eds) - for _, share := range shares { - leaf, err := hasher.HashLeaf(share) - if err != nil { - return fmt.Errorf("hashing share: %w", err) - } - cid, err := ipld.CidFromNamespacedSha256(leaf) - if err != nil { - return fmt.Errorf("getting cid from share: %w", err) - } - err = util.LdWrite(w, cid.Bytes(), share) - if err != nil { - return fmt.Errorf("writing share to file: %w", err) - } - } - return nil -} - -// writeProofs iterates over the in-memory blockstore's keys and writes all inner nodes to the -// CARv1 file. -func writeProofs(ctx context.Context, eds *rsmt2d.ExtendedDataSquare, w io.Writer) error { - // check if proofs are collected by ipld.ProofsAdder in previous reconstructions of eds - proofs, err := getProofs(ctx, eds) - if err != nil { - return fmt.Errorf("recomputing proofs: %w", err) - } - - for id, proof := range proofs { - err := util.LdWrite(w, id.Bytes(), proof) - if err != nil { - return fmt.Errorf("writing proof to the car: %w", err) - } - } - return nil -} - -func getProofs(ctx context.Context, eds *rsmt2d.ExtendedDataSquare) (map[cid.Cid][]byte, error) { - // check if there are proofs collected by ipld.ProofsAdder in previous reconstruction of eds - if adder := ipld.ProofsAdderFromCtx(ctx); adder != nil { - defer adder.Purge() - return adder.Proofs(), nil - } - - // recompute proofs from eds - shares := eds.Flattened() - shareCount := len(shares) - if shareCount == 0 { - return nil, ErrEmptySquare - } - odsWidth := int(math.Sqrt(float64(shareCount)) / 2) - - // this adder ignores leaves, so that they are not added to the store we iterate through in - // writeProofs - adder := ipld.NewProofsAdder(odsWidth * 2) - defer adder.Purge() - - eds, err := rsmt2d.ImportExtendedDataSquare( - shares, - share.DefaultRSMT2DCodec(), - 
wrapper.NewConstructor(uint64(odsWidth), - nmt.NodeVisitor(adder.VisitFn())), - ) - if err != nil { - return nil, fmt.Errorf("recomputing data square: %w", err) - } - // compute roots - if _, err = eds.RowRoots(); err != nil { - return nil, fmt.Errorf("computing row roots: %w", err) - } - - return adder.Proofs(), nil -} - -// quadrantOrder reorders the shares in the EDS to quadrant row-by-row order, prepending the -// respective namespace to the shares. -// e.g. [ Q1 R1 | Q1 R2 | Q1 R3 | Q1 R4 | Q2 R1 | Q2 R2 .... ] -func quadrantOrder(eds *rsmt2d.ExtendedDataSquare) [][]byte { - size := eds.Width() * eds.Width() - shares := make([][]byte, size) - - quadrantWidth := int(eds.Width() / 2) - quadrantSize := quadrantWidth * quadrantWidth - for i := 0; i < quadrantWidth; i++ { - for j := 0; j < quadrantWidth; j++ { - cells := getQuadrantCells(eds, uint(i), uint(j)) - innerOffset := i*quadrantWidth + j - for quadrant := 0; quadrant < 4; quadrant++ { - shares[(quadrant*quadrantSize)+innerOffset] = prependNamespace(quadrant, cells[quadrant]) - } - } - } - return shares -} - -// getQuadrantCells returns the cell of each EDS quadrant with the passed inner-quadrant coordinates -func getQuadrantCells(eds *rsmt2d.ExtendedDataSquare, i, j uint) [][]byte { - cells := make([][]byte, 4) - quadrantWidth := eds.Width() / 2 - cells[0] = eds.GetCell(i, j) - cells[1] = eds.GetCell(i, j+quadrantWidth) - cells[2] = eds.GetCell(i+quadrantWidth, j) - cells[3] = eds.GetCell(i+quadrantWidth, j+quadrantWidth) - return cells -} - -// prependNamespace adds the namespace to the passed share if in the first quadrant, -// otherwise it adds the ParitySharesNamespace to the beginning. -func prependNamespace(quadrant int, shr share.Share) []byte { - namespacedShare := make([]byte, 0, share.NamespaceSize+share.Size) - switch quadrant { - case 0: - return append(append(namespacedShare, share.GetNamespace(shr)...), shr...) 
- case 1, 2, 3: - return append(append(namespacedShare, share.ParitySharesNamespace...), shr...) - default: - panic("invalid quadrant") - } -} - -// rootsToCids converts the EDS's Row and Column roots to CIDs. -func rootsToCids(eds *rsmt2d.ExtendedDataSquare) ([]cid.Cid, error) { - rowRoots, err := eds.RowRoots() - if err != nil { - return nil, err - } - colRoots, err := eds.ColRoots() - if err != nil { - return nil, err - } - - roots := make([][]byte, 0, len(rowRoots)+len(colRoots)) - roots = append(roots, rowRoots...) - roots = append(roots, colRoots...) - rootCids := make([]cid.Cid, len(roots)) - for i, r := range roots { - rootCids[i], err = ipld.CidFromNamespacedSha256(r) - if err != nil { - return nil, fmt.Errorf("getting cid from root: %w", err) - } - } - return rootCids, nil -} - -// ReadEDS reads the first EDS quadrant (1/4) from an io.Reader CAR file. -// Only the first quadrant will be read, which represents the original data. -// The returned EDS is guaranteed to be full and valid against the DataRoot, otherwise ReadEDS -// errors. -func ReadEDS(ctx context.Context, r io.Reader, root share.DataHash) (eds *rsmt2d.ExtendedDataSquare, err error) { - _, span := tracer.Start(ctx, "read-eds") - defer func() { - utils.SetStatusAndEnd(span, err) - }() - - carReader, err := car.NewCarReader(r) - if err != nil { - return nil, fmt.Errorf("share: reading car file: %w", err) - } - - // car header includes both row and col roots in header - odsWidth := len(carReader.Header.Roots) / 4 - odsSquareSize := odsWidth * odsWidth - shares := make([][]byte, odsSquareSize) - // the first quadrant is stored directly after the header, - // so we can just read the first odsSquareSize blocks - for i := 0; i < odsSquareSize; i++ { - block, err := carReader.Next() - if err != nil { - return nil, fmt.Errorf("share: reading next car entry: %w", err) - } - // the stored first quadrant shares are wrapped with the namespace twice. 
- // we cut it off here, because it is added again while importing to the tree below - shares[i] = share.GetData(block.RawData()) - } - - // use proofs adder if provided, to cache collected proofs while recomputing the eds - var opts []nmt.Option - visitor := ipld.ProofsAdderFromCtx(ctx).VisitFn() - if visitor != nil { - opts = append(opts, nmt.NodeVisitor(visitor)) - } - - eds, err = rsmt2d.ComputeExtendedDataSquare( - shares, - share.DefaultRSMT2DCodec(), - wrapper.NewConstructor(uint64(odsWidth), opts...), - ) - if err != nil { - return nil, fmt.Errorf("share: computing eds: %w", err) - } - - newDah, err := share.NewRoot(eds) - if err != nil { - return nil, err - } - if !bytes.Equal(newDah.Hash(), root) { - return nil, fmt.Errorf( - "share: content integrity mismatch: imported root %s doesn't match expected root %s", - newDah.Hash(), - root, - ) - } - return eds, nil -} - -// ProveShares generates a share proof for a share range. -// The share range, defined by start and end, is end-exclusive. -func ProveShares(eds *rsmt2d.ExtendedDataSquare, start, end int) (*types.ShareProof, error) { - log.Debugw("proving share range", "start", start, "end", end) - - odsShares, err := shares.FromBytes(eds.FlattenedODS()) - if err != nil { - return nil, err - } - nID, err := pkgproof.ParseNamespace(odsShares, start, end) - if err != nil { - return nil, err - } - log.Debugw("generating the share proof", "start", start, "end", end) - proof, err := pkgproof.NewShareInclusionProofFromEDS(eds, nID, shares.NewRange(start, end)) - if err != nil { - return nil, err - } - coreProof := toCoreShareProof(proof) - return &coreProof, nil -} - -// toCoreShareProof utility function that converts a share proof defined in app -// to the share proof defined in node. -// This will be removed once we unify both these proofs. 
-// Reference issue: https://github.com/celestiaorg/celestia-app/issues/3734 -func toCoreShareProof(appShareProof pkgproof.ShareProof) types.ShareProof { - shareProofs := make([]*coretypes.NMTProof, 0) - for _, proof := range appShareProof.ShareProofs { - shareProofs = append(shareProofs, &coretypes.NMTProof{ - Start: proof.Start, - End: proof.End, - Nodes: proof.Nodes, - LeafHash: proof.LeafHash, - }) - } - - rowRoots := make([]corebytes.HexBytes, 0) - rowProofs := make([]*merkle.Proof, 0) - for index, proof := range appShareProof.RowProof.Proofs { - rowRoots = append(rowRoots, appShareProof.RowProof.RowRoots[index]) - rowProofs = append(rowProofs, &merkle.Proof{ - Total: proof.Total, - Index: proof.Index, - LeafHash: proof.LeafHash, - Aunts: proof.Aunts, - }) - } - - return types.ShareProof{ - Data: appShareProof.Data, - ShareProofs: shareProofs, - NamespaceID: appShareProof.NamespaceId, - RowProof: types.RowProof{ - RowRoots: rowRoots, - Proofs: rowProofs, - StartRow: appShareProof.RowProof.StartRow, - EndRow: appShareProof.RowProof.EndRow, - }, - NamespaceVersion: appShareProof.NamespaceVersion, - } -} diff --git a/share/eds/eds_test.go b/share/eds/eds_test.go deleted file mode 100644 index fb1c13eaf1..0000000000 --- a/share/eds/eds_test.go +++ /dev/null @@ -1,350 +0,0 @@ -package eds - -import ( - "bytes" - "context" - "embed" - "encoding/json" - "fmt" - "os" - "testing" - - bstore "github.com/ipfs/boxo/blockstore" - ds "github.com/ipfs/go-datastore" - carv1 "github.com/ipld/go-car" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - "github.com/tendermint/tendermint/libs/rand" - coretypes "github.com/tendermint/tendermint/types" - - "github.com/celestiaorg/celestia-app/v2/pkg/appconsts" - "github.com/celestiaorg/celestia-app/v2/pkg/da" - pkgproof "github.com/celestiaorg/celestia-app/v2/pkg/proof" - "github.com/celestiaorg/go-square/namespace" - "github.com/celestiaorg/go-square/shares" - "github.com/celestiaorg/rsmt2d" - - 
"github.com/celestiaorg/celestia-node/share" - "github.com/celestiaorg/celestia-node/share/eds/edstest" -) - -//go:embed "testdata/example-root.json" -var exampleRoot string - -//go:embed "testdata/example.car" -var f embed.FS - -func TestQuadrantOrder(t *testing.T) { - testCases := []struct { - name string - squareSize int - }{ - {"smol", 2}, - {"still smol", 8}, - {"default mainnet", appconsts.DefaultGovMaxSquareSize}, - {"max", share.MaxSquareSize}, - } - - testShareSize := 64 - - for _, tc := range testCases { - t.Run(tc.name, func(t *testing.T) { - shares := make([][]byte, tc.squareSize*tc.squareSize) - - for i := 0; i < tc.squareSize*tc.squareSize; i++ { - shares[i] = rand.Bytes(testShareSize) - } - - eds, err := rsmt2d.ComputeExtendedDataSquare(shares, share.DefaultRSMT2DCodec(), rsmt2d.NewDefaultTree) - require.NoError(t, err) - - res := quadrantOrder(eds) - for _, s := range res { - require.Len(t, s, testShareSize+share.NamespaceSize) - } - - for q := 0; q < 4; q++ { - for i := 0; i < tc.squareSize; i++ { - for j := 0; j < tc.squareSize; j++ { - resIndex := q*tc.squareSize*tc.squareSize + i*tc.squareSize + j - edsRow := q/2*tc.squareSize + i - edsCol := (q%2)*tc.squareSize + j - - assert.Equal(t, res[resIndex], prependNamespace(q, eds.Row(uint(edsRow))[edsCol])) - } - } - } - }) - } -} - -func TestWriteEDS(t *testing.T) { - writeRandomEDS(t) -} - -func TestWriteEDSHeaderRoots(t *testing.T) { - eds := writeRandomEDS(t) - f := openWrittenEDS(t) - defer f.Close() - - reader, err := carv1.NewCarReader(f) - require.NoError(t, err, "error creating car reader") - roots, err := rootsToCids(eds) - require.NoError(t, err, "error converting roots to cids") - require.Equal(t, roots, reader.Header.Roots) -} - -func TestWriteEDSStartsWithLeaves(t *testing.T) { - eds := writeRandomEDS(t) - f := openWrittenEDS(t) - defer f.Close() - - reader, err := carv1.NewCarReader(f) - require.NoError(t, err, "error creating car reader") - block, err := reader.Next() - 
require.NoError(t, err, "error getting first block") - - require.Equal(t, share.GetData(block.RawData()), eds.GetCell(0, 0)) -} - -func TestWriteEDSIncludesRoots(t *testing.T) { - writeRandomEDS(t) - f := openWrittenEDS(t) - defer f.Close() - - bs := bstore.NewBlockstore(ds.NewMapDatastore()) - ctx, cancel := context.WithCancel(context.Background()) - t.Cleanup(cancel) - loaded, err := carv1.LoadCar(ctx, bs, f) - require.NoError(t, err, "error loading car file") - for _, root := range loaded.Roots { - ok, err := bs.Has(context.Background(), root) - require.NoError(t, err, "error checking if blockstore has root") - require.True(t, ok, "blockstore does not have root") - } -} - -func TestWriteEDSInQuadrantOrder(t *testing.T) { - eds := writeRandomEDS(t) - f := openWrittenEDS(t) - defer f.Close() - - reader, err := carv1.NewCarReader(f) - require.NoError(t, err, "error creating car reader") - - shares := quadrantOrder(eds) - for i := 0; i < len(shares); i++ { - block, err := reader.Next() - require.NoError(t, err, "error getting block") - require.Equal(t, block.RawData(), shares[i]) - } -} - -func TestReadWriteRoundtrip(t *testing.T) { - eds := writeRandomEDS(t) - dah, err := share.NewRoot(eds) - require.NoError(t, err) - f := openWrittenEDS(t) - defer f.Close() - - loaded, err := ReadEDS(context.Background(), f, dah.Hash()) - require.NoError(t, err, "error reading EDS from file") - - rowRoots, err := eds.RowRoots() - require.NoError(t, err) - loadedRowRoots, err := loaded.RowRoots() - require.NoError(t, err) - require.Equal(t, rowRoots, loadedRowRoots) - - colRoots, err := eds.ColRoots() - require.NoError(t, err) - loadedColRoots, err := loaded.ColRoots() - require.NoError(t, err) - require.Equal(t, colRoots, loadedColRoots) -} - -func TestReadEDS(t *testing.T) { - f, err := f.Open("testdata/example.car") - require.NoError(t, err, "error opening file") - - var dah da.DataAvailabilityHeader - err = json.Unmarshal([]byte(exampleRoot), &dah) - require.NoError(t, err, 
"error unmarshaling example root") - - loaded, err := ReadEDS(context.Background(), f, dah.Hash()) - require.NoError(t, err, "error reading EDS from file") - rowRoots, err := loaded.RowRoots() - require.NoError(t, err) - require.Equal(t, dah.RowRoots, rowRoots) - colRoots, err := loaded.ColRoots() - require.NoError(t, err) - require.Equal(t, dah.ColumnRoots, colRoots) -} - -func TestReadEDSContentIntegrityMismatch(t *testing.T) { - writeRandomEDS(t) - dah, err := da.NewDataAvailabilityHeader(edstest.RandEDS(t, 4)) - require.NoError(t, err) - f := openWrittenEDS(t) - defer f.Close() - - _, err = ReadEDS(context.Background(), f, dah.Hash()) - require.ErrorContains(t, err, "share: content integrity mismatch: imported root") -} - -// BenchmarkReadWriteEDS benchmarks the time it takes to write and read an EDS from disk. The -// benchmark is run with a 4x4 ODS to a 64x64 ODS - a higher value can be used, but it will run for -// much longer. -func BenchmarkReadWriteEDS(b *testing.B) { - ctx, cancel := context.WithCancel(context.Background()) - b.Cleanup(cancel) - for originalDataWidth := 4; originalDataWidth <= 64; originalDataWidth *= 2 { - eds := edstest.RandEDS(b, originalDataWidth) - dah, err := share.NewRoot(eds) - require.NoError(b, err) - b.Run(fmt.Sprintf("Writing %dx%d", originalDataWidth, originalDataWidth), func(b *testing.B) { - b.ReportAllocs() - for i := 0; i < b.N; i++ { - f := new(bytes.Buffer) - err := WriteEDS(ctx, eds, f) - require.NoError(b, err) - } - }) - b.Run(fmt.Sprintf("Reading %dx%d", originalDataWidth, originalDataWidth), func(b *testing.B) { - b.ReportAllocs() - for i := 0; i < b.N; i++ { - b.StopTimer() - f := new(bytes.Buffer) - _ = WriteEDS(ctx, eds, f) - b.StartTimer() - _, err := ReadEDS(ctx, f, dah.Hash()) - require.NoError(b, err) - } - }) - } -} - -func writeRandomEDS(t *testing.T) *rsmt2d.ExtendedDataSquare { - t.Helper() - ctx, cancel := context.WithCancel(context.Background()) - t.Cleanup(cancel) - tmpDir := t.TempDir() - err := 
os.Chdir(tmpDir) - require.NoError(t, err, "error changing to the temporary test directory") - f, err := os.OpenFile("test.car", os.O_WRONLY|os.O_CREATE, 0o600) - require.NoError(t, err, "error opening file") - - eds := edstest.RandEDS(t, 4) - err = WriteEDS(ctx, eds, f) - require.NoError(t, err, "error writing EDS to file") - f.Close() - return eds -} - -func openWrittenEDS(t *testing.T) *os.File { - t.Helper() - f, err := os.OpenFile("test.car", os.O_RDONLY, 0o600) - require.NoError(t, err, "error opening file") - return f -} - -/* -use this function as needed to create new test data. - -example: - - func Test_CreateData(t *testing.T) { - createTestData(t, "celestia-node/share/eds/testdata") - } -*/ -func createTestData(t *testing.T, testDir string) { //nolint:unused - t.Helper() - ctx, cancel := context.WithCancel(context.Background()) - t.Cleanup(cancel) - err := os.Chdir(testDir) - require.NoError(t, err, "changing to the directory") - os.RemoveAll("example.car") - require.NoError(t, err, "removing old file") - f, err := os.OpenFile("example.car", os.O_WRONLY|os.O_CREATE, 0o600) - require.NoError(t, err, "opening file") - - eds := edstest.RandEDS(t, 4) - err = WriteEDS(ctx, eds, f) - require.NoError(t, err, "writing EDS to file") - f.Close() - dah, err := share.NewRoot(eds) - require.NoError(t, err) - - header, err := json.MarshalIndent(dah, "", "") - require.NoError(t, err, "marshaling example root") - os.RemoveAll("example-root.json") - require.NoError(t, err, "removing old file") - f, err = os.OpenFile("example-root.json", os.O_WRONLY|os.O_CREATE, 0o600) - require.NoError(t, err, "opening file") - _, err = f.Write(header) - require.NoError(t, err, "writing example root to file") - f.Close() -} - -func TestProveShares(t *testing.T) { - ns := namespace.RandomBlobNamespace() - eds, dataRoot := edstest.RandEDSWithNamespace( - t, - ns.Bytes(), - 16, - ) - - tests := map[string]struct { - start, end int - expectedProof coretypes.ShareProof - expectErr bool - }{ - 
"start share == end share": { - start: 2, - end: 2, - expectErr: true, - }, - "start share > end share": { - start: 3, - end: 2, - expectErr: true, - }, - "start share > number of shares in the block": { - start: 2000, - end: 2010, - expectErr: true, - }, - "end share > number of shares in the block": { - start: 1, - end: 2010, - expectErr: true, - }, - "valid case": { - start: 0, - end: 2, - expectedProof: func() coretypes.ShareProof { - proof, err := pkgproof.NewShareInclusionProofFromEDS( - eds, - ns, - shares.NewRange(0, 2), - ) - require.NoError(t, err) - require.NoError(t, proof.Validate(dataRoot.Hash())) - return toCoreShareProof(proof) - }(), - }, - } - - for name, tc := range tests { - t.Run(name, func(t *testing.T) { - result, err := ProveShares(eds, tc.start, tc.end) - if tc.expectErr { - assert.Error(t, err) - } else { - assert.NoError(t, err) - assert.Equal(t, tc.expectedProof, *result) - assert.NoError(t, result.Validate(dataRoot.Hash())) - } - }) - } -} diff --git a/share/eds/edstest/testing.go b/share/eds/edstest/testing.go index c5131f8656..56d5b3f4b7 100644 --- a/share/eds/edstest/testing.go +++ b/share/eds/edstest/testing.go @@ -1,6 +1,7 @@ package edstest import ( + "crypto/rand" "testing" "github.com/stretchr/testify/require" @@ -17,7 +18,7 @@ import ( "github.com/celestiaorg/celestia-app/v2/x/blob/types" "github.com/celestiaorg/go-square/blob" "github.com/celestiaorg/go-square/namespace" - "github.com/celestiaorg/go-square/shares" + appshares "github.com/celestiaorg/go-square/shares" "github.com/celestiaorg/go-square/square" "github.com/celestiaorg/nmt" "github.com/celestiaorg/rsmt2d" @@ -31,37 +32,83 @@ const ( testChainID = "private" ) -func RandByzantineEDS(t testing.TB, size int, options ...nmt.Option) *rsmt2d.ExtendedDataSquare { - eds := RandEDS(t, size) +func RandByzantineEDS(t testing.TB, odsSize int, options ...nmt.Option) *rsmt2d.ExtendedDataSquare { + eds := RandEDS(t, odsSize) shares := eds.Flattened() 
copy(share.GetData(shares[0]), share.GetData(shares[1])) // corrupting eds - eds, err := rsmt2d.ImportExtendedDataSquare(shares, + eds, err := rsmt2d.ImportExtendedDataSquare( + shares, share.DefaultRSMT2DCodec(), - wrapper.NewConstructor(uint64(size), - options...)) + wrapper.NewConstructor(uint64(odsSize), options...), + ) require.NoError(t, err, "failure to recompute the extended data square") return eds } // RandEDS generates EDS filled with the random data with the given size for original square. -func RandEDS(t testing.TB, size int) *rsmt2d.ExtendedDataSquare { - shares := sharetest.RandShares(t, size*size) - eds, err := rsmt2d.ComputeExtendedDataSquare(shares, share.DefaultRSMT2DCodec(), wrapper.NewConstructor(uint64(size))) +func RandEDS(t testing.TB, odsSize int) *rsmt2d.ExtendedDataSquare { + shares := sharetest.RandShares(t, odsSize*odsSize) + eds, err := rsmt2d.ComputeExtendedDataSquare( + shares, + share.DefaultRSMT2DCodec(), + wrapper.NewConstructor(uint64(odsSize)), + ) require.NoError(t, err, "failure to recompute the extended data square") return eds } +// RandEDSWithTailPadding generates EDS of given ODS size filled with randomized and tail padding shares. +func RandEDSWithTailPadding(t testing.TB, odsSize, padding int) *rsmt2d.ExtendedDataSquare { + shares := sharetest.RandShares(t, odsSize*odsSize) + for i := len(shares) - padding; i < len(shares); i++ { + paddingShare := appshares.TailPaddingShare() + shares[i] = paddingShare.ToBytes() + } + + eds, err := rsmt2d.ComputeExtendedDataSquare( + shares, + share.DefaultRSMT2DCodec(), + wrapper.NewConstructor(uint64(odsSize)), + ) + require.NoError(t, err, "failure to recompute the extended data square") + return eds +} + +// RandEDSWithNamespace generates EDS with given square size. Returned EDS will have +// namespacedAmount of shares with the given namespace. 
func RandEDSWithNamespace( t testing.TB, namespace share.Namespace, - size int, -) (*rsmt2d.ExtendedDataSquare, *share.Root) { - shares := sharetest.RandSharesWithNamespace(t, namespace, size*size) - eds, err := rsmt2d.ComputeExtendedDataSquare(shares, share.DefaultRSMT2DCodec(), wrapper.NewConstructor(uint64(size))) + namespacedAmount, odsSize int, +) (*rsmt2d.ExtendedDataSquare, *share.AxisRoots) { + shares := sharetest.RandSharesWithNamespace(t, namespace, namespacedAmount, odsSize*odsSize) + eds, err := rsmt2d.ComputeExtendedDataSquare( + shares, + share.DefaultRSMT2DCodec(), + wrapper.NewConstructor(uint64(odsSize)), + ) require.NoError(t, err, "failure to recompute the extended data square") - dah, err := share.NewRoot(eds) + roots, err := share.NewAxisRoots(eds) require.NoError(t, err) - return eds, dah + return eds, roots +} + +// RandomAxisRoots generates random share.AxisRoots for the given eds size. +func RandomAxisRoots(t testing.TB, edsSize int) *share.AxisRoots { + roots := make([][]byte, edsSize*2) + for i := range roots { + root := make([]byte, edsSize) + _, err := rand.Read(root) + require.NoError(t, err) + roots[i] = root + } + + rows := roots[:edsSize] + cols := roots[edsSize:] + return &share.AxisRoots{ + RowRoots: rows, + ColumnRoots: cols, + } } // GenerateTestBlock generates a set of test blocks with a specific blob size and number of @@ -94,7 +141,7 @@ func GenerateTestBlock( require.NoError(t, err) // erasure the data square which we use to create the data root. 
- eds, err := da.ExtendShares(shares.ToBytes(dataSquare)) + eds, err := da.ExtendShares(appshares.ToBytes(dataSquare)) require.NoError(t, err) // create the new data root by creating the data availability header (merkle diff --git a/share/eds/inverted_index.go b/share/eds/inverted_index.go deleted file mode 100644 index 799ab6208d..0000000000 --- a/share/eds/inverted_index.go +++ /dev/null @@ -1,102 +0,0 @@ -package eds - -import ( - "context" - "errors" - "fmt" - "runtime" - - "github.com/dgraph-io/badger/v4/options" - "github.com/filecoin-project/dagstore/index" - "github.com/filecoin-project/dagstore/shard" - ds "github.com/ipfs/go-datastore" - dsbadger "github.com/ipfs/go-ds-badger4" - "github.com/multiformats/go-multihash" -) - -const invertedIndexPath = "/inverted_index/" - -// ErrNotFoundInIndex is returned instead of ErrNotFound if the multihash doesn't exist in the index -var ErrNotFoundInIndex = errors.New("does not exist in index") - -// simpleInvertedIndex is an inverted index that only stores a single shard key per multihash. Its -// implementation is modified from the default upstream implementation in dagstore/index. -type simpleInvertedIndex struct { - ds ds.Batching -} - -// newSimpleInvertedIndex returns a new inverted index that only stores a single shard key per -// multihash. This is because we use badger as a storage backend, so updates are expensive, and we -// don't care which shard is used to serve a cid. 
-func newSimpleInvertedIndex(storePath string) (*simpleInvertedIndex, error) { - opts := dsbadger.DefaultOptions // this should be copied - // turn off value log GC as we don't use value log - opts.GcInterval = 0 - // use minimum amount of NumLevelZeroTables to trigger L0 compaction faster - opts.NumLevelZeroTables = 1 - // MaxLevels = 8 will allow the db to grow to ~11.1 TiB - opts.MaxLevels = 8 - // inverted index stores unique hash keys, so we don't need to detect conflicts - opts.DetectConflicts = false - // we don't need compression for inverted index as it just hashes - opts.Compression = options.None - compactors := runtime.NumCPU() - if compactors < 2 { - compactors = 2 - } - if compactors > opts.MaxLevels { // ensure there is no more compactors than db table levels - compactors = opts.MaxLevels - } - opts.NumCompactors = compactors - - ds, err := dsbadger.NewDatastore(storePath+invertedIndexPath, &opts) - if err != nil { - return nil, fmt.Errorf("can't open Badger Datastore: %w", err) - } - - return &simpleInvertedIndex{ds: ds}, nil -} - -func (s *simpleInvertedIndex) AddMultihashesForShard( - ctx context.Context, - mhIter index.MultihashIterator, - sk shard.Key, -) error { - // in the original implementation, a mutex is used here to prevent unnecessary updates to the - // key. The amount of extra data produced by this is negligible, and the performance benefits - // from removing the lock are significant (indexing is a hot path during sync). 
- batch, err := s.ds.Batch(ctx) - if err != nil { - return fmt.Errorf("failed to create ds batch: %w", err) - } - - err = mhIter.ForEach(func(mh multihash.Multihash) error { - key := ds.NewKey(string(mh)) - if err := batch.Put(ctx, key, []byte(sk.String())); err != nil { - return fmt.Errorf("failed to put mh=%s, err=%w", mh, err) - } - return nil - }) - if err != nil { - return fmt.Errorf("failed to add index entry: %w", err) - } - - if err := batch.Commit(ctx); err != nil { - return fmt.Errorf("failed to commit batch: %w", err) - } - return nil -} - -func (s *simpleInvertedIndex) GetShardsForMultihash(ctx context.Context, mh multihash.Multihash) ([]shard.Key, error) { - key := ds.NewKey(string(mh)) - sbz, err := s.ds.Get(ctx, key) - if err != nil { - return nil, errors.Join(ErrNotFoundInIndex, err) - } - - return []shard.Key{shard.KeyFromString(string(sbz))}, nil -} - -func (s *simpleInvertedIndex) close() error { - return s.ds.Close() -} diff --git a/share/eds/inverted_index_test.go b/share/eds/inverted_index_test.go deleted file mode 100644 index e83c2be267..0000000000 --- a/share/eds/inverted_index_test.go +++ /dev/null @@ -1,55 +0,0 @@ -package eds - -import ( - "context" - "testing" - - "github.com/filecoin-project/dagstore/shard" - "github.com/multiformats/go-multihash" - "github.com/stretchr/testify/require" -) - -type mockIterator struct { - mhs []multihash.Multihash -} - -func (m *mockIterator) ForEach(f func(mh multihash.Multihash) error) error { - for _, mh := range m.mhs { - if err := f(mh); err != nil { - return err - } - } - return nil -} - -// TestMultihashesForShard ensures that the inverted index correctly stores a single shard key per -// duplicate multihash -func TestMultihashesForShard(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - t.Cleanup(cancel) - - mhs := []multihash.Multihash{ - multihash.Multihash("mh1"), - multihash.Multihash("mh2"), - multihash.Multihash("mh3"), - } - - mi := &mockIterator{mhs: mhs} - path 
:= t.TempDir() - invertedIndex, err := newSimpleInvertedIndex(path) - require.NoError(t, err) - - // 1. Add all 3 multihashes to shard1 - err = invertedIndex.AddMultihashesForShard(ctx, mi, shard.KeyFromString("shard1")) - require.NoError(t, err) - shardKeys, err := invertedIndex.GetShardsForMultihash(ctx, mhs[0]) - require.NoError(t, err) - require.Equal(t, []shard.Key{shard.KeyFromString("shard1")}, shardKeys) - - // 2. Add mh1 to shard2, and ensure that mh1 no longer points to shard1 - err = invertedIndex.AddMultihashesForShard(ctx, &mockIterator{mhs: mhs[:1]}, shard.KeyFromString("shard2")) - require.NoError(t, err) - shardKeys, err = invertedIndex.GetShardsForMultihash(ctx, mhs[0]) - require.NoError(t, err) - require.Equal(t, []shard.Key{shard.KeyFromString("shard2")}, shardKeys) -} diff --git a/share/eds/metrics.go b/share/eds/metrics.go deleted file mode 100644 index 1ce9fe459d..0000000000 --- a/share/eds/metrics.go +++ /dev/null @@ -1,286 +0,0 @@ -package eds - -import ( - "context" - "errors" - "time" - - "go.opentelemetry.io/otel" - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/metric" - - "github.com/celestiaorg/celestia-node/libs/utils" -) - -const ( - failedKey = "failed" - sizeKey = "eds_size" - - putResultKey = "result" - putOK putResult = "ok" - putExists putResult = "exists" - putFailed putResult = "failed" - - opNameKey = "op" - longOpResultKey = "result" - longOpUnresolved longOpResult = "unresolved" - longOpOK longOpResult = "ok" - longOpFailed longOpResult = "failed" - - dagstoreShardStatusKey = "shard_status" -) - -var meter = otel.Meter("eds_store") - -type putResult string - -type longOpResult string - -type metrics struct { - putTime metric.Float64Histogram - getCARTime metric.Float64Histogram - getCARBlockstoreTime metric.Float64Histogram - getDAHTime metric.Float64Histogram - removeTime metric.Float64Histogram - getTime metric.Float64Histogram - hasTime metric.Float64Histogram - listTime metric.Float64Histogram - - 
shardFailureCount metric.Int64Counter - - longOpTime metric.Float64Histogram - gcTime metric.Float64Histogram - - clientReg metric.Registration - closerFn func() error -} - -func (s *Store) WithMetrics() error { - putTime, err := meter.Float64Histogram("eds_store_put_time_histogram", - metric.WithDescription("eds store put time histogram(s)")) - if err != nil { - return err - } - - getCARTime, err := meter.Float64Histogram("eds_store_get_car_time_histogram", - metric.WithDescription("eds store get car time histogram(s)")) - if err != nil { - return err - } - - getCARBlockstoreTime, err := meter.Float64Histogram("eds_store_get_car_blockstore_time_histogram", - metric.WithDescription("eds store get car blockstore time histogram(s)")) - if err != nil { - return err - } - - getDAHTime, err := meter.Float64Histogram("eds_store_get_dah_time_histogram", - metric.WithDescription("eds store get dah time histogram(s)")) - if err != nil { - return err - } - - removeTime, err := meter.Float64Histogram("eds_store_remove_time_histogram", - metric.WithDescription("eds store remove time histogram(s)")) - if err != nil { - return err - } - - getTime, err := meter.Float64Histogram("eds_store_get_time_histogram", - metric.WithDescription("eds store get time histogram(s)")) - if err != nil { - return err - } - - hasTime, err := meter.Float64Histogram("eds_store_has_time_histogram", - metric.WithDescription("eds store has time histogram(s)")) - if err != nil { - return err - } - - listTime, err := meter.Float64Histogram("eds_store_list_time_histogram", - metric.WithDescription("eds store list time histogram(s)")) - if err != nil { - return err - } - - shardFailureCount, err := meter.Int64Counter("eds_store_shard_failure_counter", - metric.WithDescription("eds store OpShardFail counter")) - if err != nil { - return err - } - - longOpTime, err := meter.Float64Histogram("eds_store_long_operation_time_histogram", - metric.WithDescription("eds store long operation time histogram(s)")) - if 
err != nil { - return err - } - - gcTime, err := meter.Float64Histogram("eds_store_gc_time", - metric.WithDescription("dagstore gc time histogram(s)")) - if err != nil { - return err - } - - dagStoreShards, err := meter.Int64ObservableGauge("eds_store_dagstore_shards", - metric.WithDescription("dagstore amount of shards by status")) - if err != nil { - return err - } - - closerFn, err := s.cache.Load().EnableMetrics() - if err != nil { - return err - } - - callback := func(_ context.Context, observer metric.Observer) error { - stats := s.dgstr.Stats() - for status, amount := range stats { - observer.ObserveInt64(dagStoreShards, int64(amount), - metric.WithAttributes( - attribute.String(dagstoreShardStatusKey, status.String()), - )) - } - return nil - } - - clientReg, err := meter.RegisterCallback(callback, dagStoreShards) - if err != nil { - return err - } - - s.metrics = &metrics{ - putTime: putTime, - getCARTime: getCARTime, - getCARBlockstoreTime: getCARBlockstoreTime, - getDAHTime: getDAHTime, - removeTime: removeTime, - getTime: getTime, - hasTime: hasTime, - listTime: listTime, - shardFailureCount: shardFailureCount, - longOpTime: longOpTime, - gcTime: gcTime, - clientReg: clientReg, - closerFn: closerFn, - } - return nil -} - -func (m *metrics) close() error { - if m == nil { - return nil - } - - return errors.Join(m.closerFn(), m.clientReg.Unregister()) -} - -func (m *metrics) observeGCtime(ctx context.Context, dur time.Duration, failed bool) { - if m == nil { - return - } - ctx = utils.ResetContextOnError(ctx) - m.gcTime.Record(ctx, dur.Seconds(), metric.WithAttributes( - attribute.Bool(failedKey, failed))) -} - -func (m *metrics) observeShardFailure(ctx context.Context, shardKey string) { - if m == nil { - return - } - ctx = utils.ResetContextOnError(ctx) - - m.shardFailureCount.Add(ctx, 1, metric.WithAttributes(attribute.String("shard_key", shardKey))) -} - -func (m *metrics) observePut(ctx context.Context, dur time.Duration, result putResult, size uint) 
{ - if m == nil { - return - } - ctx = utils.ResetContextOnError(ctx) - - m.putTime.Record(ctx, dur.Seconds(), metric.WithAttributes( - attribute.String(putResultKey, string(result)), - attribute.Int(sizeKey, int(size)))) -} - -func (m *metrics) observeLongOp(ctx context.Context, opName string, dur time.Duration, result longOpResult) { - if m == nil { - return - } - ctx = utils.ResetContextOnError(ctx) - - m.longOpTime.Record(ctx, dur.Seconds(), metric.WithAttributes( - attribute.String(opNameKey, opName), - attribute.String(longOpResultKey, string(result)))) -} - -func (m *metrics) observeGetCAR(ctx context.Context, dur time.Duration, failed bool) { - if m == nil { - return - } - ctx = utils.ResetContextOnError(ctx) - - m.getCARTime.Record(ctx, dur.Seconds(), metric.WithAttributes( - attribute.Bool(failedKey, failed))) -} - -func (m *metrics) observeCARBlockstore(ctx context.Context, dur time.Duration, failed bool) { - if m == nil { - return - } - ctx = utils.ResetContextOnError(ctx) - - m.getCARBlockstoreTime.Record(ctx, dur.Seconds(), metric.WithAttributes( - attribute.Bool(failedKey, failed))) -} - -func (m *metrics) observeGetDAH(ctx context.Context, dur time.Duration, failed bool) { - if m == nil { - return - } - ctx = utils.ResetContextOnError(ctx) - - m.getDAHTime.Record(ctx, dur.Seconds(), metric.WithAttributes( - attribute.Bool(failedKey, failed))) -} - -func (m *metrics) observeRemove(ctx context.Context, dur time.Duration, failed bool) { - if m == nil { - return - } - ctx = utils.ResetContextOnError(ctx) - - m.removeTime.Record(ctx, dur.Seconds(), metric.WithAttributes( - attribute.Bool(failedKey, failed))) -} - -func (m *metrics) observeGet(ctx context.Context, dur time.Duration, failed bool) { - if m == nil { - return - } - ctx = utils.ResetContextOnError(ctx) - - m.getTime.Record(ctx, dur.Seconds(), metric.WithAttributes( - attribute.Bool(failedKey, failed))) -} - -func (m *metrics) observeHas(ctx context.Context, dur time.Duration, failed bool) { - 
if m == nil { - return - } - ctx = utils.ResetContextOnError(ctx) - - m.hasTime.Record(ctx, dur.Seconds(), metric.WithAttributes( - attribute.Bool(failedKey, failed))) -} - -func (m *metrics) observeList(ctx context.Context, dur time.Duration, failed bool) { - if m == nil { - return - } - ctx = utils.ResetContextOnError(ctx) - - m.listTime.Record(ctx, dur.Seconds(), metric.WithAttributes( - attribute.Bool(failedKey, failed))) -} diff --git a/share/eds/nd.go b/share/eds/nd.go new file mode 100644 index 0000000000..36de7a0234 --- /dev/null +++ b/share/eds/nd.go @@ -0,0 +1,33 @@ +package eds + +import ( + "context" + "fmt" + + "github.com/celestiaorg/celestia-node/share" + "github.com/celestiaorg/celestia-node/share/shwap" +) + +// NamespaceData extracts shares for a specific namespace from an EDS, considering +// each row independently. It uses root to determine which rows to extract data from, +// avoiding the need to recalculate the row roots for each row. +func NamespaceData( + ctx context.Context, + eds Accessor, + namespace share.Namespace, +) (shwap.NamespaceData, error) { + roots, err := eds.AxisRoots(ctx) + if err != nil { + return nil, fmt.Errorf("failed to get AxisRoots: %w", err) + } + rowIdxs := share.RowsWithNamespace(roots, namespace) + rows := make(shwap.NamespaceData, len(rowIdxs)) + for i, idx := range rowIdxs { + rows[i], err = eds.RowNamespaceData(ctx, namespace, idx) + if err != nil { + return nil, fmt.Errorf("failed to process row %d: %w", idx, err) + } + } + + return rows, nil +} diff --git a/share/eds/nd_test.go b/share/eds/nd_test.go new file mode 100644 index 0000000000..d711c811bd --- /dev/null +++ b/share/eds/nd_test.go @@ -0,0 +1,32 @@ +package eds + +import ( + "context" + "testing" + "time" + + "github.com/stretchr/testify/require" + + "github.com/celestiaorg/celestia-node/share/eds/edstest" + "github.com/celestiaorg/celestia-node/share/sharetest" +) + +func TestNamespaceData(t *testing.T) { + ctx, cancel := 
context.WithTimeout(context.Background(), 5*time.Second) + t.Cleanup(cancel) + + const odsSize = 8 + sharesAmount := odsSize * odsSize + namespace := sharetest.RandV0Namespace() + for amount := 1; amount < sharesAmount; amount++ { + eds, root := edstest.RandEDSWithNamespace(t, namespace, amount, odsSize) + rsmt2d := &Rsmt2D{ExtendedDataSquare: eds} + nd, err := NamespaceData(ctx, rsmt2d, namespace) + require.NoError(t, err) + require.True(t, len(nd) > 0) + require.Len(t, nd.Flatten(), amount) + + err = nd.Verify(root, namespace) + require.NoError(t, err) + } +} diff --git a/share/eds/ods.go b/share/eds/ods.go deleted file mode 100644 index a924e1c235..0000000000 --- a/share/eds/ods.go +++ /dev/null @@ -1,98 +0,0 @@ -package eds - -import ( - "bufio" - "bytes" - "encoding/binary" - "errors" - "fmt" - "io" - - cbor "github.com/ipfs/go-ipld-cbor" - "github.com/ipld/go-car" - "github.com/ipld/go-car/util" -) - -// bufferedODSReader will read odsSquareSize amount of leaves from reader into the buffer. -// It exposes the buffer to be read by io.Reader interface implementation -type bufferedODSReader struct { - carReader *bufio.Reader - // current is the amount of CARv1 encoded leaves that have been read from reader. 
When current - // reaches odsSquareSize, bufferedODSReader will prevent further reads by returning io.EOF - current, odsSquareSize int - buf *bytes.Buffer -} - -// ODSReader reads CARv1 encoded data from io.ReadCloser and limits the reader to the CAR header -// and first quadrant (ODS) -func ODSReader(carReader io.Reader) (io.Reader, error) { - if carReader == nil { - return nil, errors.New("eds: can't create ODSReader over nil reader") - } - - odsR := &bufferedODSReader{ - carReader: bufio.NewReader(carReader), - buf: new(bytes.Buffer), - } - - // first LdRead reads the full CAR header to determine amount of shares in the ODS - data, err := util.LdRead(odsR.carReader) - if err != nil { - return nil, fmt.Errorf("reading header: %w", err) - } - - var header car.CarHeader - err = cbor.DecodeInto(data, &header) - if err != nil { - return nil, fmt.Errorf("invalid header: %w", err) - } - - // car header contains both row roots and col roots which is why - // we divide by 4 to get the ODSWidth - odsWidth := len(header.Roots) / 4 - odsR.odsSquareSize = odsWidth * odsWidth - - // NewCarReader will expect to read the header first, so write it first - return odsR, util.LdWrite(odsR.buf, data) -} - -func (r *bufferedODSReader) Read(p []byte) (n int, err error) { - // read leafs to the buffer until it has sufficient data to fill provided container or full ods is - // read - for r.current < r.odsSquareSize && r.buf.Len() < len(p) { - if err := r.readLeaf(); err != nil { - return 0, err - } - - r.current++ - } - - // read buffer to slice - return r.buf.Read(p) -} - -// readLeaf reads one leaf from reader into bufferedODSReader buffer -func (r *bufferedODSReader) readLeaf() error { - if _, err := r.carReader.Peek(1); err != nil { // no more blocks, likely clean io.EOF - return err - } - - l, err := binary.ReadUvarint(r.carReader) - if err != nil { - if errors.Is(err, io.EOF) { - return io.ErrUnexpectedEOF // don't silently pretend this is a clean EOF - } - return err - } - - if l 
> uint64(util.MaxAllowedSectionSize) { // Don't OOM - return fmt.Errorf("malformed car; header `length`: %v is bigger than %v", l, util.MaxAllowedSectionSize) - } - - buf := make([]byte, 8) - n := binary.PutUvarint(buf, l) - r.buf.Write(buf[:n]) - - _, err = r.buf.ReadFrom(io.LimitReader(r.carReader, int64(l))) - return err -} diff --git a/share/eds/ods_test.go b/share/eds/ods_test.go deleted file mode 100644 index 0f7c69e708..0000000000 --- a/share/eds/ods_test.go +++ /dev/null @@ -1,110 +0,0 @@ -package eds - -import ( - "context" - "io" - "testing" - - "github.com/ipld/go-car" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/celestiaorg/celestia-node/share" -) - -// TestODSReader ensures that the reader returned from ODSReader is capable of reading the CAR -// header and ODS. -func TestODSReader(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - t.Cleanup(cancel) - - // launch eds store - edsStore, err := newStore(t) - require.NoError(t, err) - err = edsStore.Start(ctx) - require.NoError(t, err) - - // generate random eds data and put it into the store - eds, dah := randomEDS(t) - err = edsStore.Put(ctx, dah.Hash(), eds) - require.NoError(t, err) - - // get CAR reader from store - r, err := edsStore.GetCAR(ctx, dah.Hash()) - assert.NoError(t, err) - defer func() { - require.NoError(t, r.Close()) - }() - - // create ODSReader wrapper based on car reader to limit reads to ODS only - odsR, err := ODSReader(r) - assert.NoError(t, err) - - // create CAR reader from ODSReader - carReader, err := car.NewCarReader(odsR) - assert.NoError(t, err) - - // validate ODS could be obtained from reader - for i := 0; i < 4; i++ { - for j := 0; j < 4; j++ { - // pick share from original eds - original := eds.GetCell(uint(i), uint(j)) - - // read block from odsReader based reader - block, err := carReader.Next() - assert.NoError(t, err) - - // check that original data from eds is same as data from reader - 
assert.Equal(t, original, share.GetData(block.RawData())) - } - } - - // Make sure no excess data is available to get from reader - _, err = carReader.Next() - assert.Error(t, io.EOF, err) -} - -// TestODSReaderReconstruction ensures that the reader returned from ODSReader provides sufficient -// data for EDS reconstruction -func TestODSReaderReconstruction(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - t.Cleanup(cancel) - - // launch eds store - edsStore, err := newStore(t) - require.NoError(t, err) - err = edsStore.Start(ctx) - require.NoError(t, err) - - // generate random eds data and put it into the store - eds, dah := randomEDS(t) - err = edsStore.Put(ctx, dah.Hash(), eds) - require.NoError(t, err) - - // get CAR reader from store - r, err := edsStore.GetCAR(ctx, dah.Hash()) - assert.NoError(t, err) - defer func() { - require.NoError(t, r.Close()) - }() - - // create ODSReader wrapper based on car reader to limit reads to ODS only - odsR, err := ODSReader(r) - assert.NoError(t, err) - - // reconstruct EDS from ODSReader - loaded, err := ReadEDS(ctx, odsR, dah.Hash()) - assert.NoError(t, err) - - rowRoots, err := eds.RowRoots() - require.NoError(t, err) - loadedRowRoots, err := loaded.RowRoots() - require.NoError(t, err) - require.Equal(t, rowRoots, loadedRowRoots) - - colRoots, err := eds.ColRoots() - require.NoError(t, err) - loadedColRoots, err := loaded.ColRoots() - require.NoError(t, err) - require.Equal(t, colRoots, loadedColRoots) -} diff --git a/share/eds/proof.go b/share/eds/proof.go new file mode 100644 index 0000000000..58b34d3b79 --- /dev/null +++ b/share/eds/proof.go @@ -0,0 +1,75 @@ +package eds + +import ( + "github.com/tendermint/tendermint/crypto/merkle" + corebytes "github.com/tendermint/tendermint/libs/bytes" + coretypes "github.com/tendermint/tendermint/proto/tendermint/types" + "github.com/tendermint/tendermint/types" + + pkgproof "github.com/celestiaorg/celestia-app/v2/pkg/proof" + 
"github.com/celestiaorg/go-square/shares" + "github.com/celestiaorg/rsmt2d" +) + +// ProveShares generates a share proof for a share range. +// The share range, defined by start and end, is end-exclusive. +func ProveShares(eds *rsmt2d.ExtendedDataSquare, start, end int) (*types.ShareProof, error) { + log.Debugw("proving share range", "start", start, "end", end) + + odsShares, err := shares.FromBytes(eds.FlattenedODS()) + if err != nil { + return nil, err + } + nID, err := pkgproof.ParseNamespace(odsShares, start, end) + if err != nil { + return nil, err + } + log.Debugw("generating the share proof", "start", start, "end", end) + proof, err := pkgproof.NewShareInclusionProofFromEDS(eds, nID, shares.NewRange(start, end)) + if err != nil { + return nil, err + } + coreProof := toCoreShareProof(proof) + return &coreProof, nil +} + +// toCoreShareProof utility function that converts a share proof defined in app +// to the share proof defined in node. +// This will be removed once we unify both these proofs. 
+// Reference issue: https://github.com/celestiaorg/celestia-app/issues/3734 +func toCoreShareProof(appShareProof pkgproof.ShareProof) types.ShareProof { + shareProofs := make([]*coretypes.NMTProof, 0) + for _, proof := range appShareProof.ShareProofs { + shareProofs = append(shareProofs, &coretypes.NMTProof{ + Start: proof.Start, + End: proof.End, + Nodes: proof.Nodes, + LeafHash: proof.LeafHash, + }) + } + + rowRoots := make([]corebytes.HexBytes, 0) + rowProofs := make([]*merkle.Proof, 0) + for index, proof := range appShareProof.RowProof.Proofs { + rowRoots = append(rowRoots, appShareProof.RowProof.RowRoots[index]) + rowProofs = append(rowProofs, &merkle.Proof{ + Total: proof.Total, + Index: proof.Index, + LeafHash: proof.LeafHash, + Aunts: proof.Aunts, + }) + } + + return types.ShareProof{ + Data: appShareProof.Data, + ShareProofs: shareProofs, + NamespaceID: appShareProof.NamespaceId, + RowProof: types.RowProof{ + RowRoots: rowRoots, + Proofs: rowProofs, + StartRow: appShareProof.RowProof.StartRow, + EndRow: appShareProof.RowProof.EndRow, + }, + NamespaceVersion: appShareProof.NamespaceVersion, + } +} diff --git a/share/eds/proofs_cache.go b/share/eds/proofs_cache.go new file mode 100644 index 0000000000..3090f46322 --- /dev/null +++ b/share/eds/proofs_cache.go @@ -0,0 +1,357 @@ +package eds + +import ( + "context" + "errors" + "fmt" + "io" + "sync" + "sync/atomic" + + "github.com/ipfs/boxo/blockservice" + blocks "github.com/ipfs/go-block-format" + "github.com/ipfs/go-cid" + + "github.com/celestiaorg/celestia-app/v2/pkg/wrapper" + "github.com/celestiaorg/nmt" + "github.com/celestiaorg/rsmt2d" + + "github.com/celestiaorg/celestia-node/share" + "github.com/celestiaorg/celestia-node/share/ipld" + "github.com/celestiaorg/celestia-node/share/shwap" +) + +var _ AccessorStreamer = (*proofsCache)(nil) + +// proofsCache is eds accessor that caches proofs for rows and columns. It also caches extended +// axis Shares. 
It is used to speed up the process of building proofs for rows and columns, +// reducing the number of reads from the underlying accessor. Cache does not synchronize access +// to the underlying accessor. +type proofsCache struct { + inner AccessorStreamer + + // size caches the size of the data square + size atomic.Int32 + // dataHash caches the data hash + dataHash atomic.Pointer[share.DataHash] + // rootsCache caches the axis roots + rootsCache atomic.Pointer[share.AxisRoots] + // axisCacheLock protects proofCache + axisCacheLock sync.RWMutex + // axisCache caches the axis Shares and proofs. Index in the slice corresponds to the axis type. + // The map key is the index of the axis. + axisCache []map[int]axisWithProofs + + // disableCache disables caching of rows for testing purposes + disableCache bool +} + +// axisWithProofs is used to cache the extended axis Shares and proofs. +type axisWithProofs struct { + half AxisHalf + // shares are the extended axis Shares + shares []share.Share + // root caches the root of the tree. It will be set only when proofs are calculated + root []byte + // proofs are stored in a blockservice.BlockGetter by their CID. It will be set only when proofs + // are calculated and will be used to get the proof for a specific share. BlockGetter is used to + // reuse ipld based proof generation logic, which traverses the tree from the root to the leafs and + // collects the nodes on the path. This is temporary and will be replaced with a more efficient + // proof caching mechanism in nmt package, once it is implemented. + proofs blockservice.BlockGetter +} + +// WithProofsCache creates a new eds accessor with caching of proofs for rows and columns. It is +// used to speed up the process of building proofs for rows and columns, reducing the number of +// reads from the underlying accessor. 
+func WithProofsCache(ac AccessorStreamer) AccessorStreamer { + rows := make(map[int]axisWithProofs) + cols := make(map[int]axisWithProofs) + axisCache := []map[int]axisWithProofs{rows, cols} + return &proofsCache{ + inner: ac, + axisCache: axisCache, + } +} + +func (c *proofsCache) Size(ctx context.Context) int { + size := c.size.Load() + if size == 0 { + size = int32(c.inner.Size(ctx)) + c.size.Store(size) + } + return int(size) +} + +func (c *proofsCache) DataHash(ctx context.Context) (share.DataHash, error) { + dataHash := c.dataHash.Load() + if dataHash != nil { + return *dataHash, nil + } + loaded, err := c.inner.DataHash(ctx) + if err != nil { + return nil, err + } + c.dataHash.Store(&loaded) + return loaded, nil +} + +func (c *proofsCache) AxisRoots(ctx context.Context) (*share.AxisRoots, error) { + roots := c.rootsCache.Load() + if roots != nil { + return roots, nil + } + + // if roots are not in cache, read them from the inner accessor + roots, err := c.inner.AxisRoots(ctx) + if err != nil { + return nil, err + } + c.rootsCache.Store(roots) + return roots, nil +} + +func (c *proofsCache) Sample(ctx context.Context, rowIdx, colIdx int) (shwap.Sample, error) { + axisType, axisIdx, shrIdx := rsmt2d.Row, rowIdx, colIdx + ax, err := c.axisWithProofs(ctx, axisType, axisIdx) + if err != nil { + return shwap.Sample{}, err + } + + // build share proof from proofs cached for given axis + share := ax.shares[shrIdx] + proofs, err := ipld.GetProof(ctx, ax.proofs, ax.root, shrIdx, c.Size(ctx)) + if err != nil { + return shwap.Sample{}, fmt.Errorf("building proof from cache: %w", err) + } + + return shwap.Sample{ + Share: share, + Proof: &proofs, + ProofType: axisType, + }, nil +} + +func (c *proofsCache) axisWithProofs(ctx context.Context, axisType rsmt2d.Axis, axisIdx int) (axisWithProofs, error) { + // return axis with proofs from cache if possible + ax, ok := c.getAxisFromCache(axisType, axisIdx) + if ax.proofs != nil { + // return axis with proofs from cache, only 
if proofs are already calculated + return ax, nil + } + + if !ok { + // if axis is not in cache, read it from the inner accessor + half, err := c.inner.AxisHalf(ctx, axisType, axisIdx) + if err != nil { + return axisWithProofs{}, fmt.Errorf("reading axis half from inner accessor: %w", err) + } + ax.half = half + } + + if len(ax.shares) == 0 { + shares, err := ax.half.Extended() + if err != nil { + return axisWithProofs{}, fmt.Errorf("reading axis shares: %w", err) + } + ax.shares = shares + } + + // build proofs from Shares and cache them + adder := ipld.NewProofsAdder(c.Size(ctx), true) + tree := wrapper.NewErasuredNamespacedMerkleTree( + uint64(c.Size(ctx)/2), + uint(axisIdx), + nmt.NodeVisitor(adder.VisitFn()), + ) + for _, shr := range ax.shares { + err := tree.Push(shr) + if err != nil { + return axisWithProofs{}, fmt.Errorf("push shares: %w", err) + } + } + + // build the tree + root, err := tree.Root() + if err != nil { + return axisWithProofs{}, fmt.Errorf("calculating root: %w", err) + } + + ax.root = root + ax.proofs, err = newRowProofsGetter(adder.Proofs()) + if err != nil { + return axisWithProofs{}, fmt.Errorf("creating proof getter: %w", err) + } + + if !c.disableCache { + c.storeAxisInCache(axisType, axisIdx, ax) + } + return ax, nil +} + +func (c *proofsCache) AxisHalf(ctx context.Context, axisType rsmt2d.Axis, axisIdx int) (AxisHalf, error) { + // return axis from cache if possible + ax, ok := c.getAxisFromCache(axisType, axisIdx) + if ok { + return ax.half, nil + } + + // read axis from inner accessor if axis is in the first quadrant + half, err := c.inner.AxisHalf(ctx, axisType, axisIdx) + if err != nil { + return AxisHalf{}, fmt.Errorf("reading axis from inner accessor: %w", err) + } + + if !c.disableCache { + ax.half = half + c.storeAxisInCache(axisType, axisIdx, ax) + } + + return half, nil +} + +func (c *proofsCache) RowNamespaceData( + ctx context.Context, + namespace share.Namespace, + rowIdx int, +) (shwap.RowNamespaceData, error) { + ax, 
err := c.axisWithProofs(ctx, rsmt2d.Row, rowIdx) + if err != nil { + return shwap.RowNamespaceData{}, err + } + + row, proof, err := ipld.GetSharesByNamespace(ctx, ax.proofs, ax.root, namespace, c.Size(ctx)) + if err != nil { + return shwap.RowNamespaceData{}, fmt.Errorf("shares by namespace %s for row %v: %w", namespace.String(), rowIdx, err) + } + + return shwap.RowNamespaceData{ + Shares: row, + Proof: proof, + }, nil +} + +func (c *proofsCache) Shares(ctx context.Context) ([]share.Share, error) { + odsSize := c.Size(ctx) / 2 + shares := make([]share.Share, 0, odsSize*odsSize) + for i := 0; i < c.Size(ctx)/2; i++ { + ax, err := c.AxisHalf(ctx, rsmt2d.Row, i) + if err != nil { + return nil, err + } + + half := ax.Shares + if ax.IsParity { + axisShares, err := c.axisShares(ctx, rsmt2d.Row, i) + if err != nil { + return nil, err + } + half = axisShares[:odsSize] + } + + shares = append(shares, half...) + } + return shares, nil +} + +func (c *proofsCache) Reader() (io.Reader, error) { + odsSize := c.Size(context.TODO()) / 2 + reader := NewShareReader(odsSize, c.getShare) + return reader, nil +} + +func (c *proofsCache) Close() error { + return c.inner.Close() +} + +func (c *proofsCache) axisShares(ctx context.Context, axisType rsmt2d.Axis, axisIdx int) ([]share.Share, error) { + ax, ok := c.getAxisFromCache(axisType, axisIdx) + if ok && len(ax.shares) != 0 { + return ax.shares, nil + } + + if !ok { + // if axis is not in cache, read it from the inner accessor + half, err := c.inner.AxisHalf(ctx, axisType, axisIdx) + if err != nil { + return nil, fmt.Errorf("reading axis half from inner accessor: %w", err) + } + ax.half = half + } + + shares, err := ax.half.Extended() + if err != nil { + return nil, fmt.Errorf("extending shares: %w", err) + } + + if !c.disableCache { + ax.shares = shares + c.storeAxisInCache(axisType, axisIdx, ax) + } + return shares, nil +} + +func (c *proofsCache) storeAxisInCache(axisType rsmt2d.Axis, axisIdx int, axis axisWithProofs) { 
c.axisCacheLock.Lock() + defer c.axisCacheLock.Unlock() + c.axisCache[axisType][axisIdx] = axis +} + +func (c *proofsCache) getAxisFromCache(axisType rsmt2d.Axis, axisIdx int) (axisWithProofs, bool) { + c.axisCacheLock.RLock() + defer c.axisCacheLock.RUnlock() + ax, ok := c.axisCache[axisType][axisIdx] + return ax, ok +} + +func (c *proofsCache) getShare(rowIdx, colIdx int) ([]byte, error) { + ctx := context.TODO() + odsSize := c.Size(ctx) / 2 + half, err := c.AxisHalf(ctx, rsmt2d.Row, rowIdx) + if err != nil { + return nil, fmt.Errorf("reading axis half: %w", err) + } + + // if share is from the same side of axis return share right away + if colIdx > odsSize == half.IsParity { + if half.IsParity { + colIdx -= odsSize + } + return half.Shares[colIdx], nil + } + + // if share index is from opposite part of axis, obtain full axis shares + shares, err := c.axisShares(ctx, rsmt2d.Row, rowIdx) + if err != nil { + return nil, fmt.Errorf("reading axis shares: %w", err) + } + return shares[colIdx], nil +} + +// rowProofsGetter implements blockservice.BlockGetter interface +type rowProofsGetter struct { + proofs map[cid.Cid]blocks.Block +} + +func newRowProofsGetter(rawProofs map[cid.Cid][]byte) (*rowProofsGetter, error) { + proofs := make(map[cid.Cid]blocks.Block, len(rawProofs)) + for k, v := range rawProofs { + b, err := blocks.NewBlockWithCid(v, k) + if err != nil { + return nil, err + } + proofs[k] = b + } + return &rowProofsGetter{proofs: proofs}, nil +} + +func (r rowProofsGetter) GetBlock(_ context.Context, c cid.Cid) (blocks.Block, error) { + if b, ok := r.proofs[c]; ok { + return b, nil + } + return nil, errors.New("block not found") +} + +func (r rowProofsGetter) GetBlocks(_ context.Context, _ []cid.Cid) <-chan blocks.Block { + panic("not implemented") +} diff --git a/share/eds/proofs_cache_test.go b/share/eds/proofs_cache_test.go new file mode 100644 index 0000000000..b570b15c1e --- /dev/null +++ b/share/eds/proofs_cache_test.go @@ -0,0 +1,27 @@ +package eds + 
+import ( + "context" + "testing" + "time" + + "github.com/celestiaorg/rsmt2d" +) + +func TestCache(t *testing.T) { + ODSSize := 16 + ctx, cancel := context.WithTimeout(context.Background(), time.Second*15) + t.Cleanup(cancel) + + newAccessor := func(tb testing.TB, inner *rsmt2d.ExtendedDataSquare) Accessor { + accessor := &Rsmt2D{ExtendedDataSquare: inner} + return WithProofsCache(accessor) + } + TestSuiteAccessor(ctx, t, newAccessor, ODSSize) + + newAccessorStreamer := func(tb testing.TB, inner *rsmt2d.ExtendedDataSquare) AccessorStreamer { + accessor := &Rsmt2D{ExtendedDataSquare: inner} + return WithProofsCache(accessor) + } + TestStreamer(ctx, t, newAccessorStreamer, ODSSize) +} diff --git a/share/eds/read.go b/share/eds/read.go new file mode 100644 index 0000000000..83149ca1ce --- /dev/null +++ b/share/eds/read.go @@ -0,0 +1,62 @@ +package eds + +import ( + "bytes" + "context" + "errors" + "fmt" + "io" + + "github.com/celestiaorg/celestia-node/share" +) + +// ReadAccessor reads an EDS out of the io.Reader until io.EOF, verifies it against the provided AxisRoots and provides an in-memory Accessor over it. +func ReadAccessor(ctx context.Context, reader io.Reader, root *share.AxisRoots) (*Rsmt2D, error) { + odsSize := len(root.RowRoots) / 2 + shares, err := ReadShares(reader, share.Size, odsSize) + if err != nil { + return nil, fmt.Errorf("failed to read eds from ods bytes: %w", err) + } + + // verify that the EDS hash matches the expected hash + rsmt2d, err := Rsmt2DFromShares(shares, odsSize) + if err != nil { + return nil, fmt.Errorf("failed to create rsmt2d from shares: %w", err) + } + datahash, err := rsmt2d.DataHash(ctx) + if err != nil { + return nil, fmt.Errorf("failed to calculate data hash: %w", err) + } + if !bytes.Equal(datahash, root.Hash()) { + return nil, fmt.Errorf( + "content integrity mismatch: imported root %s doesn't match expected root %s", + datahash, + root.Hash(), + ) + } + return rsmt2d, nil +} + +// ReadShares reads shares from the provided io.Reader until EOF. 
If EOF is reached, the remaining shares +// are populated as tail padding shares. Provided reader must contain shares in row-major order. +func ReadShares(r io.Reader, shareSize, odsSize int) ([]share.Share, error) { + shares := make([]share.Share, odsSize*odsSize) + var total int + for i := range shares { + shr := make(share.Share, shareSize) + n, err := io.ReadFull(r, shr) + if errors.Is(err, io.EOF) { + for ; i < len(shares); i++ { + shares[i] = share.TailPadding() + } + return shares, nil + } + if err != nil { + return nil, fmt.Errorf("reading shares: %w, bytes read: %v", err, total+n) + } + + shares[i] = shr + total += n + } + return shares, nil +} diff --git a/share/eds/retriever.go b/share/eds/retriever.go index a32ef1df1c..c3483809af 100644 --- a/share/eds/retriever.go +++ b/share/eds/retriever.go @@ -15,7 +15,6 @@ import ( "go.opentelemetry.io/otel/codes" "go.opentelemetry.io/otel/trace" - "github.com/celestiaorg/celestia-app/v2/pkg/da" "github.com/celestiaorg/celestia-app/v2/pkg/wrapper" "github.com/celestiaorg/nmt" "github.com/celestiaorg/rsmt2d" @@ -57,18 +56,18 @@ func NewRetriever(bServ blockservice.BlockService) *Retriever { // data square and reconstructs the other three quadrants (3/4). If the requested quadrant is not // available within RetrieveQuadrantTimeout, it starts requesting another quadrant until either the // data is reconstructed, context is canceled or ErrByzantine is generated. 
-func (r *Retriever) Retrieve(ctx context.Context, dah *da.DataAvailabilityHeader) (*rsmt2d.ExtendedDataSquare, error) { +func (r *Retriever) Retrieve(ctx context.Context, roots *share.AxisRoots) (*rsmt2d.ExtendedDataSquare, error) { ctx, cancel := context.WithCancel(ctx) defer cancel() // cancels all the ongoing requests if reconstruction succeeds early ctx, span := tracer.Start(ctx, "retrieve-square") defer span.End() span.SetAttributes( - attribute.Int("size", len(dah.RowRoots)), + attribute.Int("size", len(roots.RowRoots)), ) - log.Debugw("retrieving data square", "data_hash", dah.String(), "size", len(dah.RowRoots)) - ses, err := r.newSession(ctx, dah) + log.Debugw("retrieving data square", "data_hash", roots.String(), "size", len(roots.RowRoots)) + ses, err := r.newSession(ctx, roots) if err != nil { return nil, err } @@ -91,7 +90,7 @@ func (r *Retriever) Retrieve(ctx context.Context, dah *da.DataAvailabilityHeader // nmt proofs computed during the session ses.close(false) span.RecordError(err) - return nil, byzantine.NewErrByzantine(ctx, r.bServ.Blockstore(), dah, errByz) + return nil, byzantine.NewErrByzantine(ctx, r.bServ.Blockstore(), roots, errByz) } log.Warnw("not enough shares to reconstruct data square, requesting more...", "err", err) @@ -107,7 +106,7 @@ func (r *Retriever) Retrieve(ctx context.Context, dah *da.DataAvailabilityHeader // quadrant request retries. Also, provides an API // to reconstruct the block once enough shares are fetched. type retrievalSession struct { - dah *da.DataAvailabilityHeader + roots *share.AxisRoots bget blockservice.BlockGetter adder *ipld.NmtNodeAdder @@ -125,8 +124,8 @@ type retrievalSession struct { } // newSession creates a new retrieval session and kicks off requesting process. 
-func (r *Retriever) newSession(ctx context.Context, dah *da.DataAvailabilityHeader) (*retrievalSession, error) { - size := len(dah.RowRoots) +func (r *Retriever) newSession(ctx context.Context, roots *share.AxisRoots) (*retrievalSession, error) { + size := len(roots.RowRoots) adder := ipld.NewNmtNodeAdder(ctx, r.bServ, ipld.MaxSizeBatchOption(size)) proofsVisitor := ipld.ProofsAdderFromCtx(ctx).VisitFn() @@ -149,10 +148,10 @@ func (r *Retriever) newSession(ctx context.Context, dah *da.DataAvailabilityHead } ses := &retrievalSession{ - dah: dah, + roots: roots, bget: blockservice.NewSession(ctx, r.bServ), adder: adder, - squareQuadrants: newQuadrants(dah), + squareQuadrants: newQuadrants(roots), squareCellsLks: make([][]sync.Mutex, size), squareSig: make(chan struct{}, 1), squareDn: make(chan struct{}), @@ -187,12 +186,12 @@ func (rs *retrievalSession) Reconstruct(ctx context.Context) (*rsmt2d.ExtendedDa defer span.End() // and try to repair with what we have - err := rs.square.Repair(rs.dah.RowRoots, rs.dah.ColumnRoots) + err := rs.square.Repair(rs.roots.RowRoots, rs.roots.ColumnRoots) if err != nil { span.RecordError(err) return nil, err } - log.Infow("data square reconstructed", "data_hash", rs.dah.String(), "size", len(rs.dah.RowRoots)) + log.Infow("data square reconstructed", "data_hash", rs.roots.String(), "size", len(rs.roots.RowRoots)) close(rs.squareDn) return rs.square, nil } diff --git a/share/eds/retriever_no_race_test.go b/share/eds/retriever_no_race_test.go index 73f86d555a..e9b8dc1634 100644 --- a/share/eds/retriever_no_race_test.go +++ b/share/eds/retriever_no_race_test.go @@ -9,7 +9,6 @@ import ( "github.com/stretchr/testify/require" - "github.com/celestiaorg/celestia-app/v2/pkg/da" "github.com/celestiaorg/celestia-app/v2/pkg/wrapper" "github.com/celestiaorg/nmt" "github.com/celestiaorg/rsmt2d" @@ -46,10 +45,10 @@ func TestRetriever_ByzantineError(t *testing.T) { require.NoError(t, err) // ensure we rcv an error - dah, err := 
da.NewDataAvailabilityHeader(attackerEDS) + roots, err := share.NewAxisRoots(attackerEDS) require.NoError(t, err) r := NewRetriever(bserv) - _, err = r.Retrieve(ctx, &dah) + _, err = r.Retrieve(ctx, roots) var errByz *byzantine.ErrByzantine require.ErrorAs(t, err, &errByz) } diff --git a/share/eds/retriever_quadrant.go b/share/eds/retriever_quadrant.go index 64cab6366c..97a5b332f2 100644 --- a/share/eds/retriever_quadrant.go +++ b/share/eds/retriever_quadrant.go @@ -6,9 +6,9 @@ import ( "github.com/ipfs/go-cid" - "github.com/celestiaorg/celestia-app/v2/pkg/da" "github.com/celestiaorg/rsmt2d" + "github.com/celestiaorg/celestia-node/share" "github.com/celestiaorg/celestia-node/share/ipld" ) @@ -49,11 +49,11 @@ type quadrant struct { // newQuadrants constructs a slice of quadrants from DAHeader. // There are always 4 quadrants per each source (row and col), so 8 in total. // The ordering of quadrants is random. -func newQuadrants(dah *da.DataAvailabilityHeader) []*quadrant { +func newQuadrants(roots *share.AxisRoots) []*quadrant { // combine all the roots into one slice, so they can be easily accessible by index daRoots := [][][]byte{ - dah.RowRoots, - dah.ColumnRoots, + roots.RowRoots, + roots.ColumnRoots, } // create a quadrant slice for each source(row;col) sources := [][]*quadrant{ diff --git a/share/eds/retriever_test.go b/share/eds/retriever_test.go index 5c098da55e..2381b7cd75 100644 --- a/share/eds/retriever_test.go +++ b/share/eds/retriever_test.go @@ -11,7 +11,6 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "github.com/celestiaorg/celestia-app/v2/pkg/da" "github.com/celestiaorg/rsmt2d" "github.com/celestiaorg/celestia-node/header" @@ -55,9 +54,9 @@ func TestRetriever_Retrieve(t *testing.T) { ctx, cancel := context.WithTimeout(ctx, time.Minute*5) // the timeout is big for the max size which is long defer cancel() - dah, err := da.NewDataAvailabilityHeader(in) + roots, err := share.NewAxisRoots(in) require.NoError(t, 
err) - out, err := r.Retrieve(ctx, &dah) + out, err := r.Retrieve(ctx, roots) require.NoError(t, err) assert.True(t, in.Equals(out)) }) @@ -80,9 +79,9 @@ func TestRetriever_MultipleRandQuadrants(t *testing.T) { in, err := ipld.AddShares(ctx, shares, bServ) require.NoError(t, err) - dah, err := da.NewDataAvailabilityHeader(in) + roots, err := share.NewAxisRoots(in) require.NoError(t, err) - ses, err := r.newSession(ctx, &dah) + ses, err := r.newSession(ctx, roots) require.NoError(t, err) // wait until two additional quadrants requested diff --git a/share/eds/rsmt2d.go b/share/eds/rsmt2d.go new file mode 100644 index 0000000000..416e3530a5 --- /dev/null +++ b/share/eds/rsmt2d.go @@ -0,0 +1,160 @@ +package eds + +import ( + "context" + "fmt" + "io" + + "github.com/celestiaorg/celestia-app/v2/pkg/wrapper" + "github.com/celestiaorg/rsmt2d" + + "github.com/celestiaorg/celestia-node/share" + "github.com/celestiaorg/celestia-node/share/shwap" +) + +var _ AccessorStreamer = (*Rsmt2D)(nil) + +// Rsmt2D is a rsmt2d based in-memory implementation of Accessor. +type Rsmt2D struct { + *rsmt2d.ExtendedDataSquare +} + +// Size returns the size of the Extended Data Square. +func (eds *Rsmt2D) Size(context.Context) int { + return int(eds.Width()) +} + +// DataHash returns data hash of the Accessor. +func (eds *Rsmt2D) DataHash(context.Context) (share.DataHash, error) { + roots, err := share.NewAxisRoots(eds.ExtendedDataSquare) + if err != nil { + return share.DataHash{}, fmt.Errorf("while creating data root: %w", err) + } + return roots.Hash(), nil +} + +// AxisRoots returns AxisRoots of the Accessor. +func (eds *Rsmt2D) AxisRoots(context.Context) (*share.AxisRoots, error) { + roots, err := share.NewAxisRoots(eds.ExtendedDataSquare) + if err != nil { + return nil, fmt.Errorf("while creating axis roots: %w", err) + } + return roots, nil +} + +// Sample returns share and corresponding proof for row and column indices. 
+func (eds *Rsmt2D) Sample( + _ context.Context, + rowIdx, colIdx int, +) (shwap.Sample, error) { + return eds.SampleForProofAxis(rowIdx, colIdx, rsmt2d.Row) +} + +// SampleForProofAxis samples a share from an Extended Data Square based on the provided +// row and column indices and proof axis. It returns a sample with the share and proof. +func (eds *Rsmt2D) SampleForProofAxis( + rowIdx, colIdx int, + proofType rsmt2d.Axis, +) (shwap.Sample, error) { + axisIdx, shrIdx := relativeIndexes(rowIdx, colIdx, proofType) + shares := getAxis(eds.ExtendedDataSquare, proofType, axisIdx) + + tree := wrapper.NewErasuredNamespacedMerkleTree(uint64(eds.Width()/2), uint(axisIdx)) + for _, shr := range shares { + err := tree.Push(shr) + if err != nil { + return shwap.Sample{}, fmt.Errorf("while pushing shares to NMT: %w", err) + } + } + + prf, err := tree.ProveRange(shrIdx, shrIdx+1) + if err != nil { + return shwap.Sample{}, fmt.Errorf("while proving range share over NMT: %w", err) + } + + return shwap.Sample{ + Share: shares[shrIdx], + Proof: &prf, + ProofType: proofType, + }, nil +} + +// AxisHalf returns Shares for the first half of the axis of the given type and index. +func (eds *Rsmt2D) AxisHalf(_ context.Context, axisType rsmt2d.Axis, axisIdx int) (AxisHalf, error) { + shares := getAxis(eds.ExtendedDataSquare, axisType, axisIdx) + halfShares := shares[:eds.Width()/2] + return AxisHalf{ + Shares: halfShares, + IsParity: false, + }, nil +} + +// HalfRow constructs a new shwap.Row from an Extended Data Square based on the specified index and +// side. +func (eds *Rsmt2D) HalfRow(idx int, side shwap.RowSide) shwap.Row { + shares := eds.ExtendedDataSquare.Row(uint(idx)) + return shwap.RowFromShares(shares, side) +} + +// RowNamespaceData returns data for the given namespace and row index. 
+func (eds *Rsmt2D) RowNamespaceData( + _ context.Context, + namespace share.Namespace, + rowIdx int, +) (shwap.RowNamespaceData, error) { + shares := eds.Row(uint(rowIdx)) + return shwap.RowNamespaceDataFromShares(shares, namespace, rowIdx) +} + +// Shares returns data (ODS) shares extracted from the EDS. It returns new copy of the shares each +// time. +func (eds *Rsmt2D) Shares(_ context.Context) ([]share.Share, error) { + return eds.ExtendedDataSquare.FlattenedODS(), nil +} + +func (eds *Rsmt2D) Close() error { + return nil +} + +// Reader returns binary reader for the file. +func (eds *Rsmt2D) Reader() (io.Reader, error) { + getShare := func(rowIdx, colIdx int) ([]byte, error) { + return eds.GetCell(uint(rowIdx), uint(colIdx)), nil + } + odsSize := int(eds.Width() / 2) + reader := NewShareReader(odsSize, getShare) + return reader, nil +} + +// Rsmt2DFromShares constructs an Extended Data Square from shares. +func Rsmt2DFromShares(shares []share.Share, odsSize int) (*Rsmt2D, error) { + treeFn := wrapper.NewConstructor(uint64(odsSize)) + eds, err := rsmt2d.ComputeExtendedDataSquare(shares, share.DefaultRSMT2DCodec(), treeFn) + if err != nil { + return &Rsmt2D{}, fmt.Errorf("computing extended data square: %w", err) + } + + return &Rsmt2D{eds}, nil +} + +func getAxis(eds *rsmt2d.ExtendedDataSquare, axisType rsmt2d.Axis, axisIdx int) []share.Share { + switch axisType { + case rsmt2d.Row: + return eds.Row(uint(axisIdx)) + case rsmt2d.Col: + return eds.Col(uint(axisIdx)) + default: + panic("unknown axis") + } +} + +func relativeIndexes(rowIdx, colIdx int, axisType rsmt2d.Axis) (axisIdx, shrIdx int) { + switch axisType { + case rsmt2d.Row: + return rowIdx, colIdx + case rsmt2d.Col: + return colIdx, rowIdx + default: + panic(fmt.Sprintf("invalid proof type: %d", axisType)) + } +} diff --git a/share/eds/rsmt2d_test.go b/share/eds/rsmt2d_test.go new file mode 100644 index 0000000000..a639aa96e0 --- /dev/null +++ b/share/eds/rsmt2d_test.go @@ -0,0 +1,76 @@ +package eds + 
+import ( + "context" + "testing" + "time" + + "github.com/stretchr/testify/require" + + "github.com/celestiaorg/rsmt2d" + + "github.com/celestiaorg/celestia-node/share" + "github.com/celestiaorg/celestia-node/share/eds/edstest" + "github.com/celestiaorg/celestia-node/share/shwap" +) + +func TestRsmt2dAccessor(t *testing.T) { + odsSize := 16 + newAccessor := func(tb testing.TB, eds *rsmt2d.ExtendedDataSquare) Accessor { + return &Rsmt2D{ExtendedDataSquare: eds} + } + ctx, cancel := context.WithTimeout(context.Background(), time.Second*15) + t.Cleanup(cancel) + + TestSuiteAccessor(ctx, t, newAccessor, odsSize) + + newStreamer := func(tb testing.TB, eds *rsmt2d.ExtendedDataSquare) AccessorStreamer { + return &Rsmt2D{ExtendedDataSquare: eds} + } + TestStreamer(ctx, t, newStreamer, odsSize) +} + +func TestRsmt2dHalfRow(t *testing.T) { + const odsSize = 8 + eds, _ := randRsmt2dAccsessor(t, odsSize) + + for rowIdx := 0; rowIdx < odsSize*2; rowIdx++ { + for _, side := range []shwap.RowSide{shwap.Left, shwap.Right} { + row := eds.HalfRow(rowIdx, side) + + want := eds.Row(uint(rowIdx)) + shares, err := row.Shares() + require.NoError(t, err) + require.Equal(t, want, shares) + } + } +} + +func TestRsmt2dSampleForProofAxis(t *testing.T) { + const odsSize = 8 + eds := edstest.RandEDS(t, odsSize) + accessor := Rsmt2D{ExtendedDataSquare: eds} + + for _, proofType := range []rsmt2d.Axis{rsmt2d.Row, rsmt2d.Col} { + for rowIdx := 0; rowIdx < odsSize*2; rowIdx++ { + for colIdx := 0; colIdx < odsSize*2; colIdx++ { + sample, err := accessor.SampleForProofAxis(rowIdx, colIdx, proofType) + require.NoError(t, err) + + want := eds.GetCell(uint(rowIdx), uint(colIdx)) + require.Equal(t, want, sample.Share) + require.Equal(t, proofType, sample.ProofType) + require.NotNil(t, sample.Proof) + require.Equal(t, sample.Proof.End()-sample.Proof.Start(), 1) + require.Len(t, sample.Proof.Nodes(), 4) + } + } + } +} + +func randRsmt2dAccsessor(t *testing.T, size int) (Rsmt2D, *share.AxisRoots) { + eds 
:= edstest.RandEDS(t, size) + root, err := share.NewAxisRoots(eds) + require.NoError(t, err) + return Rsmt2D{ExtendedDataSquare: eds}, root +} diff --git a/share/eds/share_reader.go b/share/eds/share_reader.go new file mode 100644 index 0000000000..84ab1dffe7 --- /dev/null +++ b/share/eds/share_reader.go @@ -0,0 +1,80 @@ +package eds + +import ( + "bytes" + "errors" + "fmt" + "io" +) + +// ShareReader implements io.Reader over a general function that gets shares by +// their respective Row and Col coordinates. +// It enables share streaming over arbitrary storages. +type ShareReader struct { + // getShare general share getting function for share retrieval + getShare func(rowIdx, colIdx int) ([]byte, error) + + // buf buffers shares from partial reads with default size + buf *bytes.Buffer + // current is the amount of shares that have been emitted by Read so far. When + // current reaches total, Read will prevent further reads by returning io.EOF + current, odsSize, total int +} + +// NewShareReader constructs a new ShareReader from the underlying ODS size and a general share getting function. 
+func NewShareReader(odsSize int, getShare func(rowIdx, colIdx int) ([]byte, error)) *ShareReader { + return &ShareReader{ + getShare: getShare, + buf: bytes.NewBuffer(nil), + odsSize: odsSize, + total: odsSize * odsSize, + } +} + +func (r *ShareReader) Read(p []byte) (int, error) { + if r.current >= r.total && r.buf.Len() == 0 { + return 0, io.EOF + } + // if provided array is smaller than data in buf, read from buf + if len(p) <= r.buf.Len() { + return r.buf.Read(p) + } + n, err := io.ReadFull(r.buf, p) + if err == nil { + return n, nil + } + if !errors.Is(err, io.ErrUnexpectedEOF) && !errors.Is(err, io.EOF) { + return n, fmt.Errorf("unexpected error reading from buf: %w", err) + } + + written := n + for r.current < r.total { + rowIdx, colIdx := r.current/r.odsSize, r.current%r.odsSize + share, err := r.getShare(rowIdx, colIdx) + if err != nil { + return 0, fmt.Errorf("get share: %w", err) + } + + // copy share to provided buffer + emptySpace := len(p) - written + r.current++ + if len(share) < emptySpace { + n := copy(p[written:], share) + written += n + continue + } + + // if share didn't fit into buffer fully, store remaining bytes into inner buf + n := copy(p[written:], share[:emptySpace]) + written += n + n, err = r.buf.Write(share[emptySpace:]) + if err != nil { + return 0, fmt.Errorf("write share to inner buffer: %w", err) + } + if n != len(share)-emptySpace { + return 0, fmt.Errorf("share was not written fully: %w", io.ErrShortWrite) + } + return written, nil + } + return written, nil +} diff --git a/share/eds/share_reader_test.go b/share/eds/share_reader_test.go new file mode 100644 index 0000000000..3cd67a08cd --- /dev/null +++ b/share/eds/share_reader_test.go @@ -0,0 +1,54 @@ +package eds + +import ( + "errors" + "io" + "math/rand" + "testing" + + "github.com/stretchr/testify/require" + + "github.com/celestiaorg/celestia-node/share" + "github.com/celestiaorg/celestia-node/share/eds/edstest" +) + +func TestShareReader(t *testing.T) { + // create 
a random EDS and verify that ShareReader streams back its ODS bytes + odsSize := 16 + eds := edstest.RandEDS(t, odsSize) + getShare := func(rowIdx, colIdx int) ([]byte, error) { + return eds.GetCell(uint(rowIdx), uint(colIdx)), nil + } + + reader := NewShareReader(odsSize, getShare) + readBytes, err := readWithRandomBuffer(reader, 1024) + require.NoError(t, err) + expected := make([]byte, 0, odsSize*odsSize*share.Size) + for _, share := range eds.FlattenedODS() { + expected = append(expected, share...) + } + require.Len(t, readBytes, len(expected)) + require.Equal(t, expected, readBytes) +} + +// readWithRandomBuffer reads from reader with buffers of random sizes. +func readWithRandomBuffer(reader io.Reader, maxBufSize int) ([]byte, error) { + // create buffer of random size + data := make([]byte, 0, maxBufSize) + for { + bufSize := rand.Intn(maxBufSize-1) + 1 + buf := make([]byte, bufSize) + n, err := reader.Read(buf) + if err != nil && !errors.Is(err, io.EOF) { + return nil, err + } + if n < bufSize { + buf = buf[:n] + } + data = append(data, buf...) 
+ if errors.Is(err, io.EOF) { + break + } + } + return data, nil +} diff --git a/share/eds/store.go b/share/eds/store.go deleted file mode 100644 index 249555d69e..0000000000 --- a/share/eds/store.go +++ /dev/null @@ -1,650 +0,0 @@ -package eds - -import ( - "bufio" - "bytes" - "context" - "errors" - "fmt" - "io" - "os" - "sync" - "sync/atomic" - "time" - - "github.com/filecoin-project/dagstore" - "github.com/filecoin-project/dagstore/index" - "github.com/filecoin-project/dagstore/mount" - "github.com/filecoin-project/dagstore/shard" - bstore "github.com/ipfs/boxo/blockstore" - "github.com/ipfs/go-datastore" - carv1 "github.com/ipld/go-car" - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/trace" - - "github.com/celestiaorg/rsmt2d" - - "github.com/celestiaorg/celestia-node/libs/utils" - "github.com/celestiaorg/celestia-node/share" - "github.com/celestiaorg/celestia-node/share/eds/cache" - "github.com/celestiaorg/celestia-node/share/ipld" -) - -const ( - blocksPath = "/blocks/" - indexPath = "/index/" - transientsPath = "/transients/" -) - -var ErrNotFound = errors.New("eds not found in store") - -// Store maintains (via DAGStore) a top-level index enabling granular and efficient random access to -// every share and/or Merkle proof over every registered CARv1 file. The EDSStore provides a custom -// blockstore interface implementation to achieve access. The main use-case is randomized sampling -// over the whole chain of EDS block data and getting data by namespace. -type Store struct { - cancel context.CancelFunc - - dgstr *dagstore.DAGStore - mounts *mount.Registry - - bs *blockstore - cache atomic.Pointer[cache.DoubleCache] - - carIdx index.FullIndexRepo - invertedIdx *simpleInvertedIndex - - basepath string - gcInterval time.Duration - // lastGCResult is only stored on the store for testing purposes. 
- lastGCResult atomic.Pointer[dagstore.GCResult] - - // stripedLocks is used to synchronize parallel operations - stripedLocks [256]sync.Mutex - shardFailures chan dagstore.ShardResult - - metrics *metrics -} - -// NewStore creates a new EDS Store under the given basepath and datastore. -func NewStore(params *Parameters, basePath string, ds datastore.Batching) (*Store, error) { - if err := params.Validate(); err != nil { - return nil, err - } - - err := setupPath(basePath) - if err != nil { - return nil, fmt.Errorf("failed to setup eds.Store directories: %w", err) - } - - r := mount.NewRegistry() - err = r.Register("fs", &inMemoryOnceMount{}) - if err != nil { - return nil, fmt.Errorf("failed to register memory mount on the registry: %w", err) - } - if err != nil { - return nil, fmt.Errorf("failed to register FS mount on the registry: %w", err) - } - - fsRepo, err := index.NewFSRepo(basePath + indexPath) - if err != nil { - return nil, fmt.Errorf("failed to create index repository: %w", err) - } - - invertedIdx, err := newSimpleInvertedIndex(basePath) - if err != nil { - return nil, fmt.Errorf("failed to create index: %w", err) - } - - failureChan := make(chan dagstore.ShardResult) - dagStore, err := dagstore.NewDAGStore( - dagstore.Config{ - TransientsDir: basePath + transientsPath, - IndexRepo: fsRepo, - Datastore: ds, - MountRegistry: r, - TopLevelIndex: invertedIdx, - FailureCh: failureChan, - }, - ) - if err != nil { - return nil, fmt.Errorf("failed to create DAGStore: %w", err) - } - - recentBlocksCache, err := cache.NewAccessorCache("recent", params.RecentBlocksCacheSize) - if err != nil { - return nil, fmt.Errorf("failed to create recent blocks cache: %w", err) - } - - blockstoreCache, err := cache.NewAccessorCache("blockstore", params.BlockstoreCacheSize) - if err != nil { - return nil, fmt.Errorf("failed to create blockstore cache: %w", err) - } - - store := &Store{ - basepath: basePath, - dgstr: dagStore, - carIdx: fsRepo, - invertedIdx: invertedIdx, - 
gcInterval: params.GCInterval, - mounts: r, - shardFailures: failureChan, - } - store.bs = newBlockstore(store, ds) - store.cache.Store(cache.NewDoubleCache(recentBlocksCache, blockstoreCache)) - return store, nil -} - -func (s *Store) Start(ctx context.Context) error { - err := s.dgstr.Start(ctx) - if err != nil { - return err - } - // start Store only if DagStore succeeds - runCtx, cancel := context.WithCancel(context.Background()) - s.cancel = cancel - // initialize empty gc result to avoid panic on access - s.lastGCResult.Store(&dagstore.GCResult{ - Shards: make(map[shard.Key]error), - }) - - if s.gcInterval != 0 { - go s.gc(runCtx) - } - - go s.watchForFailures(runCtx) - return nil -} - -// Stop stops the underlying DAGStore. -func (s *Store) Stop(context.Context) error { - defer s.cancel() - - if err := s.metrics.close(); err != nil { - log.Warnw("failed to close metrics", "err", err) - } - - if err := s.invertedIdx.close(); err != nil { - return err - } - return s.dgstr.Close() -} - -// gc periodically removes all inactive or errored shards. 
-func (s *Store) gc(ctx context.Context) { - ticker := time.NewTicker(s.gcInterval) - defer ticker.Stop() - for { - select { - case <-ctx.Done(): - return - case <-ticker.C: - tnow := time.Now() - res, err := s.dgstr.GC(ctx) - s.metrics.observeGCtime(ctx, time.Since(tnow), err != nil) - if err != nil { - log.Errorf("garbage collecting dagstore: %v", err) - return - } - s.lastGCResult.Store(res) - } - } -} - -func (s *Store) watchForFailures(ctx context.Context) { - for { - select { - case <-ctx.Done(): - return - case res := <-s.shardFailures: - log.Errorw("removing shard after failure", "key", res.Key, "err", res.Error) - s.metrics.observeShardFailure(ctx, res.Key.String()) - k := share.MustDataHashFromString(res.Key.String()) - err := s.Remove(ctx, k) - if err != nil { - log.Errorw("failed to remove shard after failure", "key", res.Key, "err", err) - } - } - } -} - -// Put stores the given data square with DataRoot's hash as a key. -// -// The square is verified on the Exchange level, and Put only stores the square, trusting it. -// The resulting file stores all the shares and NMT Merkle Proofs of the EDS. -// Additionally, the file gets indexed s.t. store.Blockstore can access them. 
-func (s *Store) Put(ctx context.Context, root share.DataHash, square *rsmt2d.ExtendedDataSquare) error { - ctx, span := tracer.Start(ctx, "store/put", trace.WithAttributes( - attribute.Int("width", int(square.Width())), - )) - - tnow := time.Now() - err := s.put(ctx, root, square) - result := putOK - switch { - case errors.Is(err, dagstore.ErrShardExists): - result = putExists - case err != nil: - result = putFailed - } - utils.SetStatusAndEnd(span, err) - s.metrics.observePut(ctx, time.Since(tnow), result, square.Width()) - return err -} - -func (s *Store) put(ctx context.Context, root share.DataHash, square *rsmt2d.ExtendedDataSquare) (err error) { - lk := &s.stripedLocks[root[len(root)-1]] - lk.Lock() - defer lk.Unlock() - - // if root already exists, short-circuit - if has, _ := s.Has(ctx, root); has { - return dagstore.ErrShardExists - } - - key := root.String() - f, err := os.OpenFile(s.basepath+blocksPath+key, os.O_CREATE|os.O_WRONLY, 0o600) - if err != nil { - return err - } - defer closeAndLog("car file", f) - - // save encoded eds into buffer - mount := &inMemoryOnceMount{ - // TODO: buffer could be pre-allocated with capacity calculated based on eds size. 
- buf: bytes.NewBuffer(nil), - FileMount: mount.FileMount{Path: s.basepath + blocksPath + key}, - } - err = WriteEDS(ctx, square, mount) - if err != nil { - return fmt.Errorf("failed to write EDS to file: %w", err) - } - - // write whole buffered mount data in one go to optimize i/o - if _, err = mount.WriteTo(f); err != nil { - return fmt.Errorf("failed to write EDS to file: %w", err) - } - - ch := make(chan dagstore.ShardResult, 1) - err = s.dgstr.RegisterShard(ctx, shard.KeyFromString(key), mount, ch, dagstore.RegisterOpts{}) - if err != nil { - return fmt.Errorf("failed to initiate shard registration: %w", err) - } - - var result dagstore.ShardResult - select { - case result = <-ch: - case <-ctx.Done(): - // if the context finished before the result was received, track the result in a separate goroutine - go trackLateResult("put", ch, s.metrics, time.Minute*5) - return ctx.Err() - } - - if result.Error != nil { - return fmt.Errorf("failed to register shard: %w", result.Error) - } - - // the accessor returned in the result will be nil, so the shard needs to be acquired first to - // become available in the cache. It might take some time, and the result should not affect the put - // operation, so do it in a goroutine - // TODO: Ideally, only recent blocks should be put in the cache, but there is no way right now to - // check such a condition. - go func() { - ctx, cancel := context.WithTimeout(context.Background(), time.Minute) - defer cancel() - ac, err := s.cache.Load().First().GetOrLoad(ctx, result.Key, s.getAccessor) - if err != nil { - log.Warnw("unable to put accessor to recent blocks accessors cache", "err", err) - return - } - - // need to close returned accessor to remove the reader reference - if err := ac.Close(); err != nil { - log.Warnw("unable to close accessor after loading", "err", err) - } - }() - - return nil -} - -// waitForResult waits for a result from the res channel for a maximum duration specified by -// maxWait. 
If the result is not received within the specified duration, it logs an error -// indicating that the parent context has expired and the shard registration is stuck. If a result -// is received, it checks for any error and logs appropriate messages. -func trackLateResult(opName string, res <-chan dagstore.ShardResult, metrics *metrics, maxWait time.Duration) { - tnow := time.Now() - select { - case <-time.After(maxWait): - metrics.observeLongOp(context.Background(), opName, time.Since(tnow), longOpUnresolved) - log.Errorf("parent context is expired, while register shard is stuck for more than %v sec", time.Since(tnow)) - return - case result := <-res: - // don't observe if result was received right after launch of the func - if time.Since(tnow) < time.Second { - return - } - if result.Error != nil { - metrics.observeLongOp(context.Background(), opName, time.Since(tnow), longOpFailed) - log.Errorf("failed to register shard after context expired: %v ago, err: %s", time.Since(tnow), result.Error) - return - } - metrics.observeLongOp(context.Background(), opName, time.Since(tnow), longOpOK) - log.Warnf("parent context expired, but register shard finished with no error,"+ - " after context expired: %v ago", time.Since(tnow)) - return - } -} - -// GetCAR takes a DataRoot and returns a buffered reader to the respective EDS serialized as a -// CARv1 file. -// The Reader strictly reads the CAR header and first quadrant (1/4) of the EDS, omitting all the -// NMT Merkle proofs. Integrity of the store data is not verified. -// -// The shard is cached in the Store, so subsequent calls to GetCAR with the same root will use the -// same reader. The cache is responsible for closing the underlying reader. 
-func (s *Store) GetCAR(ctx context.Context, root share.DataHash) (io.ReadCloser, error) { - ctx, span := tracer.Start(ctx, "store/get-car") - tnow := time.Now() - r, err := s.getCAR(ctx, root) - s.metrics.observeGetCAR(ctx, time.Since(tnow), err != nil) - utils.SetStatusAndEnd(span, err) - return r, err -} - -func (s *Store) getCAR(ctx context.Context, root share.DataHash) (io.ReadCloser, error) { - key := shard.KeyFromString(root.String()) - accessor, err := s.cache.Load().Get(key) - if err == nil { - return newReadCloser(accessor), nil - } - // If the accessor is not found in the cache, create a new one from dagstore. We don't put the - // accessor in the cache here because getCAR is used by shrex-eds. There is a lower probability, - // compared to other cache put triggers, that the same block will be requested again soon. - shardAccessor, err := s.getAccessor(ctx, key) - if err != nil { - return nil, fmt.Errorf("failed to get accessor: %w", err) - } - - return newReadCloser(shardAccessor), nil -} - -// Blockstore returns an IPFS blockstore providing access to individual shares/nodes of all EDS -// registered on the Store. NOTE: The blockstore does not store whole Celestia Blocks but IPFS -// blocks. We represent `shares` and NMT Merkle proofs as IPFS blocks and IPLD nodes so Bitswap can -// access those. -func (s *Store) Blockstore() bstore.Blockstore { - return s.bs -} - -// CARBlockstore returns an IPFS Blockstore providing access to individual shares/nodes of a -// specific EDS identified by DataHash and registered on the Store. NOTE: The Blockstore does not -// store whole Celestia Blocks but IPFS blocks. We represent `shares` and NMT Merkle proofs as IPFS -// blocks and IPLD nodes so Bitswap can access those. 
-func (s *Store) CARBlockstore( - ctx context.Context, - root share.DataHash, -) (*BlockstoreCloser, error) { - ctx, span := tracer.Start(ctx, "store/car-blockstore") - tnow := time.Now() - cbs, err := s.carBlockstore(ctx, root) - s.metrics.observeCARBlockstore(ctx, time.Since(tnow), err != nil) - utils.SetStatusAndEnd(span, err) - return cbs, err -} - -func (s *Store) carBlockstore( - ctx context.Context, - root share.DataHash, -) (*BlockstoreCloser, error) { - key := shard.KeyFromString(root.String()) - accessor, err := s.cache.Load().Get(key) - if err == nil { - return blockstoreCloser(accessor) - } - - // if the accessor is not found in the cache, create a new one from dagstore - sa, err := s.getAccessor(ctx, key) - if err != nil { - return nil, fmt.Errorf("failed to get accessor: %w", err) - } - return blockstoreCloser(sa) -} - -// GetDAH returns the DataAvailabilityHeader for the EDS identified by DataHash. -func (s *Store) GetDAH(ctx context.Context, root share.DataHash) (*share.Root, error) { - ctx, span := tracer.Start(ctx, "store/car-dah") - tnow := time.Now() - r, err := s.getDAH(ctx, root) - s.metrics.observeGetDAH(ctx, time.Since(tnow), err != nil) - utils.SetStatusAndEnd(span, err) - return r, err -} - -func (s *Store) getDAH(ctx context.Context, root share.DataHash) (*share.Root, error) { - r, err := s.getCAR(ctx, root) - if err != nil { - return nil, fmt.Errorf("eds/store: failed to get CAR file: %w", err) - } - defer closeAndLog("car reader", r) - - carHeader, err := carv1.ReadHeader(bufio.NewReader(r)) - if err != nil { - return nil, fmt.Errorf("eds/store: failed to read car header: %w", err) - } - - dah := dahFromCARHeader(carHeader) - if !bytes.Equal(dah.Hash(), root) { - return nil, fmt.Errorf("eds/store: content integrity mismatch from CAR for root %x", root) - } - return dah, nil -} - -// dahFromCARHeader returns the DataAvailabilityHeader stored in the CIDs of a CARv1 header. 
-func dahFromCARHeader(carHeader *carv1.CarHeader) *share.Root { - rootCount := len(carHeader.Roots) - rootBytes := make([][]byte, 0, rootCount) - for _, root := range carHeader.Roots { - rootBytes = append(rootBytes, ipld.NamespacedSha256FromCID(root)) - } - return &share.Root{ - RowRoots: rootBytes[:rootCount/2], - ColumnRoots: rootBytes[rootCount/2:], - } -} - -func (s *Store) getAccessor(ctx context.Context, key shard.Key) (cache.Accessor, error) { - ch := make(chan dagstore.ShardResult, 1) - err := s.dgstr.AcquireShard(ctx, key, ch, dagstore.AcquireOpts{}) - if err != nil { - if errors.Is(err, dagstore.ErrShardUnknown) { - return nil, ErrNotFound - } - return nil, fmt.Errorf("failed to initialize shard acquisition: %w", err) - } - - select { - case res := <-ch: - if res.Error != nil { - return nil, fmt.Errorf("failed to acquire shard: %w", res.Error) - } - return res.Accessor, nil - case <-ctx.Done(): - go trackLateResult("get_shard", ch, s.metrics, time.Minute) - return nil, ctx.Err() - } -} - -// Remove removes EDS from Store by the given share.Root hash and cleans up all -// the indexing. 
-func (s *Store) Remove(ctx context.Context, root share.DataHash) error { - ctx, span := tracer.Start(ctx, "store/remove") - tnow := time.Now() - err := s.remove(ctx, root) - s.metrics.observeRemove(ctx, time.Since(tnow), err != nil) - utils.SetStatusAndEnd(span, err) - return err -} - -func (s *Store) remove(ctx context.Context, root share.DataHash) (err error) { - key := shard.KeyFromString(root.String()) - // remove open links to accessor from cache - if err := s.cache.Load().Remove(key); err != nil { - log.Warnw("remove accessor from cache", "err", err) - } - ch := make(chan dagstore.ShardResult, 1) - err = s.dgstr.DestroyShard(ctx, key, ch, dagstore.DestroyOpts{}) - if err != nil { - return fmt.Errorf("failed to initiate shard destruction: %w", err) - } - - select { - case result := <-ch: - if result.Error != nil { - return fmt.Errorf("failed to destroy shard: %w", result.Error) - } - case <-ctx.Done(): - go trackLateResult("remove", ch, s.metrics, time.Minute) - return ctx.Err() - } - - dropped, err := s.carIdx.DropFullIndex(key) - if !dropped { - log.Warnf("failed to drop index for %s", key) - } - if err != nil { - return fmt.Errorf("failed to drop index for %s: %w", key, err) - } - - err = os.Remove(s.basepath + blocksPath + root.String()) - if err != nil { - return fmt.Errorf("failed to remove CAR file: %w", err) - } - return nil -} - -// Get reads EDS out of Store by given DataRoot. -// -// It reads only one quadrant(1/4) of the EDS and verifies the integrity of the stored data by -// recomputing it. 
-func (s *Store) Get(ctx context.Context, root share.DataHash) (*rsmt2d.ExtendedDataSquare, error) { - ctx, span := tracer.Start(ctx, "store/get") - tnow := time.Now() - eds, err := s.get(ctx, root) - s.metrics.observeGet(ctx, time.Since(tnow), err != nil) - utils.SetStatusAndEnd(span, err) - return eds, err -} - -func (s *Store) get(ctx context.Context, root share.DataHash) (eds *rsmt2d.ExtendedDataSquare, err error) { - ctx, span := tracer.Start(ctx, "store/get") - defer func() { - utils.SetStatusAndEnd(span, err) - }() - - r, err := s.getCAR(ctx, root) - if err != nil { - return nil, fmt.Errorf("failed to get CAR file: %w", err) - } - defer closeAndLog("car reader", r) - - eds, err = ReadEDS(ctx, r, root) - if err != nil { - return nil, fmt.Errorf("failed to read EDS from CAR file: %w", err) - } - return eds, nil -} - -// Has checks if EDS exists by the given share.Root hash. -func (s *Store) Has(ctx context.Context, root share.DataHash) (has bool, err error) { - ctx, span := tracer.Start(ctx, "store/has") - tnow := time.Now() - eds, err := s.has(ctx, root) - s.metrics.observeHas(ctx, time.Since(tnow), err != nil) - utils.SetStatusAndEnd(span, err) - return eds, err -} - -func (s *Store) has(_ context.Context, root share.DataHash) (bool, error) { - key := root.String() - info, err := s.dgstr.GetShardInfo(shard.KeyFromString(key)) - switch { - case err == nil: - return true, info.Error - case errors.Is(err, dagstore.ErrShardUnknown): - return false, info.Error - default: - return false, err - } -} - -// List lists all the registered EDSes. 
-func (s *Store) List() ([]share.DataHash, error) { - ctx, span := tracer.Start(context.Background(), "store/list") - tnow := time.Now() - hashes, err := s.list() - s.metrics.observeList(ctx, time.Since(tnow), err != nil) - utils.SetStatusAndEnd(span, err) - return hashes, err -} - -func (s *Store) list() ([]share.DataHash, error) { - shards := s.dgstr.AllShardsInfo() - hashes := make([]share.DataHash, 0, len(shards)) - for shrd := range shards { - hash := share.MustDataHashFromString(shrd.String()) - hashes = append(hashes, hash) - } - return hashes, nil -} - -func setupPath(basepath string) error { - err := os.MkdirAll(basepath+blocksPath, os.ModePerm) - if err != nil { - return fmt.Errorf("failed to create blocks directory: %w", err) - } - err = os.MkdirAll(basepath+transientsPath, os.ModePerm) - if err != nil { - return fmt.Errorf("failed to create transients directory: %w", err) - } - err = os.MkdirAll(basepath+indexPath, os.ModePerm) - if err != nil { - return fmt.Errorf("failed to create index directory: %w", err) - } - return nil -} - -// inMemoryOnceMount is used to allow reading once from buffer before using main mount.Reader -type inMemoryOnceMount struct { - buf *bytes.Buffer - - readOnce atomic.Bool - mount.FileMount -} - -func (m *inMemoryOnceMount) Fetch(ctx context.Context) (mount.Reader, error) { - if m.buf != nil && !m.readOnce.Swap(true) { - reader := &inMemoryReader{Reader: bytes.NewReader(m.buf.Bytes())} - // release memory for gc, otherwise buffer will stick forever - m.buf = nil - return reader, nil - } - return m.FileMount.Fetch(ctx) -} - -func (m *inMemoryOnceMount) Write(b []byte) (int, error) { - return m.buf.Write(b) -} - -func (m *inMemoryOnceMount) WriteTo(w io.Writer) (int64, error) { - return io.Copy(w, bytes.NewReader(m.buf.Bytes())) -} - -// inMemoryReader extends bytes.Reader to implement mount.Reader interface -type inMemoryReader struct { - *bytes.Reader -} - -// Close allows inMemoryReader to satisfy mount.Reader interface 
-func (r *inMemoryReader) Close() error { - return nil -} diff --git a/share/eds/store_options.go b/share/eds/store_options.go deleted file mode 100644 index c8dcc69136..0000000000 --- a/share/eds/store_options.go +++ /dev/null @@ -1,43 +0,0 @@ -package eds - -import ( - "errors" - "time" -) - -type Parameters struct { - // GC performs DAG store garbage collection by reclaiming transient files of - // shards that are currently available but inactive, or errored. - // We don't use transient files right now, so GC is turned off by default. - GCInterval time.Duration - - // RecentBlocksCacheSize is the size of the cache for recent blocks. - RecentBlocksCacheSize int - - // BlockstoreCacheSize is the size of the cache for blockstore requested accessors. - BlockstoreCacheSize int -} - -// DefaultParameters returns the default configuration values for the EDS store parameters. -func DefaultParameters() *Parameters { - return &Parameters{ - GCInterval: 0, - RecentBlocksCacheSize: 10, - BlockstoreCacheSize: 128, - } -} - -func (p *Parameters) Validate() error { - if p.GCInterval < 0 { - return errors.New("eds: GC interval cannot be negative") - } - - if p.RecentBlocksCacheSize < 1 { - return errors.New("eds: recent blocks cache size must be positive") - } - - if p.BlockstoreCacheSize < 1 { - return errors.New("eds: blockstore cache size must be positive") - } - return nil -} diff --git a/share/eds/store_test.go b/share/eds/store_test.go deleted file mode 100644 index c5d87f7352..0000000000 --- a/share/eds/store_test.go +++ /dev/null @@ -1,539 +0,0 @@ -package eds - -import ( - "context" - "io" - "os" - "sync" - "testing" - "time" - - "github.com/filecoin-project/dagstore" - "github.com/filecoin-project/dagstore/shard" - "github.com/ipfs/go-cid" - "github.com/ipfs/go-datastore" - ds_sync "github.com/ipfs/go-datastore/sync" - dsbadger "github.com/ipfs/go-ds-badger4" - "github.com/ipld/go-car" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - 
"github.com/celestiaorg/celestia-app/v2/pkg/da" - "github.com/celestiaorg/rsmt2d" - - "github.com/celestiaorg/celestia-node/share" - "github.com/celestiaorg/celestia-node/share/eds/cache" - "github.com/celestiaorg/celestia-node/share/eds/edstest" - "github.com/celestiaorg/celestia-node/share/ipld" -) - -func TestEDSStore(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - t.Cleanup(cancel) - - edsStore, err := newStore(t) - require.NoError(t, err) - err = edsStore.Start(ctx) - require.NoError(t, err) - - // PutRegistersShard tests if Put registers the shard on the underlying DAGStore - t.Run("PutRegistersShard", func(t *testing.T) { - eds, dah := randomEDS(t) - - // shard hasn't been registered yet - has, err := edsStore.Has(ctx, dah.Hash()) - assert.False(t, has) - assert.NoError(t, err) - - err = edsStore.Put(ctx, dah.Hash(), eds) - assert.NoError(t, err) - - _, err = edsStore.dgstr.GetShardInfo(shard.KeyFromString(dah.String())) - assert.NoError(t, err) - }) - - // PutIndexesEDS ensures that Putting an EDS indexes it into the car index - t.Run("PutIndexesEDS", func(t *testing.T) { - eds, dah := randomEDS(t) - - stat, _ := edsStore.carIdx.StatFullIndex(shard.KeyFromString(dah.String())) - assert.False(t, stat.Exists) - - err = edsStore.Put(ctx, dah.Hash(), eds) - assert.NoError(t, err) - - stat, err = edsStore.carIdx.StatFullIndex(shard.KeyFromString(dah.String())) - assert.True(t, stat.Exists) - assert.NoError(t, err) - }) - - // GetCAR ensures that the reader returned from GetCAR is capable of reading the CAR header and - // ODS. 
- t.Run("GetCAR", func(t *testing.T) { - eds, dah := randomEDS(t) - - err = edsStore.Put(ctx, dah.Hash(), eds) - require.NoError(t, err) - - r, err := edsStore.GetCAR(ctx, dah.Hash()) - assert.NoError(t, err) - defer func() { - require.NoError(t, r.Close()) - }() - carReader, err := car.NewCarReader(r) - assert.NoError(t, err) - - for i := 0; i < 4; i++ { - for j := 0; j < 4; j++ { - original := eds.GetCell(uint(i), uint(j)) - block, err := carReader.Next() - assert.NoError(t, err) - assert.Equal(t, original, share.GetData(block.RawData())) - } - } - }) - - t.Run("item not exist", func(t *testing.T) { - root := share.DataHash{1} - _, err := edsStore.GetCAR(ctx, root) - assert.ErrorIs(t, err, ErrNotFound) - - _, err = edsStore.GetDAH(ctx, root) - assert.ErrorIs(t, err, ErrNotFound) - - _, err = edsStore.CARBlockstore(ctx, root) - assert.ErrorIs(t, err, ErrNotFound) - }) - - t.Run("Remove", func(t *testing.T) { - eds, dah := randomEDS(t) - - err = edsStore.Put(ctx, dah.Hash(), eds) - require.NoError(t, err) - - // assert that file now exists - _, err = os.Stat(edsStore.basepath + blocksPath + dah.String()) - assert.NoError(t, err) - - // accessor will be registered in cache async on put, so give it some time to settle - time.Sleep(time.Millisecond * 100) - - err = edsStore.Remove(ctx, dah.Hash()) - assert.NoError(t, err) - - // shard should no longer be registered on the dagstore - _, err = edsStore.dgstr.GetShardInfo(shard.KeyFromString(dah.String())) - assert.Error(t, err, "shard not found") - - // shard should have been dropped from the index, which also removes the file under /index/ - indexStat, err := edsStore.carIdx.StatFullIndex(shard.KeyFromString(dah.String())) - assert.NoError(t, err) - assert.False(t, indexStat.Exists) - - // file no longer exists - _, err = os.Stat(edsStore.basepath + blocksPath + dah.String()) - assert.ErrorContains(t, err, "no such file or directory") - }) - - t.Run("Remove after OpShardFail", func(t *testing.T) { - eds, dah := 
randomEDS(t) - - err = edsStore.Put(ctx, dah.Hash(), eds) - require.NoError(t, err) - - // assert that shard now exists - ok, err := edsStore.Has(ctx, dah.Hash()) - assert.NoError(t, err) - assert.True(t, ok) - - // assert that file now exists - path := edsStore.basepath + blocksPath + dah.String() - _, err = os.Stat(path) - assert.NoError(t, err) - - err = os.Remove(path) - assert.NoError(t, err) - - // accessor will be registered in cache async on put, so give it some time to settle - time.Sleep(time.Millisecond * 100) - - // remove non-failed accessor from cache - err = edsStore.cache.Load().Remove(shard.KeyFromString(dah.String())) - assert.NoError(t, err) - - _, err = edsStore.GetCAR(ctx, dah.Hash()) - assert.Error(t, err) - - ticker := time.NewTicker(time.Millisecond * 100) - defer ticker.Stop() - for { - select { - case <-ticker.C: - has, err := edsStore.Has(ctx, dah.Hash()) - if err == nil && !has { - // shard no longer exists after OpShardFail was detected from GetCAR call - return - } - case <-ctx.Done(): - t.Fatal("timeout waiting for shard to be removed") - } - } - }) - - t.Run("Has", func(t *testing.T) { - eds, dah := randomEDS(t) - - ok, err := edsStore.Has(ctx, dah.Hash()) - assert.NoError(t, err) - assert.False(t, ok) - - err = edsStore.Put(ctx, dah.Hash(), eds) - assert.NoError(t, err) - - ok, err = edsStore.Has(ctx, dah.Hash()) - assert.NoError(t, err) - assert.True(t, ok) - }) - - t.Run("RecentBlocksCache", func(t *testing.T) { - eds, dah := randomEDS(t) - err = edsStore.Put(ctx, dah.Hash(), eds) - require.NoError(t, err) - - // accessor will be registered in cache async on put, so give it some time to settle - time.Sleep(time.Millisecond * 100) - - // check, that the key is in the cache after put - shardKey := shard.KeyFromString(dah.String()) - _, err = edsStore.cache.Load().Get(shardKey) - assert.NoError(t, err) - }) - - t.Run("List", func(t *testing.T) { - const amount = 10 - hashes := make([]share.DataHash, 0, amount) - for range 
make([]byte, amount) { - eds, dah := randomEDS(t) - err = edsStore.Put(ctx, dah.Hash(), eds) - require.NoError(t, err) - hashes = append(hashes, dah.Hash()) - } - - hashesOut, err := edsStore.List() - require.NoError(t, err) - for _, hash := range hashes { - assert.Contains(t, hashesOut, hash) - } - }) - - t.Run("Parallel put", func(t *testing.T) { - const amount = 20 - eds, dah := randomEDS(t) - - wg := sync.WaitGroup{} - for i := 1; i < amount; i++ { - wg.Add(1) - go func() { - defer wg.Done() - err := edsStore.Put(ctx, dah.Hash(), eds) - if err != nil { - require.ErrorIs(t, err, dagstore.ErrShardExists) - } - }() - } - wg.Wait() - - eds, err := edsStore.Get(ctx, dah.Hash()) - require.NoError(t, err) - newDah, err := da.NewDataAvailabilityHeader(eds) - require.NoError(t, err) - require.Equal(t, dah.Hash(), newDah.Hash()) - }) -} - -// TestEDSStore_GC verifies that unused transient shards are collected by the GC periodically. -func TestEDSStore_GC(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - t.Cleanup(cancel) - - edsStore, err := newStore(t) - edsStore.gcInterval = time.Second - require.NoError(t, err) - - // kicks off the gc goroutine - err = edsStore.Start(ctx) - require.NoError(t, err) - - eds, dah := randomEDS(t) - shardKey := shard.KeyFromString(dah.String()) - - err = edsStore.Put(ctx, dah.Hash(), eds) - require.NoError(t, err) - - // accessor will be registered in cache async on put, so give it some time to settle - time.Sleep(time.Millisecond * 100) - - // remove links to the shard from cache - time.Sleep(time.Millisecond * 100) - key := shard.KeyFromString(share.DataHash(dah.Hash()).String()) - err = edsStore.cache.Load().Remove(key) - require.NoError(t, err) - - // doesn't exist yet - assert.NotContains(t, edsStore.lastGCResult.Load().Shards, shardKey) - - // wait for gc to run, retry three times - for i := 0; i < 3; i++ { - time.Sleep(edsStore.gcInterval) - if _, ok := edsStore.lastGCResult.Load().Shards[shardKey]; ok { - 
break - } - } - assert.Contains(t, edsStore.lastGCResult.Load().Shards, shardKey) - - // assert nil in this context means there was no error re-acquiring the shard during GC - assert.Nil(t, edsStore.lastGCResult.Load().Shards[shardKey]) -} - -func Test_BlockstoreCache(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - t.Cleanup(cancel) - - edsStore, err := newStore(t) - require.NoError(t, err) - err = edsStore.Start(ctx) - require.NoError(t, err) - - // store eds to the store with noopCache to allow clean cache after put - swap := edsStore.cache.Load() - edsStore.cache.Store(cache.NewDoubleCache(cache.NoopCache{}, cache.NoopCache{})) - eds, dah := randomEDS(t) - err = edsStore.Put(ctx, dah.Hash(), eds) - require.NoError(t, err) - - // get any key from saved eds - bs, err := edsStore.carBlockstore(ctx, dah.Hash()) - require.NoError(t, err) - defer func() { - require.NoError(t, bs.Close()) - }() - keys, err := bs.AllKeysChan(ctx) - require.NoError(t, err) - var key cid.Cid - select { - case key = <-keys: - case <-ctx.Done(): - t.Fatal("context timeout") - } - - // swap back original cache - edsStore.cache.Store(swap) - - // key shouldn't be in cache yet, check for returned errCacheMiss - shardKey := shard.KeyFromString(dah.String()) - _, err = edsStore.cache.Load().Get(shardKey) - require.Error(t, err) - - // now get it from blockstore, to trigger storing to cache - _, err = edsStore.Blockstore().Get(ctx, key) - require.NoError(t, err) - - // should be no errCacheMiss anymore - _, err = edsStore.cache.Load().Get(shardKey) - require.NoError(t, err) -} - -// Test_CachedAccessor verifies that the reader represented by a cached accessor can be read from -// multiple times, without exhausting the underlying reader. 
-func Test_CachedAccessor(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - t.Cleanup(cancel) - - edsStore, err := newStore(t) - require.NoError(t, err) - err = edsStore.Start(ctx) - require.NoError(t, err) - - eds, dah := randomEDS(t) - err = edsStore.Put(ctx, dah.Hash(), eds) - require.NoError(t, err) - - // accessor will be registered in cache async on put, so give it some time to settle - time.Sleep(time.Millisecond * 100) - - // accessor should be in cache - _, err = edsStore.cache.Load().Get(shard.KeyFromString(dah.String())) - require.NoError(t, err) - - // first read from cached accessor - carReader, err := edsStore.getCAR(ctx, dah.Hash()) - require.NoError(t, err) - firstBlock, err := io.ReadAll(carReader) - require.NoError(t, err) - require.NoError(t, carReader.Close()) - - // second read from cached accessor - carReader, err = edsStore.getCAR(ctx, dah.Hash()) - require.NoError(t, err) - secondBlock, err := io.ReadAll(carReader) - require.NoError(t, err) - require.NoError(t, carReader.Close()) - - require.Equal(t, firstBlock, secondBlock) -} - -// Test_CachedAccessor verifies that the reader represented by a accessor obtained directly from -// dagstore can be read from multiple times, without exhausting the underlying reader. 
-func Test_NotCachedAccessor(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - t.Cleanup(cancel) - - edsStore, err := newStore(t) - require.NoError(t, err) - err = edsStore.Start(ctx) - require.NoError(t, err) - // replace cache with noopCache to - edsStore.cache.Store(cache.NewDoubleCache(cache.NoopCache{}, cache.NoopCache{})) - - eds, dah := randomEDS(t) - err = edsStore.Put(ctx, dah.Hash(), eds) - require.NoError(t, err) - - // accessor will be registered in cache async on put, so give it some time to settle - time.Sleep(time.Millisecond * 100) - - // accessor should not be in cache - _, err = edsStore.cache.Load().Get(shard.KeyFromString(dah.String())) - require.Error(t, err) - - // first read from direct accessor (not from cache) - carReader, err := edsStore.getCAR(ctx, dah.Hash()) - require.NoError(t, err) - firstBlock, err := io.ReadAll(carReader) - require.NoError(t, err) - require.NoError(t, carReader.Close()) - - // second read from direct accessor (not from cache) - carReader, err = edsStore.getCAR(ctx, dah.Hash()) - require.NoError(t, err) - secondBlock, err := io.ReadAll(carReader) - require.NoError(t, err) - require.NoError(t, carReader.Close()) - - require.Equal(t, firstBlock, secondBlock) -} - -func BenchmarkStore(b *testing.B) { - ctx, cancel := context.WithCancel(context.Background()) - b.Cleanup(cancel) - - ds := ds_sync.MutexWrap(datastore.NewMapDatastore()) - edsStore, err := NewStore(DefaultParameters(), b.TempDir(), ds) - require.NoError(b, err) - err = edsStore.Start(ctx) - require.NoError(b, err) - - // BenchmarkStore/bench_put_128-10 10 3231859283 ns/op (~3sec) - b.Run("bench put 128", func(b *testing.B) { - b.ResetTimer() - for i := 0; i < b.N; i++ { - // pause the timer for initializing test data - b.StopTimer() - eds := edstest.RandEDS(b, 128) - dah, err := share.NewRoot(eds) - require.NoError(b, err) - b.StartTimer() - - err = edsStore.Put(ctx, dah.Hash(), eds) - require.NoError(b, err) - } - }) - - // 
BenchmarkStore/bench_read_128-10 14 78970661 ns/op (~70ms) - b.Run("bench read 128", func(b *testing.B) { - b.ResetTimer() - for i := 0; i < b.N; i++ { - // pause the timer for initializing test data - b.StopTimer() - eds := edstest.RandEDS(b, 128) - dah, err := share.NewRoot(eds) - require.NoError(b, err) - _ = edsStore.Put(ctx, dah.Hash(), eds) - b.StartTimer() - - _, err = edsStore.Get(ctx, dah.Hash()) - require.NoError(b, err) - } - }) -} - -// BenchmarkCacheEviction benchmarks the time it takes to load a block to the cache, when the -// cache size is set to 1. This forces cache eviction on every read. -// BenchmarkCacheEviction-10/128 384 3533586 ns/op (~3ms) -func BenchmarkCacheEviction(b *testing.B) { - const ( - blocks = 4 - size = 128 - ) - - ctx, cancel := context.WithTimeout(context.Background(), time.Second*30) - b.Cleanup(cancel) - - dir := b.TempDir() - ds, err := dsbadger.NewDatastore(dir, &dsbadger.DefaultOptions) - require.NoError(b, err) - - newStore := func(params *Parameters) *Store { - edsStore, err := NewStore(params, dir, ds) - require.NoError(b, err) - err = edsStore.Start(ctx) - require.NoError(b, err) - return edsStore - } - edsStore := newStore(DefaultParameters()) - - // generate EDSs and store them - cids := make([]cid.Cid, blocks) - for i := range cids { - eds := edstest.RandEDS(b, size) - dah, err := da.NewDataAvailabilityHeader(eds) - require.NoError(b, err) - err = edsStore.Put(ctx, dah.Hash(), eds) - require.NoError(b, err) - - // store cids for read loop later - cids[i] = ipld.MustCidFromNamespacedSha256(dah.RowRoots[0]) - } - - // restart store to clear cache - require.NoError(b, edsStore.Stop(ctx)) - - // set BlockstoreCacheSize to 1 to force eviction on every read - params := DefaultParameters() - params.BlockstoreCacheSize = 1 - bstore := newStore(params).Blockstore() - - // start benchmark - b.ResetTimer() - for i := 0; i < b.N; i++ { - h := cids[i%blocks] - // every read will trigger eviction - _, err := bstore.Get(ctx, h) - 
require.NoError(b, err) - } -} - -func newStore(t *testing.T) (*Store, error) { - t.Helper() - - ds := ds_sync.MutexWrap(datastore.NewMapDatastore()) - return NewStore(DefaultParameters(), t.TempDir(), ds) -} - -func randomEDS(t *testing.T) (*rsmt2d.ExtendedDataSquare, *share.Root) { - eds := edstest.RandEDS(t, 4) - dah, err := share.NewRoot(eds) - require.NoError(t, err) - - return eds, dah -} diff --git a/share/eds/testdata/README.md b/share/eds/testdata/README.md deleted file mode 100644 index 960549e2a0..0000000000 --- a/share/eds/testdata/README.md +++ /dev/null @@ -1,5 +0,0 @@ -# CARxEDS Testdata - -This directory contains an example CARv1 file of an EDS and its matching data availability header. - -They might need to be regenerated when modifying constants such as the default share size. This can be done by running the test utility in `eds_test.go` called `createTestData`. diff --git a/share/eds/testdata/example-root.json b/share/eds/testdata/example-root.json deleted file mode 100644 index 999d6301b6..0000000000 --- a/share/eds/testdata/example-root.json +++ /dev/null @@ -1,22 +0,0 @@ -{ -"row_roots": [ -"AAAAAAAAAAAAAAAAAAAAAAAAABPYEuDlO9Dz69oAAAAAAAAAAAAAAAAAAAAAAAAAMcklN0h38T4b/UBC/Cmr5YWmjmmxvi1e35vZBW14b8gDHBoTFVvY6H4J", -"AAAAAAAAAAAAAAAAAAAAAAAAADxyZecUZD41W5IAAAAAAAAAAAAAAAAAAAAAAAAAh8vQUZ38PaWyeUs7dQhphIuRIKiGaTr4KFwEhMRhejTd6/4NHdnKTDyY", -"AAAAAAAAAAAAAAAAAAAAAAAAAKDQatbQSwQ9uJsAAAAAAAAAAAAAAAAAAAAAAAAArtdqXCSsM1OlVCRZqqfZDnEO9eC5cwlgy5MQHb2g4NLr7nZYTruiOoz7", -"AAAAAAAAAAAAAAAAAAAAAAAAAMeUhM8LZBo9sWwAAAAAAAAAAAAAAAAAAAAAAAAA8PtvJpbDc4APKOK6MT1k61HuQXwauWw3nFWwr9pSljiYMv6jjjdLDF8o", -"/////////////////////////////////////////////////////////////////////////////xnHmhDh4Y8vfJrgewAcvLWpvI5XOyATj1IQDkCwvIEh", -"/////////////////////////////////////////////////////////////////////////////+qngp0AfoykfXwsMBukRtYxNA/bzW0+F3J7Q/+S1YZJ", 
-"/////////////////////////////////////////////////////////////////////////////4WNPrME/2MLrIZgAUoKaVx2GzJqDcYGrBg+sudPKUDy", -"/////////////////////////////////////////////////////////////////////////////6HdebpaHl7iTpLvmuPvtQNnkHfNOPyEhahxbVnIB2d1" -], -"column_roots": [ -"AAAAAAAAAAAAAAAAAAAAAAAAABPYEuDlO9Dz69oAAAAAAAAAAAAAAAAAAAAAAAAAx5SEzwtkGj2xbESyOeamsjGWUBQdAQoiSl+rMtNMo1wEtfGQnFS/g+K+", -"AAAAAAAAAAAAAAAAAAAAAAAAAC3uK6nhCxHTfBwAAAAAAAAAAAAAAAAAAAAAAAAA1fxnqHyO6qV39pcUQ8MuTfJ7RBhbSVWf0aamUP27KRY0II55oJoY6Ng6", -"AAAAAAAAAAAAAAAAAAAAAAAAAC6DkYeeBY/kKvAAAAAAAAAAAAAAAAAAAAAAAAAA47rxk8hoCnWGM+CX47TlYWBeE2unvRhA/j3EvHdxeL1rFRkaYfAd5eg7", -"AAAAAAAAAAAAAAAAAAAAAAAAADHJJTdId/E+G/0AAAAAAAAAAAAAAAAAAAAAAAAA8PtvJpbDc4APKAk5QPSH59HECE2sf/CDLKAZJjWo9DD4sLXJQ4jTZoH6", -"/////////////////////////////////////////////////////////////////////////////4lKCT3K11RnNIuLNfY+SfDZCYAE2iW0hjQHIVBpoN0q", -"/////////////////////////////////////////////////////////////////////////////1NpYcgayEVenbFeEO5LJ1j1/1sD+PvZWHDv+jqT1dLR", -"/////////////////////////////////////////////////////////////////////////////8FOWVuCU0rTzUW9tP2R47RmTBvwXX8ycKrMhgKEi1xa", -"/////////////////////////////////////////////////////////////////////////////7K5SoZ3HF5QgPvIXpKSr9eT4Xfiokc3PUMmXE4pBDTf" -] -} \ No newline at end of file diff --git a/share/eds/testdata/example.car b/share/eds/testdata/example.car deleted file mode 100644 index 4d33c0ef33..0000000000 Binary files a/share/eds/testdata/example.car and /dev/null differ diff --git a/share/eds/testing.go b/share/eds/testing.go new file mode 100644 index 0000000000..8549cf3ca1 --- /dev/null +++ b/share/eds/testing.go @@ -0,0 +1,467 @@ +package eds + +import ( + "context" + "fmt" + "math/rand/v2" + "strconv" + "sync" + "testing" + + "github.com/stretchr/testify/require" + + "github.com/celestiaorg/nmt" + "github.com/celestiaorg/rsmt2d" + + "github.com/celestiaorg/celestia-node/share" + "github.com/celestiaorg/celestia-node/share/eds/edstest" + 
"github.com/celestiaorg/celestia-node/share/sharetest" + "github.com/celestiaorg/celestia-node/share/shwap" +) + +type ( + createAccessor func(testing.TB, *rsmt2d.ExtendedDataSquare) Accessor + createAccessorStreamer func(testing.TB, *rsmt2d.ExtendedDataSquare) AccessorStreamer +) + +// TestSuiteAccessor runs a suite of tests for the given Accessor implementation. +func TestSuiteAccessor( + ctx context.Context, + t *testing.T, + createAccessor createAccessor, + maxSize int, +) { + minSize := 2 + if !checkPowerOfTwo(maxSize) { + t.Errorf("minSize must be power of 2: %v", maxSize) + } + for size := minSize; size <= maxSize; size *= 2 { + for name, eds := range testEDSes(t, size) { + t.Run(fmt.Sprintf("DataHash:%s", name), func(t *testing.T) { + t.Parallel() + testAccessorDataHash(ctx, t, createAccessor, eds) + }) + + t.Run(fmt.Sprintf("AxisRoots:%s", name), func(t *testing.T) { + t.Parallel() + testAccessorAxisRoots(ctx, t, createAccessor, eds) + }) + + t.Run(fmt.Sprintf("Sample:%s", name), func(t *testing.T) { + t.Parallel() + testAccessorSample(ctx, t, createAccessor, eds) + }) + + t.Run(fmt.Sprintf("AxisHalf:%s", name), func(t *testing.T) { + t.Parallel() + testAccessorAxisHalf(ctx, t, createAccessor, eds) + }) + + t.Run(fmt.Sprintf("RowNamespaceData:%s", name), func(t *testing.T) { + t.Parallel() + testAccessorRowNamespaceData(ctx, t, createAccessor, size) + }) + + t.Run(fmt.Sprintf("Shares:%s", name), func(t *testing.T) { + t.Parallel() + testAccessorShares(ctx, t, createAccessor, eds) + }) + } + } +} + +func testEDSes(t *testing.T, sizes ...int) map[string]*rsmt2d.ExtendedDataSquare { + testEDSes := make(map[string]*rsmt2d.ExtendedDataSquare) + for _, size := range sizes { + fullEDS := edstest.RandEDS(t, size) + testEDSes[fmt.Sprintf("FullODS:%d", size)] = fullEDS + + var padding int + for padding < 1 { + padding = rand.IntN(size * size) //nolint:gosec + } + paddingEds := edstest.RandEDSWithTailPadding(t, size, padding) + testEDSes[fmt.Sprintf("PaddedODS:%d", 
size)] = paddingEds + } + + testEDSes["EmptyODS"] = share.EmptyEDS() + return testEDSes +} + +func TestStreamer( + ctx context.Context, + t *testing.T, + create createAccessorStreamer, + odsSize int, +) { + for name, eds := range testEDSes(t, odsSize) { + t.Run(fmt.Sprintf("Reader:%s", name), func(t *testing.T) { + t.Parallel() + testAccessorReader(ctx, t, create, eds) + }) + } +} + +func testAccessorDataHash( + ctx context.Context, + t *testing.T, + createAccessor createAccessor, + eds *rsmt2d.ExtendedDataSquare, +) { + acc := createAccessor(t, eds) + + expected, err := share.NewAxisRoots(eds) + require.NoError(t, err) + + datahash, err := acc.DataHash(ctx) + require.NoError(t, err) + require.Equal(t, share.DataHash(expected.Hash()), datahash) +} + +func testAccessorAxisRoots( + ctx context.Context, + t *testing.T, + createAccessor createAccessor, + eds *rsmt2d.ExtendedDataSquare, +) { + acc := createAccessor(t, eds) + + expected, err := share.NewAxisRoots(eds) + require.NoError(t, err) + + roots, err := acc.AxisRoots(ctx) + require.NoError(t, err) + require.True(t, expected.Equals(roots)) +} + +func testAccessorSample( + ctx context.Context, + t *testing.T, + createAccessor createAccessor, + eds *rsmt2d.ExtendedDataSquare, +) { + width := int(eds.Width()) + t.Run("single thread", func(t *testing.T) { + acc := createAccessor(t, eds) + roots, err := share.NewAxisRoots(eds) + require.NoError(t, err) + // t.Parallel() this fails the test for some reason + for rowIdx := 0; rowIdx < width; rowIdx++ { + for colIdx := 0; colIdx < width; colIdx++ { + testSample(ctx, t, acc, roots, colIdx, rowIdx) + } + } + }) + + t.Run("parallel", func(t *testing.T) { + t.Parallel() + acc := createAccessor(t, eds) + roots, err := share.NewAxisRoots(eds) + require.NoError(t, err) + wg := sync.WaitGroup{} + for rowIdx := 0; rowIdx < width; rowIdx++ { + for colIdx := 0; colIdx < width; colIdx++ { + wg.Add(1) + go func(rowIdx, colIdx int) { + defer wg.Done() + testSample(ctx, t, acc, roots, 
rowIdx, colIdx) + }(rowIdx, colIdx) + } + } + wg.Wait() + }) + + t.Run("random", func(t *testing.T) { + t.Parallel() + acc := createAccessor(t, eds) + roots, err := share.NewAxisRoots(eds) + require.NoError(t, err) + + wg := sync.WaitGroup{} + for range 1000 { + wg.Add(1) + go func() { + defer wg.Done() + rowIdx, colIdx := rand.IntN(width), rand.IntN(width) //nolint:gosec + testSample(ctx, t, acc, roots, rowIdx, colIdx) + }() + } + wg.Wait() + }) +} + +func testSample( + ctx context.Context, + t *testing.T, + acc Accessor, + roots *share.AxisRoots, + rowIdx, colIdx int, +) { + shr, err := acc.Sample(ctx, rowIdx, colIdx) + require.NoError(t, err) + + err = shr.Verify(roots, rowIdx, colIdx) + require.NoError(t, err) +} + +func testAccessorRowNamespaceData( + ctx context.Context, + t *testing.T, + createAccessor createAccessor, + odsSize int, +) { + t.Run("included", func(t *testing.T) { + t.Parallel() + // generate EDS with random data and some Shares with the same namespace + sharesAmount := odsSize * odsSize + namespace := sharetest.RandV0Namespace() + // test with different amount of shares + for amount := 1; amount < sharesAmount; amount++ { + // select random amount of shares, but not less than 1 + eds, roots := edstest.RandEDSWithNamespace(t, namespace, amount, odsSize) + acc := createAccessor(t, eds) + + var actualSharesAmount int + // loop over all rows and check that the amount of shares in the namespace is equal to the expected + // amount + for i, root := range roots.RowRoots { + rowData, err := acc.RowNamespaceData(ctx, namespace, i) + + // namespace is not included in the row, so there should be no shares + if namespace.IsOutsideRange(root, root) { + require.ErrorIs(t, err, shwap.ErrNamespaceOutsideRange) + require.Len(t, rowData.Shares, 0) + continue + } + + actualSharesAmount += len(rowData.Shares) + require.NoError(t, err) + require.True(t, len(rowData.Shares) > 0) + err = rowData.Verify(roots, namespace, i) + require.NoError(t, err) + } + + // check 
that the amount of shares in the namespace is equal to the expected amount + require.Equal(t, amount, actualSharesAmount) + } + }) + + t.Run("not included", func(t *testing.T) { + t.Parallel() + // generate EDS with random data and some Shares with the same namespace + eds := edstest.RandEDS(t, odsSize) + roots, err := share.NewAxisRoots(eds) + require.NoError(t, err) + + // loop over first half of the rows, because the second half is parity and does not contain + // namespaced shares + for i, root := range roots.RowRoots[:odsSize] { + // select namespace that within the range of root namespaces, but is not included + maxNs := nmt.MaxNamespace(root, share.NamespaceSize) + absentNs, err := share.Namespace(maxNs).AddInt(-1) + require.NoError(t, err) + + acc := createAccessor(t, eds) + rowData, err := acc.RowNamespaceData(ctx, absentNs, i) + require.NoError(t, err) + + // namespace is not included in the row, so there should be no shares + require.Len(t, rowData.Shares, 0) + require.True(t, rowData.Proof.IsOfAbsence()) + + err = rowData.Verify(roots, absentNs, i) + require.NoError(t, err) + } + }) +} + +func testAccessorAxisHalf( + ctx context.Context, + t *testing.T, + createAccessor createAccessor, + eds *rsmt2d.ExtendedDataSquare, +) { + odsSize := int(eds.Width() / 2) + acc := createAccessor(t, eds) + + t.Run("single thread", func(t *testing.T) { + for _, axisType := range []rsmt2d.Axis{rsmt2d.Col, rsmt2d.Row} { + for axisIdx := 0; axisIdx < int(eds.Width()); axisIdx++ { + half, err := acc.AxisHalf(ctx, axisType, axisIdx) + require.NoError(t, err) + require.Len(t, half.Shares, odsSize) + + var expected []share.Share + if half.IsParity { + expected = getAxis(eds, axisType, axisIdx)[odsSize:] + } else { + expected = getAxis(eds, axisType, axisIdx)[:odsSize] + } + + require.Equal(t, expected, half.Shares) + } + } + }) + + t.Run("parallel", func(t *testing.T) { + t.Parallel() + wg := sync.WaitGroup{} + for _, axisType := range []rsmt2d.Axis{rsmt2d.Col, rsmt2d.Row} { + 
for i := 0; i < int(eds.Width()); i++ { + wg.Add(1) + go func(axisType rsmt2d.Axis, idx int) { + defer wg.Done() + half, err := acc.AxisHalf(ctx, axisType, idx) + require.NoError(t, err) + require.Len(t, half.Shares, odsSize) + + var expected []share.Share + if half.IsParity { + expected = getAxis(eds, axisType, idx)[odsSize:] + } else { + expected = getAxis(eds, axisType, idx)[:odsSize] + } + + require.Equal(t, expected, half.Shares) + }(axisType, i) + } + } + wg.Wait() + }) +} + +func testAccessorShares( + ctx context.Context, + t *testing.T, + createAccessor createAccessor, + eds *rsmt2d.ExtendedDataSquare, +) { + acc := createAccessor(t, eds) + + wg := sync.WaitGroup{} + for i := 0; i < 10; i++ { + wg.Add(1) + go func() { + defer wg.Done() + shares, err := acc.Shares(ctx) + require.NoError(t, err) + expected := eds.FlattenedODS() + require.Equal(t, expected, shares) + }() + } + wg.Wait() +} + +func testAccessorReader( + ctx context.Context, + t *testing.T, + createAccessor createAccessorStreamer, + eds *rsmt2d.ExtendedDataSquare, +) { + acc := createAccessor(t, eds) + + // verify that the reader represented by accessor can be read from + // multiple times, without exhausting the underlying reader. 
+ wg := sync.WaitGroup{} + for i := 0; i < 10; i++ { + wg.Add(1) + go func() { + defer wg.Done() + testReader(ctx, t, eds, acc) + }() + } + wg.Wait() +} + +func testReader(ctx context.Context, t *testing.T, eds *rsmt2d.ExtendedDataSquare, as AccessorStreamer) { + reader, err := as.Reader() + require.NoError(t, err) + + roots, err := share.NewAxisRoots(eds) + require.NoError(t, err) + + actual, err := ReadAccessor(ctx, reader, roots) + require.NoError(t, err) + require.True(t, eds.Equals(actual.ExtendedDataSquare)) +} + +func BenchGetHalfAxisFromAccessor( + ctx context.Context, + b *testing.B, + createAccessor createAccessor, + minOdsSize, maxOdsSize int, +) { + for size := minOdsSize; size <= maxOdsSize; size *= 2 { + eds := edstest.RandEDS(b, size) + acc := createAccessor(b, eds) + + // loop over all possible axis types and quadrants + for _, axisType := range []rsmt2d.Axis{rsmt2d.Row, rsmt2d.Col} { + for _, squareHalf := range []int{0, 1} { + name := fmt.Sprintf("Size:%v/ProofType:%s/squareHalf:%s", size, axisType, strconv.Itoa(squareHalf)) + b.Run(name, func(b *testing.B) { + // warm up cache + _, err := acc.AxisHalf(ctx, axisType, acc.Size(ctx)/2*(squareHalf)) + require.NoError(b, err) + + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, err := acc.AxisHalf(ctx, axisType, acc.Size(ctx)/2*(squareHalf)) + require.NoError(b, err) + } + }) + } + } + } +} + +func BenchGetSampleFromAccessor( + ctx context.Context, + b *testing.B, + createAccessor createAccessor, + minOdsSize, maxOdsSize int, +) { + for size := minOdsSize; size <= maxOdsSize; size *= 2 { + eds := edstest.RandEDS(b, size) + acc := createAccessor(b, eds) + + // loop over all possible axis types and quadrants + for _, q := range quadrants { + name := fmt.Sprintf("Size:%v/quadrant:%s", size, q) + b.Run(name, func(b *testing.B) { + rowIdx, colIdx := q.coordinates(acc.Size(ctx)) + // warm up cache + _, err := acc.Sample(ctx, rowIdx, colIdx) + require.NoError(b, err, q.String()) + + b.ResetTimer() + for i := 
0; i < b.N; i++ { + _, err := acc.Sample(ctx, rowIdx, colIdx) + require.NoError(b, err) + } + }) + } + } +} + +type quadrantIdx int + +var quadrants = []quadrantIdx{1, 2, 3, 4} + +func (q quadrantIdx) String() string { + return strconv.Itoa(int(q)) +} + +func (q quadrantIdx) coordinates(edsSize int) (rowIdx, colIdx int) { + colIdx = edsSize/2*(int(q-1)%2) + 1 + rowIdx = edsSize/2*(int(q-1)/2) + 1 + return rowIdx, colIdx +} + +func checkPowerOfTwo(n int) bool { + // added one corner case if n is zero it will also consider as power 2 + if n == 0 { + return true + } + return n&(n-1) == 0 +} diff --git a/share/eds/utils.go b/share/eds/utils.go deleted file mode 100644 index fd152e246f..0000000000 --- a/share/eds/utils.go +++ /dev/null @@ -1,150 +0,0 @@ -package eds - -import ( - "context" - "errors" - "fmt" - "io" - - "github.com/filecoin-project/dagstore" - "github.com/ipfs/boxo/blockservice" - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/trace" - "golang.org/x/sync/errgroup" - - "github.com/celestiaorg/celestia-node/libs/utils" - "github.com/celestiaorg/celestia-node/share" - "github.com/celestiaorg/celestia-node/share/eds/cache" - "github.com/celestiaorg/celestia-node/share/ipld" -) - -// readCloser is a helper struct, that combines io.Reader and io.Closer -type readCloser struct { - io.Reader - io.Closer -} - -// BlockstoreCloser represents a blockstore that can also be closed. It combines the functionality -// of a dagstore.ReadBlockstore with that of an io.Closer. 
-type BlockstoreCloser struct { - dagstore.ReadBlockstore - io.Closer -} - -func newReadCloser(ac cache.Accessor) io.ReadCloser { - return readCloser{ - ac.Reader(), - ac, - } -} - -// blockstoreCloser constructs new BlockstoreCloser from cache.Accessor -func blockstoreCloser(ac cache.Accessor) (*BlockstoreCloser, error) { - bs, err := ac.Blockstore() - if err != nil { - return nil, fmt.Errorf("eds/store: failed to get blockstore: %w", err) - } - return &BlockstoreCloser{ - ReadBlockstore: bs, - Closer: ac, - }, nil -} - -func closeAndLog(name string, closer io.Closer) { - if err := closer.Close(); err != nil { - log.Warnw("closing "+name, "err", err) - } -} - -// RetrieveNamespaceFromStore gets all EDS shares in the given namespace from -// the EDS store through the corresponding CAR-level blockstore. It is extracted -// from the store getter to make it available for reuse in the shrexnd server. -func RetrieveNamespaceFromStore( - ctx context.Context, - store *Store, - dah *share.Root, - namespace share.Namespace, -) (shares share.NamespacedShares, err error) { - if err = namespace.ValidateForData(); err != nil { - return nil, err - } - - bs, err := store.CARBlockstore(ctx, dah.Hash()) - if errors.Is(err, ErrNotFound) { - // convert error to satisfy getter interface contract - err = share.ErrNotFound - } - if err != nil { - return nil, fmt.Errorf("failed to retrieve blockstore from eds store: %w", err) - } - defer func() { - if err := bs.Close(); err != nil { - log.Warnw("closing blockstore", "err", err) - } - }() - - // wrap the read-only CAR blockstore in a getter - blockGetter := NewBlockGetter(bs) - shares, err = CollectSharesByNamespace(ctx, blockGetter, dah, namespace) - if errors.Is(err, ipld.ErrNodeNotFound) { - // IPLD node not found after the index pointed to this shard and the CAR - // blockstore has been opened successfully is a strong indicator of - // corruption. 
We remove the block on bridges and fulls and return - // share.ErrNotFound to ensure the data is retrieved by the next getter. - // Note that this recovery is manual and will only be restored by an RPC - // call to SharesAvailable that fetches the same datahash that was - // removed. - err = store.Remove(ctx, dah.Hash()) - if err != nil { - log.Errorf("failed to remove CAR from store after detected corruption: %w", err) - } - err = share.ErrNotFound - } - if err != nil { - return nil, fmt.Errorf("failed to retrieve shares by namespace from store: %w", err) - } - - return shares, nil -} - -// CollectSharesByNamespace collects NamespaceShares within the given namespace from share.Root. -func CollectSharesByNamespace( - ctx context.Context, - bg blockservice.BlockGetter, - root *share.Root, - namespace share.Namespace, -) (shares share.NamespacedShares, err error) { - ctx, span := tracer.Start(ctx, "collect-shares-by-namespace", trace.WithAttributes( - attribute.String("namespace", namespace.String()), - )) - defer func() { - utils.SetStatusAndEnd(span, err) - }() - - rootCIDs := ipld.FilterRootByNamespace(root, namespace) - if len(rootCIDs) == 0 { - return []share.NamespacedRow{}, nil - } - - errGroup, ctx := errgroup.WithContext(ctx) - shares = make([]share.NamespacedRow, len(rootCIDs)) - for i, rootCID := range rootCIDs { - errGroup.Go(func() error { - row, proof, err := ipld.GetSharesByNamespace(ctx, bg, rootCID, namespace, len(root.RowRoots)) - shares[i] = share.NamespacedRow{ - Shares: row, - Proof: proof, - } - if err != nil { - return fmt.Errorf("retrieving shares by namespace %s for row %x: %w", namespace.String(), rootCID, err) - } - return nil - }) - } - - if err := errGroup.Wait(); err != nil { - return nil, err - } - - return shares, nil -} diff --git a/share/eds/validation.go b/share/eds/validation.go new file mode 100644 index 0000000000..41ec6c45c4 --- /dev/null +++ b/share/eds/validation.go @@ -0,0 +1,67 @@ +package eds + +import ( + "context" + 
"errors" + "fmt" + "sync/atomic" + + "github.com/celestiaorg/rsmt2d" + + "github.com/celestiaorg/celestia-node/share" + "github.com/celestiaorg/celestia-node/share/shwap" +) + +var _ Accessor = validation{} + +// ErrOutOfBounds is returned whenever an index is out of bounds. +var ErrOutOfBounds = errors.New("index is out of bounds") + +// validation is a Accessor implementation that performs sanity checks on methods. It wraps +// another Accessor and performs bounds checks on index arguments. +type validation struct { + Accessor + size *atomic.Int32 +} + +func WithValidation(f Accessor) Accessor { + return &validation{Accessor: f, size: new(atomic.Int32)} +} + +func (f validation) Size(ctx context.Context) int { + size := f.size.Load() + if size == 0 { + loaded := f.Accessor.Size(ctx) + f.size.Store(int32(loaded)) + return loaded + } + return int(size) +} + +func (f validation) Sample(ctx context.Context, rowIdx, colIdx int) (shwap.Sample, error) { + _, err := shwap.NewSampleID(1, rowIdx, colIdx, f.Size(ctx)) + if err != nil { + return shwap.Sample{}, fmt.Errorf("sample validation: %w", err) + } + return f.Accessor.Sample(ctx, rowIdx, colIdx) +} + +func (f validation) AxisHalf(ctx context.Context, axisType rsmt2d.Axis, axisIdx int) (AxisHalf, error) { + _, err := shwap.NewRowID(1, axisIdx, f.Size(ctx)) + if err != nil { + return AxisHalf{}, fmt.Errorf("axis half validation: %w", err) + } + return f.Accessor.AxisHalf(ctx, axisType, axisIdx) +} + +func (f validation) RowNamespaceData( + ctx context.Context, + namespace share.Namespace, + rowIdx int, +) (shwap.RowNamespaceData, error) { + _, err := shwap.NewRowNamespaceDataID(1, rowIdx, namespace, f.Size(ctx)) + if err != nil { + return shwap.RowNamespaceData{}, fmt.Errorf("row namespace data validation: %w", err) + } + return f.Accessor.RowNamespaceData(ctx, namespace, rowIdx) +} diff --git a/share/eds/validation_test.go b/share/eds/validation_test.go new file mode 100644 index 0000000000..4be8dc7230 --- /dev/null 
+++ b/share/eds/validation_test.go @@ -0,0 +1,103 @@ +package eds + +import ( + "context" + "errors" + "testing" + + "github.com/stretchr/testify/require" + + "github.com/celestiaorg/rsmt2d" + + "github.com/celestiaorg/celestia-node/share/eds/edstest" + "github.com/celestiaorg/celestia-node/share/sharetest" + "github.com/celestiaorg/celestia-node/share/shwap" +) + +func TestValidation_Sample(t *testing.T) { + tests := []struct { + name string + rowIdx, colIdx int + odsSize int + expectFail bool + }{ + {"ValidIndices", 3, 2, 4, false}, + {"OutOfBoundsX", 8, 3, 4, true}, + {"OutOfBoundsY", 3, 8, 4, true}, + {"NegativeX", -1, 4, 8, true}, + {"NegativeY", 3, -1, 8, true}, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + randEDS := edstest.RandEDS(t, tt.odsSize) + accessor := &Rsmt2D{ExtendedDataSquare: randEDS} + validation := WithValidation(AccessorAndStreamer(accessor, nil)) + + _, err := validation.Sample(context.Background(), tt.rowIdx, tt.colIdx) + if tt.expectFail { + require.ErrorIs(t, err, shwap.ErrInvalidID) + } else { + require.NoError(t, err) + } + }) + } +} + +func TestValidation_AxisHalf(t *testing.T) { + tests := []struct { + name string + axisType rsmt2d.Axis + axisIdx int + odsSize int + expectFail bool + }{ + {"ValidIndex", rsmt2d.Row, 2, 4, false}, + {"OutOfBounds", rsmt2d.Col, 8, 4, true}, + {"NegativeIndex", rsmt2d.Row, -1, 4, true}, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + randEDS := edstest.RandEDS(t, tt.odsSize) + accessor := &Rsmt2D{ExtendedDataSquare: randEDS} + validation := WithValidation(AccessorAndStreamer(accessor, nil)) + + _, err := validation.AxisHalf(context.Background(), tt.axisType, tt.axisIdx) + if tt.expectFail { + require.ErrorIs(t, err, shwap.ErrInvalidID) + } else { + require.NoError(t, err) + } + }) + } +} + +func TestValidation_RowNamespaceData(t *testing.T) { + tests := []struct { + name string + rowIdx int + odsSize int + expectFail bool + }{ + {"ValidIndex", 3, 4, 
false}, + {"OutOfBounds", 8, 4, true}, + {"NegativeIndex", -1, 4, true}, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + randEDS := edstest.RandEDS(t, tt.odsSize) + accessor := &Rsmt2D{ExtendedDataSquare: randEDS} + validation := WithValidation(AccessorAndStreamer(accessor, nil)) + + ns := sharetest.RandV0Namespace() + _, err := validation.RowNamespaceData(context.Background(), ns, tt.rowIdx) + if tt.expectFail { + require.ErrorIs(t, err, shwap.ErrInvalidID) + } else { + require.True(t, err == nil || errors.Is(err, shwap.ErrNamespaceOutsideRange), err) + } + }) + } +} diff --git a/share/empty.go b/share/empty.go index 9d1a4ff561..4ffd6ade82 100644 --- a/share/empty.go +++ b/share/empty.go @@ -11,14 +11,20 @@ import ( "github.com/celestiaorg/rsmt2d" ) -// EmptyRoot returns Root of the empty block EDS. -func EmptyRoot() *Root { +// EmptyEDSDataHash returns DataHash of the empty block EDS. +func EmptyEDSDataHash() DataHash { initEmpty() - return emptyBlockRoot + return emptyBlockDataHash } -// EmptyExtendedDataSquare returns the EDS of the empty block data square. -func EmptyExtendedDataSquare() *rsmt2d.ExtendedDataSquare { +// EmptyEDSRoots returns AxisRoots of the empty block EDS. +func EmptyEDSRoots() *AxisRoots { + initEmpty() + return emptyBlockRoots +} + +// EmptyEDS returns the EDS of the empty block data square. +func EmptyEDS() *rsmt2d.ExtendedDataSquare { initEmpty() return emptyBlockEDS } @@ -30,10 +36,11 @@ func EmptyBlockShares() []Share { } var ( - emptyOnce sync.Once - emptyBlockRoot *Root - emptyBlockEDS *rsmt2d.ExtendedDataSquare - emptyBlockShares []Share + emptyOnce sync.Once + emptyBlockDataHash DataHash + emptyBlockRoots *AxisRoots + emptyBlockEDS *rsmt2d.ExtendedDataSquare + emptyBlockShares []Share ) // initEmpty enables lazy initialization for constant empty block data. 
@@ -52,16 +59,16 @@ func computeEmpty() { } emptyBlockEDS = eds - emptyBlockRoot, err = NewRoot(eds) + emptyBlockRoots, err = NewAxisRoots(eds) if err != nil { panic(fmt.Errorf("failed to create empty DAH: %w", err)) } minDAH := da.MinDataAvailabilityHeader() - if !bytes.Equal(minDAH.Hash(), emptyBlockRoot.Hash()) { + if !bytes.Equal(minDAH.Hash(), emptyBlockRoots.Hash()) { panic(fmt.Sprintf("mismatch in calculated minimum DAH and minimum DAH from celestia-app, "+ - "expected %s, got %s", minDAH.String(), emptyBlockRoot.String())) + "expected %s, got %s", minDAH.String(), emptyBlockRoots.String())) } // precompute Hash, so it's cached internally to avoid potential races - emptyBlockRoot.Hash() + emptyBlockDataHash = emptyBlockRoots.Hash() } diff --git a/share/getter.go b/share/getter.go deleted file mode 100644 index d824134152..0000000000 --- a/share/getter.go +++ /dev/null @@ -1,97 +0,0 @@ -package share - -import ( - "context" - "errors" - "fmt" - - "github.com/celestiaorg/nmt" - "github.com/celestiaorg/rsmt2d" - - "github.com/celestiaorg/celestia-node/header" -) - -var ( - // ErrNotFound is used to indicate that requested data could not be found. - ErrNotFound = errors.New("share: data not found") - // ErrOutOfBounds is used to indicate that a passed row or column index is out of bounds of the - // square size. - ErrOutOfBounds = errors.New("share: row or column index is larger than square size") -) - -// Getter interface provides a set of accessors for shares by the Root. -// Automatically verifies integrity of shares(exceptions possible depending on the implementation). -// -//go:generate mockgen -destination=mocks/getter.go -package=mocks . Getter -type Getter interface { - // GetShare gets a Share by coordinates in EDS. - GetShare(ctx context.Context, header *header.ExtendedHeader, row, col int) (Share, error) - - // GetEDS gets the full EDS identified by the given extended header. 
- GetEDS(context.Context, *header.ExtendedHeader) (*rsmt2d.ExtendedDataSquare, error) - - // GetSharesByNamespace gets all shares from an EDS within the given namespace. - // Shares are returned in a row-by-row order if the namespace spans multiple rows. - // Inclusion of returned data could be verified using Verify method on NamespacedShares. - // If no shares are found for target namespace non-inclusion could be also verified by calling - // Verify method. - GetSharesByNamespace(context.Context, *header.ExtendedHeader, Namespace) (NamespacedShares, error) -} - -// NamespacedShares represents all shares with proofs within a specific namespace of an EDS. -type NamespacedShares []NamespacedRow - -// Flatten returns the concatenated slice of all NamespacedRow shares. -func (ns NamespacedShares) Flatten() []Share { - var shares []Share - for _, row := range ns { - shares = append(shares, row.Shares...) - } - return shares -} - -// NamespacedRow represents all shares with proofs within a specific namespace of a single EDS row. -type NamespacedRow struct { - Shares []Share `json:"shares"` - Proof *nmt.Proof `json:"proof"` -} - -// Verify validates NamespacedShares by checking every row with nmt inclusion proof. -func (ns NamespacedShares) Verify(root *Root, namespace Namespace) error { - var originalRoots [][]byte - for _, row := range root.RowRoots { - if !namespace.IsOutsideRange(row, row) { - originalRoots = append(originalRoots, row) - } - } - - if len(originalRoots) != len(ns) { - return fmt.Errorf("amount of rows differs between root and namespace shares: expected %d, got %d", - len(originalRoots), len(ns)) - } - - for i, row := range ns { - // verify row data against row hash from original root - if !row.verify(originalRoots[i], namespace) { - return fmt.Errorf("row verification failed: row %d doesn't match original root: %s", i, root.String()) - } - } - return nil -} - -// verify validates the row using nmt inclusion proof. 
-func (row *NamespacedRow) verify(rowRoot []byte, namespace Namespace) bool { - // construct nmt leaves from shares by prepending namespace - leaves := make([][]byte, 0, len(row.Shares)) - for _, shr := range row.Shares { - leaves = append(leaves, append(GetNamespace(shr), shr...)) - } - - // verify namespace - return row.Proof.VerifyNamespace( - NewSHA256Hasher(), - namespace.ToNMT(), - leaves, - rowRoot, - ) -} diff --git a/share/getters/getter_test.go b/share/getters/getter_test.go deleted file mode 100644 index 47a3c4ac24..0000000000 --- a/share/getters/getter_test.go +++ /dev/null @@ -1,355 +0,0 @@ -package getters - -import ( - "context" - "os" - "sync" - "testing" - "time" - - "github.com/ipfs/boxo/exchange/offline" - "github.com/ipfs/go-datastore" - ds_sync "github.com/ipfs/go-datastore/sync" - dsbadger "github.com/ipfs/go-ds-badger4" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/celestiaorg/celestia-app/v2/pkg/da" - "github.com/celestiaorg/celestia-app/v2/pkg/wrapper" - "github.com/celestiaorg/rsmt2d" - - "github.com/celestiaorg/celestia-node/header" - "github.com/celestiaorg/celestia-node/header/headertest" - "github.com/celestiaorg/celestia-node/share" - "github.com/celestiaorg/celestia-node/share/eds" - "github.com/celestiaorg/celestia-node/share/eds/edstest" - "github.com/celestiaorg/celestia-node/share/ipld" - "github.com/celestiaorg/celestia-node/share/sharetest" -) - -func TestStoreGetter(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - t.Cleanup(cancel) - - tmpDir := t.TempDir() - storeCfg := eds.DefaultParameters() - ds := ds_sync.MutexWrap(datastore.NewMapDatastore()) - edsStore, err := eds.NewStore(storeCfg, tmpDir, ds) - require.NoError(t, err) - - err = edsStore.Start(ctx) - require.NoError(t, err) - - sg := NewStoreGetter(edsStore) - - t.Run("GetShare", func(t *testing.T) { - randEds, eh := randomEDS(t) - err = edsStore.Put(ctx, eh.DAH.Hash(), randEds) - 
require.NoError(t, err) - - squareSize := int(randEds.Width()) - for i := 0; i < squareSize; i++ { - for j := 0; j < squareSize; j++ { - share, err := sg.GetShare(ctx, eh, i, j) - require.NoError(t, err) - assert.Equal(t, randEds.GetCell(uint(i), uint(j)), share) - } - } - - // doesn't panic on indexes too high - _, err := sg.GetShare(ctx, eh, squareSize, squareSize) - require.ErrorIs(t, err, share.ErrOutOfBounds) - - // root not found - _, eh = randomEDS(t) - _, err = sg.GetShare(ctx, eh, 0, 0) - require.ErrorIs(t, err, share.ErrNotFound) - }) - - t.Run("GetEDS", func(t *testing.T) { - randEds, eh := randomEDS(t) - err = edsStore.Put(ctx, eh.DAH.Hash(), randEds) - require.NoError(t, err) - - retrievedEDS, err := sg.GetEDS(ctx, eh) - require.NoError(t, err) - assert.True(t, randEds.Equals(retrievedEDS)) - - // root not found - emptyRoot := da.MinDataAvailabilityHeader() - eh.DAH = &emptyRoot - _, err = sg.GetEDS(ctx, eh) - require.ErrorIs(t, err, share.ErrNotFound) - }) - - t.Run("GetSharesByNamespace", func(t *testing.T) { - randEds, namespace, eh := randomEDSWithDoubledNamespace(t, 4) - err = edsStore.Put(ctx, eh.DAH.Hash(), randEds) - require.NoError(t, err) - - shares, err := sg.GetSharesByNamespace(ctx, eh, namespace) - require.NoError(t, err) - require.NoError(t, shares.Verify(eh.DAH, namespace)) - assert.Len(t, shares.Flatten(), 2) - - // namespace not found - randNamespace := sharetest.RandV0Namespace() - emptyShares, err := sg.GetSharesByNamespace(ctx, eh, randNamespace) - require.NoError(t, err) - require.Nil(t, emptyShares.Flatten()) - - // root not found - emptyRoot := da.MinDataAvailabilityHeader() - eh.DAH = &emptyRoot - _, err = sg.GetSharesByNamespace(ctx, eh, namespace) - require.ErrorIs(t, err, share.ErrNotFound) - }) - - t.Run("GetSharesFromNamespace removes corrupted shard", func(t *testing.T) { - randEds, namespace, eh := randomEDSWithDoubledNamespace(t, 4) - err = edsStore.Put(ctx, eh.DAH.Hash(), randEds) - require.NoError(t, err) - - // 
available - shares, err := sg.GetSharesByNamespace(ctx, eh, namespace) - require.NoError(t, err) - require.NoError(t, shares.Verify(eh.DAH, namespace)) - assert.Len(t, shares.Flatten(), 2) - - // 'corrupt' existing CAR by overwriting with a random EDS - f, err := os.OpenFile(tmpDir+"/blocks/"+eh.DAH.String(), os.O_WRONLY, 0o644) - require.NoError(t, err) - edsToOverwriteWith, eh := randomEDS(t) - err = eds.WriteEDS(ctx, edsToOverwriteWith, f) - require.NoError(t, err) - - shares, err = sg.GetSharesByNamespace(ctx, eh, namespace) - require.ErrorIs(t, err, share.ErrNotFound) - require.Nil(t, shares) - - // corruption detected, shard is removed - // try every 200ms until it passes or the context ends - ticker := time.NewTicker(200 * time.Millisecond) - defer ticker.Stop() - for { - select { - case <-ctx.Done(): - t.Fatal("context ended before successful retrieval") - case <-ticker.C: - has, err := edsStore.Has(ctx, eh.DAH.Hash()) - if err != nil { - t.Fatal(err) - } - if !has { - require.NoError(t, err) - return - } - } - } - }) -} - -func TestIPLDGetter(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - t.Cleanup(cancel) - - storeCfg := eds.DefaultParameters() - ds := ds_sync.MutexWrap(datastore.NewMapDatastore()) - edsStore, err := eds.NewStore(storeCfg, t.TempDir(), ds) - require.NoError(t, err) - - err = edsStore.Start(ctx) - require.NoError(t, err) - - bStore := edsStore.Blockstore() - bserv := ipld.NewBlockservice(bStore, offline.Exchange(edsStore.Blockstore())) - sg := NewIPLDGetter(bserv) - - t.Run("GetShare", func(t *testing.T) { - ctx, cancel := context.WithTimeout(ctx, time.Second) - t.Cleanup(cancel) - - randEds, eh := randomEDS(t) - err = edsStore.Put(ctx, eh.DAH.Hash(), randEds) - require.NoError(t, err) - - squareSize := int(randEds.Width()) - for i := 0; i < squareSize; i++ { - for j := 0; j < squareSize; j++ { - share, err := sg.GetShare(ctx, eh, i, j) - require.NoError(t, err) - assert.Equal(t, randEds.GetCell(uint(i), 
uint(j)), share) - } - } - - // doesn't panic on indexes too high - _, err := sg.GetShare(ctx, eh, squareSize+1, squareSize+1) - require.ErrorIs(t, err, share.ErrOutOfBounds) - - // root not found - _, eh = randomEDS(t) - _, err = sg.GetShare(ctx, eh, 0, 0) - require.ErrorIs(t, err, share.ErrNotFound) - }) - - t.Run("GetEDS", func(t *testing.T) { - ctx, cancel := context.WithTimeout(ctx, time.Second) - t.Cleanup(cancel) - - randEds, eh := randomEDS(t) - err = edsStore.Put(ctx, eh.DAH.Hash(), randEds) - require.NoError(t, err) - - retrievedEDS, err := sg.GetEDS(ctx, eh) - require.NoError(t, err) - assert.True(t, randEds.Equals(retrievedEDS)) - - // Ensure blocks still exist after cleanup - colRoots, _ := retrievedEDS.ColRoots() - has, err := bStore.Has(ctx, ipld.MustCidFromNamespacedSha256(colRoots[0])) - assert.NoError(t, err) - assert.True(t, has) - }) - - t.Run("GetSharesByNamespace", func(t *testing.T) { - ctx, cancel := context.WithTimeout(ctx, time.Second) - t.Cleanup(cancel) - - randEds, namespace, eh := randomEDSWithDoubledNamespace(t, 4) - err = edsStore.Put(ctx, eh.DAH.Hash(), randEds) - require.NoError(t, err) - - // first check that shares are returned correctly if they exist - shares, err := sg.GetSharesByNamespace(ctx, eh, namespace) - require.NoError(t, err) - require.NoError(t, shares.Verify(eh.DAH, namespace)) - assert.Len(t, shares.Flatten(), 2) - - // namespace not found - randNamespace := sharetest.RandV0Namespace() - emptyShares, err := sg.GetSharesByNamespace(ctx, eh, randNamespace) - require.NoError(t, err) - require.Nil(t, emptyShares.Flatten()) - - // nid doesn't exist in root - emptyRoot := da.MinDataAvailabilityHeader() - eh.DAH = &emptyRoot - emptyShares, err = sg.GetSharesByNamespace(ctx, eh, namespace) - require.NoError(t, err) - require.Empty(t, emptyShares.Flatten()) - }) -} - -// BenchmarkIPLDGetterOverBusyCache benchmarks the performance of the IPLDGetter when the -// cache size of the underlying blockstore is less than the number 
of blocks being requested in -// parallel. This is to ensure performance doesn't degrade when the cache is being frequently -// evicted. -// BenchmarkIPLDGetterOverBusyCache-10/128 1 12460428417 ns/op (~12s) -func BenchmarkIPLDGetterOverBusyCache(b *testing.B) { - const ( - blocks = 10 - size = 128 - ) - - ctx, cancel := context.WithTimeout(context.Background(), time.Second*30) - b.Cleanup(cancel) - - dir := b.TempDir() - ds, err := dsbadger.NewDatastore(dir, &dsbadger.DefaultOptions) - require.NoError(b, err) - - newStore := func(params *eds.Parameters) *eds.Store { - edsStore, err := eds.NewStore(params, dir, ds) - require.NoError(b, err) - err = edsStore.Start(ctx) - require.NoError(b, err) - return edsStore - } - edsStore := newStore(eds.DefaultParameters()) - - // generate EDSs and store them - headers := make([]*header.ExtendedHeader, blocks) - for i := range headers { - eds := edstest.RandEDS(b, size) - dah, err := da.NewDataAvailabilityHeader(eds) - require.NoError(b, err) - err = edsStore.Put(ctx, dah.Hash(), eds) - require.NoError(b, err) - - eh := headertest.RandExtendedHeader(b) - eh.DAH = &dah - - // store cids for read loop later - headers[i] = eh - } - - // restart store to clear cache - require.NoError(b, edsStore.Stop(ctx)) - - // set BlockstoreCacheSize to 1 to force eviction on every read - params := eds.DefaultParameters() - params.BlockstoreCacheSize = 1 - edsStore = newStore(params) - bstore := edsStore.Blockstore() - bserv := ipld.NewBlockservice(bstore, offline.Exchange(bstore)) - - // start client - getter := NewIPLDGetter(bserv) - - // request blocks in parallel - b.ResetTimer() - g := sync.WaitGroup{} - g.Add(blocks) - for _, h := range headers { - go func() { - defer g.Done() - _, err := getter.GetEDS(ctx, h) - require.NoError(b, err) - }() - } - g.Wait() -} - -func randomEDS(t *testing.T) (*rsmt2d.ExtendedDataSquare, *header.ExtendedHeader) { - eds := edstest.RandEDS(t, 4) - dah, err := share.NewRoot(eds) - require.NoError(t, err) - eh 
:= headertest.RandExtendedHeaderWithRoot(t, dah) - return eds, eh -} - -// randomEDSWithDoubledNamespace generates a random EDS and ensures that there are two shares in the -// middle that share a namespace. -// -//nolint:dupword -func randomEDSWithDoubledNamespace( - t *testing.T, - size int, -) (*rsmt2d.ExtendedDataSquare, []byte, *header.ExtendedHeader) { - n := size * size - randShares := sharetest.RandShares(t, n) - idx1 := (n - 1) / 2 - idx2 := n / 2 - - // Make it so that the two shares in two different rows have a common - // namespace. For example if size=4, the original data square looks like - // this: - // _ _ _ _ - // _ _ _ D - // D _ _ _ - // _ _ _ _ - // where the D shares have a common namespace. - copy(share.GetNamespace(randShares[idx2]), share.GetNamespace(randShares[idx1])) - - eds, err := rsmt2d.ComputeExtendedDataSquare( - randShares, - share.DefaultRSMT2DCodec(), - wrapper.NewConstructor(uint64(size)), - ) - require.NoError(t, err, "failure to recompute the extended data square") - dah, err := share.NewRoot(eds) - require.NoError(t, err) - eh := headertest.RandExtendedHeaderWithRoot(t, dah) - - return eds, share.GetNamespace(randShares[idx1]), eh -} diff --git a/share/getters/ipld.go b/share/getters/ipld.go deleted file mode 100644 index e9c930248d..0000000000 --- a/share/getters/ipld.go +++ /dev/null @@ -1,165 +0,0 @@ -package getters - -import ( - "context" - "errors" - "fmt" - "sync" - "sync/atomic" - - "github.com/ipfs/boxo/blockservice" - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/trace" - - "github.com/celestiaorg/rsmt2d" - - "github.com/celestiaorg/celestia-node/header" - "github.com/celestiaorg/celestia-node/libs/utils" - "github.com/celestiaorg/celestia-node/share" - "github.com/celestiaorg/celestia-node/share/eds" - "github.com/celestiaorg/celestia-node/share/eds/byzantine" - "github.com/celestiaorg/celestia-node/share/ipld" -) - -var _ share.Getter = (*IPLDGetter)(nil) - -// IPLDGetter is a share.Getter that 
retrieves shares from the bitswap network. Result caching is -// handled by the provided blockservice. A blockservice session will be created for retrieval if the -// passed context is wrapped with WithSession. -type IPLDGetter struct { - rtrv *eds.Retriever - bServ blockservice.BlockService -} - -// NewIPLDGetter creates a new share.Getter that retrieves shares from the bitswap network. -func NewIPLDGetter(bServ blockservice.BlockService) *IPLDGetter { - return &IPLDGetter{ - rtrv: eds.NewRetriever(bServ), - bServ: bServ, - } -} - -// GetShare gets a single share at the given EDS coordinates from the bitswap network. -func (ig *IPLDGetter) GetShare(ctx context.Context, header *header.ExtendedHeader, row, col int) (share.Share, error) { - var err error - ctx, span := tracer.Start(ctx, "ipld/get-share", trace.WithAttributes( - attribute.Int("row", row), - attribute.Int("col", col), - )) - defer func() { - utils.SetStatusAndEnd(span, err) - }() - - dah := header.DAH - upperBound := len(dah.RowRoots) - if row >= upperBound || col >= upperBound { - err := share.ErrOutOfBounds - span.RecordError(err) - return nil, err - } - root, leaf := ipld.Translate(dah, row, col) - - // wrap the blockservice in a session if it has been signaled in the context. 
- blockGetter := getGetter(ctx, ig.bServ) - s, err := ipld.GetShare(ctx, blockGetter, root, leaf, len(dah.RowRoots)) - if errors.Is(err, ipld.ErrNodeNotFound) { - // convert error to satisfy getter interface contract - err = share.ErrNotFound - } - if err != nil { - return nil, fmt.Errorf("getter/ipld: failed to retrieve share: %w", err) - } - - return s, nil -} - -func (ig *IPLDGetter) GetEDS( - ctx context.Context, - header *header.ExtendedHeader, -) (eds *rsmt2d.ExtendedDataSquare, err error) { - ctx, span := tracer.Start(ctx, "ipld/get-eds") - defer func() { - utils.SetStatusAndEnd(span, err) - }() - - // rtrv.Retrieve calls shares.GetShares until enough shares are retrieved to reconstruct the EDS - eds, err = ig.rtrv.Retrieve(ctx, header.DAH) - if errors.Is(err, ipld.ErrNodeNotFound) { - // convert error to satisfy getter interface contract - err = share.ErrNotFound - } - var errByz *byzantine.ErrByzantine - if errors.As(err, &errByz) { - return nil, err - } - if err != nil { - return nil, fmt.Errorf("getter/ipld: failed to retrieve eds: %w", err) - } - return eds, nil -} - -func (ig *IPLDGetter) GetSharesByNamespace( - ctx context.Context, - header *header.ExtendedHeader, - namespace share.Namespace, -) (shares share.NamespacedShares, err error) { - ctx, span := tracer.Start(ctx, "ipld/get-shares-by-namespace", trace.WithAttributes( - attribute.String("namespace", namespace.String()), - )) - defer func() { - utils.SetStatusAndEnd(span, err) - }() - - if err = namespace.ValidateForData(); err != nil { - return nil, err - } - - // wrap the blockservice in a session if it has been signaled in the context. 
- blockGetter := getGetter(ctx, ig.bServ) - shares, err = eds.CollectSharesByNamespace(ctx, blockGetter, header.DAH, namespace) - if errors.Is(err, ipld.ErrNodeNotFound) { - // convert error to satisfy getter interface contract - err = share.ErrNotFound - } - if err != nil { - return nil, fmt.Errorf("getter/ipld: failed to retrieve shares by namespace: %w", err) - } - return shares, nil -} - -var sessionKey = &session{} - -// session is a struct that can optionally be passed by context to the share.Getter methods using -// WithSession to indicate that a blockservice session should be created. -type session struct { - sync.Mutex - atomic.Pointer[blockservice.Session] - ctx context.Context -} - -// WithSession stores an empty session in the context, indicating that a blockservice session should -// be created. -func WithSession(ctx context.Context) context.Context { - return context.WithValue(ctx, sessionKey, &session{ctx: ctx}) -} - -func getGetter(ctx context.Context, service blockservice.BlockService) blockservice.BlockGetter { - s, ok := ctx.Value(sessionKey).(*session) - if !ok { - return service - } - - val := s.Load() - if val != nil { - return val - } - - s.Lock() - defer s.Unlock() - val = s.Load() - if val == nil { - val = blockservice.NewSession(s.ctx, service) - s.Store(val) - } - return val -} diff --git a/share/getters/store.go b/share/getters/store.go deleted file mode 100644 index d66a057c56..0000000000 --- a/share/getters/store.go +++ /dev/null @@ -1,122 +0,0 @@ -package getters - -import ( - "context" - "errors" - "fmt" - - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/trace" - - "github.com/celestiaorg/rsmt2d" - - "github.com/celestiaorg/celestia-node/header" - "github.com/celestiaorg/celestia-node/libs/utils" - "github.com/celestiaorg/celestia-node/share" - "github.com/celestiaorg/celestia-node/share/eds" - "github.com/celestiaorg/celestia-node/share/ipld" -) - -var _ share.Getter = (*StoreGetter)(nil) - -// StoreGetter is a 
share.Getter that retrieves shares from an eds.Store. No results are saved to -// the eds.Store after retrieval. -type StoreGetter struct { - store *eds.Store -} - -// NewStoreGetter creates a new share.Getter that retrieves shares from an eds.Store. -func NewStoreGetter(store *eds.Store) *StoreGetter { - return &StoreGetter{ - store: store, - } -} - -// GetShare gets a single share at the given EDS coordinates from the eds.Store through the -// corresponding CAR-level blockstore. -func (sg *StoreGetter) GetShare(ctx context.Context, header *header.ExtendedHeader, row, col int) (share.Share, error) { - dah := header.DAH - var err error - ctx, span := tracer.Start(ctx, "store/get-share", trace.WithAttributes( - attribute.Int("row", row), - attribute.Int("col", col), - )) - defer func() { - utils.SetStatusAndEnd(span, err) - }() - - upperBound := len(dah.RowRoots) - if row >= upperBound || col >= upperBound { - err := share.ErrOutOfBounds - span.RecordError(err) - return nil, err - } - root, leaf := ipld.Translate(dah, row, col) - bs, err := sg.store.CARBlockstore(ctx, dah.Hash()) - if errors.Is(err, eds.ErrNotFound) { - // convert error to satisfy getter interface contract - err = share.ErrNotFound - } - if err != nil { - return nil, fmt.Errorf("getter/store: failed to retrieve blockstore: %w", err) - } - defer func() { - if err := bs.Close(); err != nil { - log.Warnw("closing blockstore", "err", err) - } - }() - - // wrap the read-only CAR blockstore in a getter - blockGetter := eds.NewBlockGetter(bs) - s, err := ipld.GetShare(ctx, blockGetter, root, leaf, len(dah.RowRoots)) - if errors.Is(err, ipld.ErrNodeNotFound) { - // convert error to satisfy getter interface contract - err = share.ErrNotFound - } - if err != nil { - return nil, fmt.Errorf("getter/store: failed to retrieve share: %w", err) - } - - return s, nil -} - -// GetEDS gets the EDS identified by the given root from the EDS store. 
-func (sg *StoreGetter) GetEDS( - ctx context.Context, header *header.ExtendedHeader, -) (data *rsmt2d.ExtendedDataSquare, err error) { - ctx, span := tracer.Start(ctx, "store/get-eds") - defer func() { - utils.SetStatusAndEnd(span, err) - }() - - data, err = sg.store.Get(ctx, header.DAH.Hash()) - if errors.Is(err, eds.ErrNotFound) { - // convert error to satisfy getter interface contract - err = share.ErrNotFound - } - if err != nil { - return nil, fmt.Errorf("getter/store: failed to retrieve eds: %w", err) - } - return data, nil -} - -// GetSharesByNamespace gets all EDS shares in the given namespace from the EDS store through the -// corresponding CAR-level blockstore. -func (sg *StoreGetter) GetSharesByNamespace( - ctx context.Context, - header *header.ExtendedHeader, - namespace share.Namespace, -) (shares share.NamespacedShares, err error) { - ctx, span := tracer.Start(ctx, "store/get-shares-by-namespace", trace.WithAttributes( - attribute.String("namespace", namespace.String()), - )) - defer func() { - utils.SetStatusAndEnd(span, err) - }() - - ns, err := eds.RetrieveNamespaceFromStore(ctx, sg.store, header.DAH, namespace) - if err != nil { - return nil, fmt.Errorf("getter/store: %w", err) - } - return ns, nil -} diff --git a/share/ipld/blockserv.go b/share/ipld/blockserv.go index 2ed2a21c77..b7a9bf84e9 100644 --- a/share/ipld/blockserv.go +++ b/share/ipld/blockserv.go @@ -9,7 +9,7 @@ import ( ) // NewBlockservice constructs Blockservice for fetching NMTrees. 
-func NewBlockservice(bs blockstore.Blockstore, exchange exchange.Interface) blockservice.BlockService { +func NewBlockservice(bs blockstore.Blockstore, exchange exchange.SessionExchange) blockservice.BlockService { return blockservice.New(bs, exchange, blockservice.WithAllowlist(defaultAllowlist)) } diff --git a/share/ipld/corrupted_data_test.go b/share/ipld/corrupted_data_test.go deleted file mode 100644 index 0d0af6dd35..0000000000 --- a/share/ipld/corrupted_data_test.go +++ /dev/null @@ -1,51 +0,0 @@ -package ipld_test - -import ( - "context" - "testing" - "time" - - "github.com/stretchr/testify/require" - - "github.com/celestiaorg/celestia-node/header/headertest" - "github.com/celestiaorg/celestia-node/share" - "github.com/celestiaorg/celestia-node/share/availability/full" - availability_test "github.com/celestiaorg/celestia-node/share/availability/test" - "github.com/celestiaorg/celestia-node/share/getters" -) - -// sharesAvailableTimeout is an arbitrarily picked interval of time in which a TestNode is expected -// to be able to complete a SharesAvailable request from a connected peer in a TestDagNet. -const sharesAvailableTimeout = 2 * time.Second - -// TestNamespaceHasher_CorruptedData is an integration test that verifies that the NamespaceHasher -// of a recipient of corrupted data will not panic, and will throw away the corrupted data. -func TestNamespaceHasher_CorruptedData(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - t.Cleanup(cancel) - net := availability_test.NewTestDAGNet(ctx, t) - - requester := full.Node(net) - provider, mockBS := availability_test.MockNode(t, net) - provider.Availability = full.TestAvailability(t, getters.NewIPLDGetter(provider.BlockService)) - net.ConnectAll() - - // before the provider starts attacking, we should be able to retrieve successfully. We pass a size - // 16 block, but this is not important to the test and any valid block size behaves the same. 
- root := availability_test.RandFillBS(t, 16, provider.BlockService) - - eh := headertest.RandExtendedHeaderWithRoot(t, root) - getCtx, cancelGet := context.WithTimeout(ctx, sharesAvailableTimeout) - t.Cleanup(cancelGet) - err := requester.SharesAvailable(getCtx, eh) - require.NoError(t, err) - - // clear the storage of the requester so that it must retrieve again, then start attacking - // we reinitialize the node to clear the eds store - requester = full.Node(net) - mockBS.Attacking = true - getCtx, cancelGet = context.WithTimeout(ctx, sharesAvailableTimeout) - t.Cleanup(cancelGet) - err = requester.SharesAvailable(getCtx, eh) - require.ErrorIs(t, err, share.ErrNotAvailable) -} diff --git a/share/ipld/delete_test.go b/share/ipld/delete_test.go index 00e6958285..745c938822 100644 --- a/share/ipld/delete_test.go +++ b/share/ipld/delete_test.go @@ -6,6 +6,7 @@ import ( "time" "github.com/ipfs/boxo/blockstore" + "github.com/ipfs/boxo/exchange" "github.com/ipfs/boxo/exchange/offline" "github.com/ipfs/go-datastore" "github.com/ipfs/go-datastore/sync" @@ -70,7 +71,11 @@ func TestDeleteNode_Sample(t *testing.T) { require.NoError(t, err) bstore := blockstore.NewBlockstore(sync.MutexWrap(datastore.NewMapDatastore())) - light := NewBlockservice(bstore, offline.Exchange(full.Blockstore())) + exch := &fakeSessionExchange{ + Interface: offline.Exchange(full.Blockstore()), + session: offline.Exchange(full.Blockstore()), + } + light := NewBlockservice(bstore, exch) cid := MustCidFromNamespacedSha256(rowRoots[0]) _, err = GetShare(ctx, light, cid, 0, len(rowRoots)) @@ -99,3 +104,17 @@ func TestDeleteNode_Sample(t *testing.T) { } require.Zero(t, postDeleteCount) } + +var _ exchange.SessionExchange = (*fakeSessionExchange)(nil) + +type fakeSessionExchange struct { + exchange.Interface + session exchange.Fetcher +} + +func (fe *fakeSessionExchange) NewSession(ctx context.Context) exchange.Fetcher { + if ctx == nil { + panic("nil context") + } + return fe.session +} diff --git 
a/share/ipld/get.go b/share/ipld/get.go index fbf0e4c3f2..9e85f7ccc9 100644 --- a/share/ipld/get.go +++ b/share/ipld/get.go @@ -157,46 +157,6 @@ func GetLeaves(ctx context.Context, wg.Wait() } -// GetProof fetches and returns the leaf's Merkle Proof. -// It walks down the IPLD NMT tree until it reaches the leaf and returns collected proof -func GetProof( - ctx context.Context, - bGetter blockservice.BlockGetter, - root cid.Cid, - proof []cid.Cid, - leaf, total int, -) ([]cid.Cid, error) { - // request the node - nd, err := GetNode(ctx, bGetter, root) - if err != nil { - return nil, err - } - // look for links - lnks := nd.Links() - if len(lnks) == 0 { - p := make([]cid.Cid, len(proof)) - copy(p, proof) - return p, nil - } - - // route walk to appropriate children - total /= 2 // as we are using binary tree, every step decreases total leaves in a half - if leaf < total { - root = lnks[0].Cid // if target leave on the left, go with walk down the first children - proof = append(proof, lnks[1].Cid) - } else { - root, leaf = lnks[1].Cid, leaf-total // otherwise go down the second - proof, err = GetProof(ctx, bGetter, root, proof, leaf, total) - if err != nil { - return nil, err - } - return append(proof, lnks[0].Cid), nil - } - - // recursively walk down through selected children - return GetProof(ctx, bGetter, root, proof, leaf, total) -} - // chanGroup implements an atomic wait group, closing a jobs chan // when fully done. 
type chanGroup struct { diff --git a/share/ipld/get_shares.go b/share/ipld/get_shares.go index 1640720b48..773e09c7d0 100644 --- a/share/ipld/get_shares.go +++ b/share/ipld/get_shares.go @@ -44,12 +44,13 @@ func GetShares(ctx context.Context, bg blockservice.BlockGetter, root cid.Cid, s func GetSharesByNamespace( ctx context.Context, bGetter blockservice.BlockGetter, - root cid.Cid, + root []byte, namespace share.Namespace, maxShares int, ) ([]share.Share, *nmt.Proof, error) { + rootCid := MustCidFromNamespacedSha256(root) data := NewNamespaceData(maxShares, namespace, WithLeaves(), WithProofs()) - err := data.CollectLeavesByNamespace(ctx, bGetter, root) + err := data.CollectLeavesByNamespace(ctx, bGetter, rootCid) if err != nil { return nil, nil, err } diff --git a/share/ipld/get_shares_test.go b/share/ipld/get_shares_test.go index 0e150dc811..220c00d015 100644 --- a/share/ipld/get_shares_test.go +++ b/share/ipld/get_shares_test.go @@ -172,8 +172,7 @@ func TestGetSharesByNamespace(t *testing.T) { rowRoots, err := eds.RowRoots() require.NoError(t, err) for _, row := range rowRoots { - rcid := MustCidFromNamespacedSha256(row) - rowShares, _, err := GetSharesByNamespace(ctx, bServ, rcid, namespace, len(rowRoots)) + rowShares, _, err := GetSharesByNamespace(ctx, bServ, row, namespace, len(rowRoots)) if errors.Is(err, ErrNamespaceOutsideRange) { continue } @@ -361,8 +360,7 @@ func TestGetSharesWithProofsByNamespace(t *testing.T) { rowRoots, err := eds.RowRoots() require.NoError(t, err) for _, row := range rowRoots { - rcid := MustCidFromNamespacedSha256(row) - rowShares, proof, err := GetSharesByNamespace(ctx, bServ, rcid, namespace, len(rowRoots)) + rowShares, proof, err := GetSharesByNamespace(ctx, bServ, row, namespace, len(rowRoots)) if namespace.IsOutsideRange(row, row) { require.ErrorIs(t, err, ErrNamespaceOutsideRange) continue @@ -384,7 +382,7 @@ func TestGetSharesWithProofsByNamespace(t *testing.T) { share.NewSHA256Hasher(), namespace.ToNMT(), leaves, - 
NamespacedSha256FromCID(rcid)) + row) require.True(t, verified) // verify inclusion @@ -392,7 +390,7 @@ func TestGetSharesWithProofsByNamespace(t *testing.T) { share.NewSHA256Hasher(), namespace.ToNMT(), rowShares, - NamespacedSha256FromCID(rcid)) + row) require.True(t, verified) } } diff --git a/share/ipld/namespace_data.go b/share/ipld/namespace_data.go index 1d8b71ec24..13cd958b47 100644 --- a/share/ipld/namespace_data.go +++ b/share/ipld/namespace_data.go @@ -14,10 +14,10 @@ import ( "github.com/celestiaorg/nmt" "github.com/celestiaorg/celestia-node/share" + "github.com/celestiaorg/celestia-node/share/shwap" ) -var ErrNamespaceOutsideRange = errors.New("share/ipld: " + - "target namespace is outside of namespace range for the given root") +var ErrNamespaceOutsideRange = shwap.ErrNamespaceOutsideRange // Option is the functional option that is applied to the NamespaceData instance // to configure data that needs to be stored. diff --git a/share/ipld/nmt.go b/share/ipld/nmt.go index 83c94bde12..0a433372b2 100644 --- a/share/ipld/nmt.go +++ b/share/ipld/nmt.go @@ -17,7 +17,6 @@ import ( mhcore "github.com/multiformats/go-multihash/core" "github.com/celestiaorg/celestia-app/v2/pkg/appconsts" - "github.com/celestiaorg/celestia-app/v2/pkg/da" "github.com/celestiaorg/nmt" "github.com/celestiaorg/celestia-node/share" @@ -158,12 +157,12 @@ func MustCidFromNamespacedSha256(hash []byte) cid.Cid { // Translate transforms square coordinates into IPLD NMT tree path to a leaf node. // It also adds randomization to evenly spread fetching from Rows and Columns. 
-func Translate(dah *da.DataAvailabilityHeader, row, col int) (cid.Cid, int) { +func Translate(roots *share.AxisRoots, row, col int) (cid.Cid, int) { if rand.Intn(2) == 0 { //nolint:gosec - return MustCidFromNamespacedSha256(dah.ColumnRoots[col]), row + return MustCidFromNamespacedSha256(roots.ColumnRoots[col]), row } - return MustCidFromNamespacedSha256(dah.RowRoots[row]), col + return MustCidFromNamespacedSha256(roots.RowRoots[row]), col } // NamespacedSha256FromCID derives the Namespaced hash from the given CID. diff --git a/share/ipld/nmt_adder.go b/share/ipld/nmt_adder.go index 7ce52859b2..f5065df224 100644 --- a/share/ipld/nmt_adder.go +++ b/share/ipld/nmt_adder.go @@ -103,13 +103,15 @@ func BatchSize(squareSize int) int { // ProofsAdder is used to collect proof nodes, while traversing merkle tree type ProofsAdder struct { - lock sync.RWMutex - proofs map[cid.Cid][]byte + lock sync.RWMutex + collectShares bool + proofs map[cid.Cid][]byte } // NewProofsAdder creates new instance of ProofsAdder. 
-func NewProofsAdder(squareSize int) *ProofsAdder { +func NewProofsAdder(squareSize int, collectShares bool) *ProofsAdder { return &ProofsAdder{ + collectShares: collectShares, // preallocate map to fit all inner nodes for given square size proofs: make(map[cid.Cid][]byte, innerNodesAmount(squareSize)), } @@ -156,7 +158,7 @@ func (a *ProofsAdder) VisitFn() nmt.NodeVisitorFn { if len(a.proofs) > 0 { return nil } - return a.visitInnerNodes + return a.visitNodes } // Purge removed proofs from ProofsAdder allowing GC to collect the memory @@ -171,10 +173,13 @@ func (a *ProofsAdder) Purge() { a.proofs = nil } -func (a *ProofsAdder) visitInnerNodes(hash []byte, children ...[]byte) { +func (a *ProofsAdder) visitNodes(hash []byte, children ...[]byte) { switch len(children) { case 1: - break + if a.collectShares { + id := MustCidFromNamespacedSha256(hash) + a.addProof(id, children[0]) + } case 2: id := MustCidFromNamespacedSha256(hash) a.addProof(id, append(children[0], children[1]...)) diff --git a/share/ipld/nmt_test.go b/share/ipld/nmt_test.go index 76d67bd2a8..040c68ec1d 100644 --- a/share/ipld/nmt_test.go +++ b/share/ipld/nmt_test.go @@ -7,9 +7,9 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "github.com/celestiaorg/celestia-app/v2/pkg/da" "github.com/celestiaorg/rsmt2d" + "github.com/celestiaorg/celestia-node/share" "github.com/celestiaorg/celestia-node/share/eds/edstest" ) @@ -26,10 +26,10 @@ func TestNamespaceFromCID(t *testing.T) { for i, tt := range tests { t.Run(strconv.Itoa(i), func(t *testing.T) { - dah, err := da.NewDataAvailabilityHeader(tt.eds) + roots, err := share.NewAxisRoots(tt.eds) require.NoError(t, err) // check to make sure NamespacedHash is correctly derived from CID - for _, row := range dah.RowRoots { + for _, row := range roots.RowRoots { c, err := CidFromNamespacedSha256(row) require.NoError(t, err) diff --git a/share/ipld/proofs.go b/share/ipld/proofs.go new file mode 100644 index 0000000000..286e817d95 
--- /dev/null +++ b/share/ipld/proofs.go @@ -0,0 +1,74 @@ +package ipld + +import ( + "context" + "math" + + "github.com/ipfs/boxo/blockservice" + "github.com/ipfs/go-cid" + + "github.com/celestiaorg/nmt" +) + +// GetProof fetches and returns the leaf's Merkle Proof. +// It walks down the IPLD NMT tree until it reaches the leaf and returns collected proof +func GetProof( + ctx context.Context, + bGetter blockservice.BlockGetter, + root []byte, + shareIdx, + total int, +) (nmt.Proof, error) { + rootCid := MustCidFromNamespacedSha256(root) + proofPath := make([]cid.Cid, 0, int(math.Sqrt(float64(total)))) + proofPath, err := getProof(ctx, bGetter, rootCid, proofPath, shareIdx, total) + if err != nil { + return nmt.Proof{}, err + } + + rangeProofs := make([][]byte, 0, len(proofPath)) + for i := len(proofPath) - 1; i >= 0; i-- { + node := NamespacedSha256FromCID(proofPath[i]) + rangeProofs = append(rangeProofs, node) + } + + return nmt.NewInclusionProof(shareIdx, shareIdx+1, rangeProofs, true), nil +} + +func getProof( + ctx context.Context, + bGetter blockservice.BlockGetter, + root cid.Cid, + proof []cid.Cid, + leaf, total int, +) ([]cid.Cid, error) { + // request the node + nd, err := GetNode(ctx, bGetter, root) + if err != nil { + return nil, err + } + // look for links + lnks := nd.Links() + if len(lnks) == 0 { + p := make([]cid.Cid, len(proof)) + copy(p, proof) + return p, nil + } + + // route walk to appropriate children + total /= 2 // as we are using binary tree, every step decreases total leaves in a half + if leaf < total { + root = lnks[0].Cid // if target leave on the left, go with walk down the first children + proof = append(proof, lnks[1].Cid) + } else { + root, leaf = lnks[1].Cid, leaf-total // otherwise go down the second + proof, err = getProof(ctx, bGetter, root, proof, leaf, total) + if err != nil { + return nil, err + } + return append(proof, lnks[0].Cid), nil + } + + // recursively walk down through selected children + return getProof(ctx, 
bGetter, root, proof, leaf, total) +} diff --git a/share/ipld/proofs_test.go b/share/ipld/proofs_test.go new file mode 100644 index 0000000000..7ff438e577 --- /dev/null +++ b/share/ipld/proofs_test.go @@ -0,0 +1,65 @@ +package ipld + +import ( + "context" + "testing" + "time" + + "github.com/stretchr/testify/require" + + "github.com/celestiaorg/rsmt2d" + + "github.com/celestiaorg/celestia-node/share" + "github.com/celestiaorg/celestia-node/share/sharetest" + "github.com/celestiaorg/celestia-node/share/shwap" +) + +func TestGetProof(t *testing.T) { + const width = 8 + + ctx, cancel := context.WithTimeout(context.Background(), time.Second*2) + defer cancel() + bServ := NewMemBlockservice() + + shares := sharetest.RandShares(t, width*width) + in, err := AddShares(ctx, shares, bServ) + require.NoError(t, err) + + axisRoots, err := share.NewAxisRoots(in) + require.NoError(t, err) + + for _, proofType := range []rsmt2d.Axis{rsmt2d.Row, rsmt2d.Col} { + var roots [][]byte + switch proofType { + case rsmt2d.Row: + roots = axisRoots.RowRoots + case rsmt2d.Col: + roots = axisRoots.ColumnRoots + } + for axisIdx := 0; axisIdx < width*2; axisIdx++ { + root := roots[axisIdx] + for shrIdx := 0; shrIdx < width*2; shrIdx++ { + proof, err := GetProof(ctx, bServ, root, shrIdx, int(in.Width())) + require.NoError(t, err) + rootCid := MustCidFromNamespacedSha256(root) + node, err := GetLeaf(ctx, bServ, rootCid, shrIdx, int(in.Width())) + require.NoError(t, err) + + sample := shwap.Sample{ + Share: share.GetData(node.RawData()), + Proof: &proof, + ProofType: proofType, + } + var rowIdx, colIdx int + switch proofType { + case rsmt2d.Row: + rowIdx, colIdx = axisIdx, shrIdx + case rsmt2d.Col: + rowIdx, colIdx = shrIdx, axisIdx + } + err = sample.Verify(axisRoots, rowIdx, colIdx) + require.NoError(t, err) + } + } + } +} diff --git a/share/ipld/utils.go b/share/ipld/utils.go index d3e987e7f3..b74d1d937d 100644 --- a/share/ipld/utils.go +++ b/share/ipld/utils.go @@ -6,8 +6,9 @@ import ( 
"github.com/celestiaorg/celestia-node/share" ) -// FilterRootByNamespace returns the row roots from the given share.Root that contain the namespace. -func FilterRootByNamespace(root *share.Root, namespace share.Namespace) []cid.Cid { +// FilterRootByNamespace returns the row roots from the given share.AxisRoots that contain the +// namespace. +func FilterRootByNamespace(root *share.AxisRoots, namespace share.Namespace) []cid.Cid { rowRootCIDs := make([]cid.Cid, 0, len(root.RowRoots)) for _, row := range root.RowRoots { if !namespace.IsOutsideRange(row, row) { diff --git a/share/namespace.go b/share/namespace.go index 24188ecb9b..8e1c5b4bb1 100644 --- a/share/namespace.go +++ b/share/namespace.go @@ -2,7 +2,9 @@ package share import ( "bytes" + "encoding/binary" "encoding/hex" + "errors" "fmt" appns "github.com/celestiaorg/go-square/namespace" @@ -182,3 +184,49 @@ func (n Namespace) IsGreater(target Namespace) bool { func (n Namespace) IsGreaterOrEqualThan(target Namespace) bool { return bytes.Compare(n, target) > -1 } + +// AddInt adds arbitrary int value to namespace, treating namespace as big-endian +// implementation of int +func (n Namespace) AddInt(val int) (Namespace, error) { + if val == 0 { + return n, nil + } + // Convert the input integer to a byte slice and add it to result slice + result := make([]byte, len(n)) + if val > 0 { + binary.BigEndian.PutUint64(result[len(n)-8:], uint64(val)) + } else { + binary.BigEndian.PutUint64(result[len(n)-8:], uint64(-val)) + } + + // Perform addition byte by byte + var carry int + for i := len(n) - 1; i >= 0; i-- { + var sum int + if val > 0 { + sum = int(n[i]) + int(result[i]) + carry + } else { + sum = int(n[i]) - int(result[i]) + carry + } + + switch { + case sum > 255: + carry = 1 + sum -= 256 + case sum < 0: + carry = -1 + sum += 256 + default: + carry = 0 + } + + result[i] = uint8(sum) + } + + // Handle any remaining carry + if carry != 0 { + return nil, errors.New("namespace overflow") + } + + return result, nil 
+} diff --git a/share/p2p/shrexeds/pb/extended_data_square.pb.go b/share/p2p/shrexeds/pb/extended_data_square.pb.go deleted file mode 100644 index ed1a96ae3b..0000000000 --- a/share/p2p/shrexeds/pb/extended_data_square.pb.go +++ /dev/null @@ -1,509 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: share/p2p/shrexeds/pb/extended_data_square.proto - -package extended_data_square - -import ( - fmt "fmt" - proto "github.com/gogo/protobuf/proto" - io "io" - math "math" - math_bits "math/bits" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -type Status int32 - -const ( - Status_INVALID Status = 0 - Status_OK Status = 1 - Status_NOT_FOUND Status = 2 - Status_INTERNAL Status = 3 -) - -var Status_name = map[int32]string{ - 0: "INVALID", - 1: "OK", - 2: "NOT_FOUND", - 3: "INTERNAL", -} - -var Status_value = map[string]int32{ - "INVALID": 0, - "OK": 1, - "NOT_FOUND": 2, - "INTERNAL": 3, -} - -func (x Status) String() string { - return proto.EnumName(Status_name, int32(x)) -} - -func (Status) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_49d42aa96098056e, []int{0} -} - -type EDSRequest struct { - Hash []byte `protobuf:"bytes,1,opt,name=hash,proto3" json:"hash,omitempty"` -} - -func (m *EDSRequest) Reset() { *m = EDSRequest{} } -func (m *EDSRequest) String() string { return proto.CompactTextString(m) } -func (*EDSRequest) ProtoMessage() {} -func (*EDSRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_49d42aa96098056e, []int{0} -} -func (m *EDSRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} 
-func (m *EDSRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_EDSRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *EDSRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_EDSRequest.Merge(m, src) -} -func (m *EDSRequest) XXX_Size() int { - return m.Size() -} -func (m *EDSRequest) XXX_DiscardUnknown() { - xxx_messageInfo_EDSRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_EDSRequest proto.InternalMessageInfo - -func (m *EDSRequest) GetHash() []byte { - if m != nil { - return m.Hash - } - return nil -} - -type EDSResponse struct { - Status Status `protobuf:"varint,1,opt,name=status,proto3,enum=Status" json:"status,omitempty"` -} - -func (m *EDSResponse) Reset() { *m = EDSResponse{} } -func (m *EDSResponse) String() string { return proto.CompactTextString(m) } -func (*EDSResponse) ProtoMessage() {} -func (*EDSResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_49d42aa96098056e, []int{1} -} -func (m *EDSResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *EDSResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_EDSResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *EDSResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_EDSResponse.Merge(m, src) -} -func (m *EDSResponse) XXX_Size() int { - return m.Size() -} -func (m *EDSResponse) XXX_DiscardUnknown() { - xxx_messageInfo_EDSResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_EDSResponse proto.InternalMessageInfo - -func (m *EDSResponse) GetStatus() Status { - if m != nil { - return m.Status - } - return Status_INVALID -} - -func init() { - proto.RegisterEnum("Status", Status_name, 
Status_value) - proto.RegisterType((*EDSRequest)(nil), "EDSRequest") - proto.RegisterType((*EDSResponse)(nil), "EDSResponse") -} - -func init() { - proto.RegisterFile("share/p2p/shrexeds/pb/extended_data_square.proto", fileDescriptor_49d42aa96098056e) -} - -var fileDescriptor_49d42aa96098056e = []byte{ - // 227 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x32, 0x28, 0xce, 0x48, 0x2c, - 0x4a, 0xd5, 0x2f, 0x30, 0x2a, 0xd0, 0x2f, 0xce, 0x28, 0x4a, 0xad, 0x48, 0x4d, 0x29, 0xd6, 0x2f, - 0x48, 0xd2, 0x4f, 0xad, 0x28, 0x49, 0xcd, 0x4b, 0x49, 0x4d, 0x89, 0x4f, 0x49, 0x2c, 0x49, 0x8c, - 0x2f, 0x2e, 0x2c, 0x4d, 0x2c, 0x4a, 0xd5, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x57, 0x52, 0xe0, 0xe2, - 0x72, 0x75, 0x09, 0x0e, 0x4a, 0x2d, 0x2c, 0x4d, 0x2d, 0x2e, 0x11, 0x12, 0xe2, 0x62, 0xc9, 0x48, - 0x2c, 0xce, 0x90, 0x60, 0x54, 0x60, 0xd4, 0xe0, 0x09, 0x02, 0xb3, 0x95, 0xf4, 0xb8, 0xb8, 0xc1, - 0x2a, 0x8a, 0x0b, 0xf2, 0xf3, 0x8a, 0x53, 0x85, 0xe4, 0xb9, 0xd8, 0x8a, 0x4b, 0x12, 0x4b, 0x4a, - 0x8b, 0xc1, 0x8a, 0xf8, 0x8c, 0xd8, 0xf5, 0x82, 0xc1, 0xdc, 0x20, 0xa8, 0xb0, 0x96, 0x15, 0x17, - 0x1b, 0x44, 0x44, 0x88, 0x9b, 0x8b, 0xdd, 0xd3, 0x2f, 0xcc, 0xd1, 0xc7, 0xd3, 0x45, 0x80, 0x41, - 0x88, 0x8d, 0x8b, 0xc9, 0xdf, 0x5b, 0x80, 0x51, 0x88, 0x97, 0x8b, 0xd3, 0xcf, 0x3f, 0x24, 0xde, - 0xcd, 0x3f, 0xd4, 0xcf, 0x45, 0x80, 0x49, 0x88, 0x87, 0x8b, 0xc3, 0xd3, 0x2f, 0xc4, 0x35, 0xc8, - 0xcf, 0xd1, 0x47, 0x80, 0xd9, 0x49, 0xe2, 0xc4, 0x23, 0x39, 0xc6, 0x0b, 0x8f, 0xe4, 0x18, 0x1f, - 0x3c, 0x92, 0x63, 0x9c, 0xf0, 0x58, 0x8e, 0xe1, 0xc2, 0x63, 0x39, 0x86, 0x1b, 0x8f, 0xe5, 0x18, - 0x92, 0xd8, 0xc0, 0xce, 0x35, 0x06, 0x04, 0x00, 0x00, 0xff, 0xff, 0x7b, 0x1d, 0xd4, 0xa7, 0xe2, - 0x00, 0x00, 0x00, -} - -func (m *EDSRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *EDSRequest) 
MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *EDSRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Hash) > 0 { - i -= len(m.Hash) - copy(dAtA[i:], m.Hash) - i = encodeVarintExtendedDataSquare(dAtA, i, uint64(len(m.Hash))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *EDSResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *EDSResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *EDSResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.Status != 0 { - i = encodeVarintExtendedDataSquare(dAtA, i, uint64(m.Status)) - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil -} - -func encodeVarintExtendedDataSquare(dAtA []byte, offset int, v uint64) int { - offset -= sovExtendedDataSquare(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *EDSRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Hash) - if l > 0 { - n += 1 + l + sovExtendedDataSquare(uint64(l)) - } - return n -} - -func (m *EDSResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Status != 0 { - n += 1 + sovExtendedDataSquare(uint64(m.Status)) - } - return n -} - -func sovExtendedDataSquare(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozExtendedDataSquare(x uint64) (n int) { - return sovExtendedDataSquare(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *EDSRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - 
var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowExtendedDataSquare - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: EDSRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: EDSRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Hash", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowExtendedDataSquare - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthExtendedDataSquare - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthExtendedDataSquare - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Hash = append(m.Hash[:0], dAtA[iNdEx:postIndex]...) 
- if m.Hash == nil { - m.Hash = []byte{} - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipExtendedDataSquare(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthExtendedDataSquare - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *EDSResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowExtendedDataSquare - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: EDSResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: EDSResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) - } - m.Status = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowExtendedDataSquare - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Status |= Status(b&0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipExtendedDataSquare(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthExtendedDataSquare - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipExtendedDataSquare(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire 
uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowExtendedDataSquare - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowExtendedDataSquare - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowExtendedDataSquare - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthExtendedDataSquare - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupExtendedDataSquare - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthExtendedDataSquare - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthExtendedDataSquare = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowExtendedDataSquare = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupExtendedDataSquare = fmt.Errorf("proto: unexpected end of group") -) diff --git a/share/p2p/shrexeds/server.go b/share/p2p/shrexeds/server.go deleted file mode 100644 index 15d67d2111..0000000000 --- a/share/p2p/shrexeds/server.go +++ /dev/null @@ -1,202 +0,0 @@ -package shrexeds - -import ( - "context" - "errors" - "fmt" - "io" - "time" - - "github.com/libp2p/go-libp2p/core/host" - "github.com/libp2p/go-libp2p/core/network" - "github.com/libp2p/go-libp2p/core/protocol" - 
"go.uber.org/zap" - - "github.com/celestiaorg/go-libp2p-messenger/serde" - - "github.com/celestiaorg/celestia-node/share" - "github.com/celestiaorg/celestia-node/share/eds" - "github.com/celestiaorg/celestia-node/share/p2p" - p2p_pb "github.com/celestiaorg/celestia-node/share/p2p/shrexeds/pb" -) - -// Server is responsible for serving ODSs for blocksync over the ShrEx/EDS protocol. -type Server struct { - ctx context.Context - cancel context.CancelFunc - - host host.Host - protocolID protocol.ID - - store *eds.Store - - params *Parameters - middleware *p2p.Middleware - metrics *p2p.Metrics -} - -// NewServer creates a new ShrEx/EDS server. -func NewServer(params *Parameters, host host.Host, store *eds.Store) (*Server, error) { - if err := params.Validate(); err != nil { - return nil, fmt.Errorf("shrex-eds: server creation failed: %w", err) - } - - return &Server{ - host: host, - store: store, - protocolID: p2p.ProtocolID(params.NetworkID(), protocolString), - params: params, - middleware: p2p.NewMiddleware(params.ConcurrencyLimit), - }, nil -} - -func (s *Server) Start(context.Context) error { - s.ctx, s.cancel = context.WithCancel(context.Background()) - handler := s.handleStream - withRateLimit := s.middleware.RateLimitHandler(handler) - withRecovery := p2p.RecoveryMiddleware(withRateLimit) - s.host.SetStreamHandler(s.protocolID, withRecovery) - return nil -} - -func (s *Server) Stop(context.Context) error { - defer s.cancel() - s.host.RemoveStreamHandler(s.protocolID) - return nil -} - -func (s *Server) observeRateLimitedRequests() { - numRateLimited := s.middleware.DrainCounter() - if numRateLimited > 0 { - s.metrics.ObserveRequests(context.Background(), numRateLimited, p2p.StatusRateLimited) - } -} - -func (s *Server) handleStream(stream network.Stream) { - logger := log.With("peer", stream.Conn().RemotePeer().String()) - logger.Debug("server: handling eds request") - - s.observeRateLimitedRequests() - - // read request from stream to get the dataHash for 
store lookup - req, err := s.readRequest(logger, stream) - if err != nil { - logger.Warnw("server: reading request from stream", "err", err) - stream.Reset() //nolint:errcheck - return - } - - // ensure the requested dataHash is a valid root - hash := share.DataHash(req.Hash) - err = hash.Validate() - if err != nil { - logger.Warnw("server: invalid request", "err", err) - stream.Reset() //nolint:errcheck - return - } - logger = logger.With("hash", hash.String()) - - ctx, cancel := context.WithTimeout(s.ctx, s.params.HandleRequestTimeout) - defer cancel() - - // determine whether the EDS is available in our store - // we do not close the reader, so that other requests will not need to re-open the file. - // closing is handled by the LRU cache. - edsReader, err := s.store.GetCAR(ctx, hash) - var status p2p_pb.Status - switch { - case err == nil: - defer func() { - if err := edsReader.Close(); err != nil { - log.Warnw("closing car reader", "err", err) - } - }() - status = p2p_pb.Status_OK - case errors.Is(err, eds.ErrNotFound): - logger.Warnw("server: request hash not found") - s.metrics.ObserveRequests(ctx, 1, p2p.StatusNotFound) - status = p2p_pb.Status_NOT_FOUND - case err != nil: - logger.Errorw("server: get CAR", "err", err) - status = p2p_pb.Status_INTERNAL - } - - // inform the client of our status - err = s.writeStatus(logger, status, stream) - if err != nil { - logger.Warnw("server: writing status to stream", "err", err) - stream.Reset() //nolint:errcheck - return - } - // if we cannot serve the EDS, we are already done - if status != p2p_pb.Status_OK { - err = stream.Close() - if err != nil { - logger.Debugw("server: closing stream", "err", err) - } - return - } - - // start streaming the ODS to the client - err = s.writeODS(logger, edsReader, stream) - if err != nil { - logger.Warnw("server: writing ods to stream", "err", err) - stream.Reset() //nolint:errcheck - return - } - - s.metrics.ObserveRequests(ctx, 1, p2p.StatusSuccess) - err = stream.Close() - if 
err != nil { - logger.Debugw("server: closing stream", "err", err) - } -} - -func (s *Server) readRequest(logger *zap.SugaredLogger, stream network.Stream) (*p2p_pb.EDSRequest, error) { - err := stream.SetReadDeadline(time.Now().Add(s.params.ServerReadTimeout)) - if err != nil { - logger.Debugw("server: set read deadline", "err", err) - } - - req := new(p2p_pb.EDSRequest) - _, err = serde.Read(stream, req) - if err != nil { - return nil, err - } - err = stream.CloseRead() - if err != nil { - logger.Debugw("server: closing read", "err", err) - } - - return req, nil -} - -func (s *Server) writeStatus(logger *zap.SugaredLogger, status p2p_pb.Status, stream network.Stream) error { - err := stream.SetWriteDeadline(time.Now().Add(s.params.ServerWriteTimeout)) - if err != nil { - logger.Debugw("server: set write deadline", "err", err) - } - - resp := &p2p_pb.EDSResponse{Status: status} - _, err = serde.Write(stream, resp) - return err -} - -func (s *Server) writeODS(logger *zap.SugaredLogger, edsReader io.Reader, stream network.Stream) error { - err := stream.SetWriteDeadline(time.Now().Add(s.params.ServerWriteTimeout)) - if err != nil { - logger.Debugw("server: set read deadline", "err", err) - } - - odsReader, err := eds.ODSReader(edsReader) - if err != nil { - return fmt.Errorf("creating ODS reader: %w", err) - } - buf := make([]byte, s.params.BufferSize) - _, err = io.CopyBuffer(stream, odsReader, buf) - if err != nil { - return fmt.Errorf("writing ODS bytes: %w", err) - } - - return nil -} diff --git a/share/p2p/shrexnd/pb/share.pb.go b/share/p2p/shrexnd/pb/share.pb.go deleted file mode 100644 index 7e3c11416f..0000000000 --- a/share/p2p/shrexnd/pb/share.pb.go +++ /dev/null @@ -1,801 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. 
-// source: share/p2p/shrexnd/pb/share.proto - -package share_p2p_shrex_nd - -import ( - fmt "fmt" - pb "github.com/celestiaorg/nmt/pb" - proto "github.com/gogo/protobuf/proto" - io "io" - math "math" - math_bits "math/bits" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -type StatusCode int32 - -const ( - StatusCode_INVALID StatusCode = 0 - StatusCode_OK StatusCode = 1 - StatusCode_NOT_FOUND StatusCode = 2 - StatusCode_INTERNAL StatusCode = 3 -) - -var StatusCode_name = map[int32]string{ - 0: "INVALID", - 1: "OK", - 2: "NOT_FOUND", - 3: "INTERNAL", -} - -var StatusCode_value = map[string]int32{ - "INVALID": 0, - "OK": 1, - "NOT_FOUND": 2, - "INTERNAL": 3, -} - -func (x StatusCode) String() string { - return proto.EnumName(StatusCode_name, int32(x)) -} - -func (StatusCode) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_ed9f13149b0de397, []int{0} -} - -type GetSharesByNamespaceRequest struct { - RootHash []byte `protobuf:"bytes,1,opt,name=root_hash,json=rootHash,proto3" json:"root_hash,omitempty"` - Namespace []byte `protobuf:"bytes,2,opt,name=namespace,proto3" json:"namespace,omitempty"` -} - -func (m *GetSharesByNamespaceRequest) Reset() { *m = GetSharesByNamespaceRequest{} } -func (m *GetSharesByNamespaceRequest) String() string { return proto.CompactTextString(m) } -func (*GetSharesByNamespaceRequest) ProtoMessage() {} -func (*GetSharesByNamespaceRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_ed9f13149b0de397, []int{0} -} -func (m *GetSharesByNamespaceRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) 
-} -func (m *GetSharesByNamespaceRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_GetSharesByNamespaceRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *GetSharesByNamespaceRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetSharesByNamespaceRequest.Merge(m, src) -} -func (m *GetSharesByNamespaceRequest) XXX_Size() int { - return m.Size() -} -func (m *GetSharesByNamespaceRequest) XXX_DiscardUnknown() { - xxx_messageInfo_GetSharesByNamespaceRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_GetSharesByNamespaceRequest proto.InternalMessageInfo - -func (m *GetSharesByNamespaceRequest) GetRootHash() []byte { - if m != nil { - return m.RootHash - } - return nil -} - -func (m *GetSharesByNamespaceRequest) GetNamespace() []byte { - if m != nil { - return m.Namespace - } - return nil -} - -type GetSharesByNamespaceStatusResponse struct { - Status StatusCode `protobuf:"varint,1,opt,name=status,proto3,enum=share.p2p.shrex.nd.StatusCode" json:"status,omitempty"` -} - -func (m *GetSharesByNamespaceStatusResponse) Reset() { *m = GetSharesByNamespaceStatusResponse{} } -func (m *GetSharesByNamespaceStatusResponse) String() string { return proto.CompactTextString(m) } -func (*GetSharesByNamespaceStatusResponse) ProtoMessage() {} -func (*GetSharesByNamespaceStatusResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_ed9f13149b0de397, []int{1} -} -func (m *GetSharesByNamespaceStatusResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *GetSharesByNamespaceStatusResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_GetSharesByNamespaceStatusResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - 
return b[:n], nil - } -} -func (m *GetSharesByNamespaceStatusResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetSharesByNamespaceStatusResponse.Merge(m, src) -} -func (m *GetSharesByNamespaceStatusResponse) XXX_Size() int { - return m.Size() -} -func (m *GetSharesByNamespaceStatusResponse) XXX_DiscardUnknown() { - xxx_messageInfo_GetSharesByNamespaceStatusResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_GetSharesByNamespaceStatusResponse proto.InternalMessageInfo - -func (m *GetSharesByNamespaceStatusResponse) GetStatus() StatusCode { - if m != nil { - return m.Status - } - return StatusCode_INVALID -} - -type NamespaceRowResponse struct { - Shares [][]byte `protobuf:"bytes,1,rep,name=shares,proto3" json:"shares,omitempty"` - Proof *pb.Proof `protobuf:"bytes,2,opt,name=proof,proto3" json:"proof,omitempty"` -} - -func (m *NamespaceRowResponse) Reset() { *m = NamespaceRowResponse{} } -func (m *NamespaceRowResponse) String() string { return proto.CompactTextString(m) } -func (*NamespaceRowResponse) ProtoMessage() {} -func (*NamespaceRowResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_ed9f13149b0de397, []int{2} -} -func (m *NamespaceRowResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *NamespaceRowResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_NamespaceRowResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *NamespaceRowResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_NamespaceRowResponse.Merge(m, src) -} -func (m *NamespaceRowResponse) XXX_Size() int { - return m.Size() -} -func (m *NamespaceRowResponse) XXX_DiscardUnknown() { - xxx_messageInfo_NamespaceRowResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_NamespaceRowResponse proto.InternalMessageInfo - -func (m *NamespaceRowResponse) GetShares() [][]byte 
{ - if m != nil { - return m.Shares - } - return nil -} - -func (m *NamespaceRowResponse) GetProof() *pb.Proof { - if m != nil { - return m.Proof - } - return nil -} - -func init() { - proto.RegisterEnum("share.p2p.shrex.nd.StatusCode", StatusCode_name, StatusCode_value) - proto.RegisterType((*GetSharesByNamespaceRequest)(nil), "share.p2p.shrex.nd.GetSharesByNamespaceRequest") - proto.RegisterType((*GetSharesByNamespaceStatusResponse)(nil), "share.p2p.shrex.nd.GetSharesByNamespaceStatusResponse") - proto.RegisterType((*NamespaceRowResponse)(nil), "share.p2p.shrex.nd.NamespaceRowResponse") -} - -func init() { proto.RegisterFile("share/p2p/shrexnd/pb/share.proto", fileDescriptor_ed9f13149b0de397) } - -var fileDescriptor_ed9f13149b0de397 = []byte{ - // 326 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x90, 0x4f, 0x4b, 0xf3, 0x40, - 0x10, 0xc6, 0x93, 0x96, 0x37, 0x6f, 0x3b, 0xad, 0x35, 0x2c, 0x22, 0xc5, 0xca, 0x52, 0x02, 0x42, - 0xf1, 0xb0, 0x81, 0x08, 0x1e, 0x85, 0xd6, 0xfa, 0xa7, 0x58, 0x52, 0xd9, 0xb6, 0xe2, 0x41, 0x28, - 0x1b, 0xbb, 0x92, 0x8b, 0xd9, 0x35, 0xbb, 0x45, 0xfd, 0x16, 0x7e, 0x2c, 0x8f, 0x3d, 0x7a, 0x94, - 0xf6, 0x8b, 0x48, 0xb6, 0xd1, 0x1c, 0xf4, 0xb6, 0xf3, 0xcc, 0x33, 0xbf, 0x7d, 0x66, 0xa0, 0xad, - 0x62, 0x96, 0x72, 0x5f, 0x06, 0xd2, 0x57, 0x71, 0xca, 0x5f, 0x92, 0xb9, 0x2f, 0x23, 0xdf, 0x88, - 0x44, 0xa6, 0x42, 0x0b, 0x84, 0xf2, 0x22, 0x90, 0xc4, 0x38, 0x48, 0x32, 0xdf, 0x6b, 0xc8, 0xc8, - 0x97, 0xa9, 0x10, 0x0f, 0x1b, 0x8f, 0x77, 0x0b, 0xad, 0x0b, 0xae, 0xc7, 0x99, 0x51, 0xf5, 0x5e, - 0x43, 0xf6, 0xc8, 0x95, 0x64, 0xf7, 0x9c, 0xf2, 0xa7, 0x05, 0x57, 0x1a, 0xb5, 0xa0, 0x9a, 0x0a, - 0xa1, 0x67, 0x31, 0x53, 0x71, 0xd3, 0x6e, 0xdb, 0x9d, 0x3a, 0xad, 0x64, 0xc2, 0x25, 0x53, 0x31, - 0xda, 0x87, 0x6a, 0xf2, 0x3d, 0xd0, 0x2c, 0x99, 0x66, 0x21, 0x78, 0x77, 0xe0, 0xfd, 0x45, 0x1e, - 0x6b, 0xa6, 0x17, 0x8a, 0x72, 0x25, 0x45, 0xa2, 0x38, 0x3a, 0x06, 0x47, 0x19, 0xc5, 0xd0, 0x1b, - 0x01, 0x26, 0xbf, 
0x43, 0x93, 0xcd, 0xcc, 0xa9, 0x98, 0x73, 0x9a, 0xbb, 0xbd, 0x29, 0xec, 0x14, - 0x61, 0xc5, 0xf3, 0x0f, 0x6f, 0x17, 0x1c, 0x03, 0xc8, 0x78, 0xe5, 0x4e, 0x9d, 0xe6, 0x15, 0x3a, - 0x80, 0x7f, 0x66, 0x6d, 0x93, 0xb3, 0x16, 0x6c, 0x93, 0xfc, 0x08, 0x11, 0xb9, 0xce, 0x1e, 0x74, - 0xd3, 0x3d, 0x3c, 0x01, 0x28, 0x3e, 0x43, 0x35, 0xf8, 0x3f, 0x08, 0x6f, 0xba, 0xc3, 0x41, 0xdf, - 0xb5, 0x90, 0x03, 0xa5, 0xd1, 0x95, 0x6b, 0xa3, 0x2d, 0xa8, 0x86, 0xa3, 0xc9, 0xec, 0x7c, 0x34, - 0x0d, 0xfb, 0x6e, 0x09, 0xd5, 0xa1, 0x32, 0x08, 0x27, 0x67, 0x34, 0xec, 0x0e, 0xdd, 0x72, 0xaf, - 0xf9, 0xbe, 0xc2, 0xf6, 0x72, 0x85, 0xed, 0xcf, 0x15, 0xb6, 0xdf, 0xd6, 0xd8, 0x5a, 0xae, 0xb1, - 0xf5, 0xb1, 0xc6, 0x56, 0xe4, 0x98, 0x7b, 0x1f, 0x7d, 0x05, 0x00, 0x00, 0xff, 0xff, 0x1a, 0x53, - 0xb4, 0x86, 0xb7, 0x01, 0x00, 0x00, -} - -func (m *GetSharesByNamespaceRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *GetSharesByNamespaceRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *GetSharesByNamespaceRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Namespace) > 0 { - i -= len(m.Namespace) - copy(dAtA[i:], m.Namespace) - i = encodeVarintShare(dAtA, i, uint64(len(m.Namespace))) - i-- - dAtA[i] = 0x12 - } - if len(m.RootHash) > 0 { - i -= len(m.RootHash) - copy(dAtA[i:], m.RootHash) - i = encodeVarintShare(dAtA, i, uint64(len(m.RootHash))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *GetSharesByNamespaceStatusResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *GetSharesByNamespaceStatusResponse) MarshalTo(dAtA 
[]byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *GetSharesByNamespaceStatusResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.Status != 0 { - i = encodeVarintShare(dAtA, i, uint64(m.Status)) - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil -} - -func (m *NamespaceRowResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *NamespaceRowResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *NamespaceRowResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.Proof != nil { - { - size, err := m.Proof.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintShare(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - if len(m.Shares) > 0 { - for iNdEx := len(m.Shares) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Shares[iNdEx]) - copy(dAtA[i:], m.Shares[iNdEx]) - i = encodeVarintShare(dAtA, i, uint64(len(m.Shares[iNdEx]))) - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func encodeVarintShare(dAtA []byte, offset int, v uint64) int { - offset -= sovShare(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *GetSharesByNamespaceRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.RootHash) - if l > 0 { - n += 1 + l + sovShare(uint64(l)) - } - l = len(m.Namespace) - if l > 0 { - n += 1 + l + sovShare(uint64(l)) - } - return n -} - -func (m *GetSharesByNamespaceStatusResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Status != 0 { - n += 1 + 
sovShare(uint64(m.Status)) - } - return n -} - -func (m *NamespaceRowResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Shares) > 0 { - for _, b := range m.Shares { - l = len(b) - n += 1 + l + sovShare(uint64(l)) - } - } - if m.Proof != nil { - l = m.Proof.Size() - n += 1 + l + sovShare(uint64(l)) - } - return n -} - -func sovShare(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozShare(x uint64) (n int) { - return sovShare(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *GetSharesByNamespaceRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowShare - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: GetSharesByNamespaceRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: GetSharesByNamespaceRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RootHash", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowShare - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthShare - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthShare - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.RootHash = append(m.RootHash[:0], dAtA[iNdEx:postIndex]...) 
- if m.RootHash == nil { - m.RootHash = []byte{} - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowShare - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthShare - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthShare - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Namespace = append(m.Namespace[:0], dAtA[iNdEx:postIndex]...) - if m.Namespace == nil { - m.Namespace = []byte{} - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipShare(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthShare - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *GetSharesByNamespaceStatusResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowShare - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: GetSharesByNamespaceStatusResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: GetSharesByNamespaceStatusResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) - } - m.Status = 0 - for shift 
:= uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowShare - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Status |= StatusCode(b&0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipShare(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthShare - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *NamespaceRowResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowShare - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: NamespaceRowResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: NamespaceRowResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Shares", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowShare - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthShare - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthShare - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Shares = append(m.Shares, make([]byte, postIndex-iNdEx)) - copy(m.Shares[len(m.Shares)-1], dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 
{ - return fmt.Errorf("proto: wrong wireType = %d for field Proof", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowShare - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthShare - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthShare - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Proof == nil { - m.Proof = &pb.Proof{} - } - if err := m.Proof.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipShare(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthShare - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipShare(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowShare - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowShare - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowShare - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, 
ErrInvalidLengthShare - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupShare - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthShare - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthShare = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowShare = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupShare = fmt.Errorf("proto: unexpected end of group") -) diff --git a/share/p2p/shrexnd/pb/share.proto b/share/p2p/shrexnd/pb/share.proto deleted file mode 100644 index a5bdbfa071..0000000000 --- a/share/p2p/shrexnd/pb/share.proto +++ /dev/null @@ -1,25 +0,0 @@ -syntax = "proto3"; - -package share.p2p.shrex.nd; -import "pb/proof.proto"; - -message GetSharesByNamespaceRequest{ - bytes root_hash = 1; - bytes namespace = 2; -} - -message GetSharesByNamespaceStatusResponse{ - StatusCode status = 1; -} - -enum StatusCode { - INVALID = 0; - OK = 1; - NOT_FOUND = 2; - INTERNAL = 3; -}; - -message NamespaceRowResponse { - repeated bytes shares = 1; - proof.pb.Proof proof = 2; -} diff --git a/share/p2p/shrexnd/server.go b/share/p2p/shrexnd/server.go deleted file mode 100644 index 9773ad7327..0000000000 --- a/share/p2p/shrexnd/server.go +++ /dev/null @@ -1,257 +0,0 @@ -package shrexnd - -import ( - "context" - "crypto/sha256" - "errors" - "fmt" - "time" - - "github.com/libp2p/go-libp2p/core/host" - "github.com/libp2p/go-libp2p/core/network" - "github.com/libp2p/go-libp2p/core/protocol" - "go.uber.org/zap" - - "github.com/celestiaorg/go-libp2p-messenger/serde" - nmt_pb "github.com/celestiaorg/nmt/pb" - - "github.com/celestiaorg/celestia-node/share" - "github.com/celestiaorg/celestia-node/share/eds" - "github.com/celestiaorg/celestia-node/share/p2p" - pb "github.com/celestiaorg/celestia-node/share/p2p/shrexnd/pb" -) - 
-// Server implements server side of shrex/nd protocol to serve namespaced share to remote -// peers. -type Server struct { - cancel context.CancelFunc - - host host.Host - protocolID protocol.ID - - handler network.StreamHandler - store *eds.Store - - params *Parameters - middleware *p2p.Middleware - metrics *p2p.Metrics -} - -// NewServer creates new Server -func NewServer(params *Parameters, host host.Host, store *eds.Store) (*Server, error) { - if err := params.Validate(); err != nil { - return nil, fmt.Errorf("shrex-nd: server creation failed: %w", err) - } - - srv := &Server{ - store: store, - host: host, - params: params, - protocolID: p2p.ProtocolID(params.NetworkID(), protocolString), - middleware: p2p.NewMiddleware(params.ConcurrencyLimit), - } - - ctx, cancel := context.WithCancel(context.Background()) - srv.cancel = cancel - - handler := srv.streamHandler(ctx) - withRateLimit := srv.middleware.RateLimitHandler(handler) - withRecovery := p2p.RecoveryMiddleware(withRateLimit) - srv.handler = withRecovery - return srv, nil -} - -// Start starts the server -func (srv *Server) Start(context.Context) error { - srv.host.SetStreamHandler(srv.protocolID, srv.handler) - return nil -} - -// Stop stops the server -func (srv *Server) Stop(context.Context) error { - srv.cancel() - srv.host.RemoveStreamHandler(srv.protocolID) - return nil -} - -func (srv *Server) streamHandler(ctx context.Context) network.StreamHandler { - return func(s network.Stream) { - err := srv.handleNamespacedData(ctx, s) - if err != nil { - s.Reset() //nolint:errcheck - return - } - if err = s.Close(); err != nil { - log.Debugw("server: closing stream", "err", err) - } - } -} - -// SetHandler sets server handler -func (srv *Server) SetHandler(handler network.StreamHandler) { - srv.handler = handler -} - -func (srv *Server) observeRateLimitedRequests() { - numRateLimited := srv.middleware.DrainCounter() - if numRateLimited > 0 { - srv.metrics.ObserveRequests(context.Background(), 
numRateLimited, p2p.StatusRateLimited) - } -} - -func (srv *Server) handleNamespacedData(ctx context.Context, stream network.Stream) error { - logger := log.With("source", "server", "peer", stream.Conn().RemotePeer().String()) - logger.Debug("handling nd request") - - srv.observeRateLimitedRequests() - req, err := srv.readRequest(logger, stream) - if err != nil { - logger.Warnw("read request", "err", err) - srv.metrics.ObserveRequests(ctx, 1, p2p.StatusBadRequest) - return err - } - - logger = logger.With("namespace", share.Namespace(req.Namespace).String(), - "hash", share.DataHash(req.RootHash).String()) - - ctx, cancel := context.WithTimeout(ctx, srv.params.HandleRequestTimeout) - defer cancel() - - shares, status, err := srv.getNamespaceData(ctx, req.RootHash, req.Namespace) - if err != nil { - // server should respond with status regardless if there was an error getting data - sendErr := srv.respondStatus(ctx, logger, stream, status) - if sendErr != nil { - logger.Errorw("sending response", "err", sendErr) - srv.metrics.ObserveRequests(ctx, 1, p2p.StatusSendRespErr) - } - logger.Errorw("handling request", "err", err) - return errors.Join(err, sendErr) - } - - err = srv.respondStatus(ctx, logger, stream, status) - if err != nil { - logger.Errorw("sending response", "err", err) - srv.metrics.ObserveRequests(ctx, 1, p2p.StatusSendRespErr) - return err - } - - err = srv.sendNamespacedShares(shares, stream) - if err != nil { - logger.Errorw("send nd data", "err", err) - srv.metrics.ObserveRequests(ctx, 1, p2p.StatusSendRespErr) - return err - } - return nil -} - -func (srv *Server) readRequest( - logger *zap.SugaredLogger, - stream network.Stream, -) (*pb.GetSharesByNamespaceRequest, error) { - err := stream.SetReadDeadline(time.Now().Add(srv.params.ServerReadTimeout)) - if err != nil { - logger.Debugw("setting read deadline", "err", err) - } - - var req pb.GetSharesByNamespaceRequest - _, err = serde.Read(stream, &req) - if err != nil { - return nil, 
fmt.Errorf("reading request: %w", err) - } - - logger.Debugw("new request") - err = stream.CloseRead() - if err != nil { - logger.Debugw("closing read side of the stream", "err", err) - } - - err = validateRequest(req) - if err != nil { - return nil, fmt.Errorf("invalid request: %w", err) - } - return &req, nil -} - -func (srv *Server) getNamespaceData(ctx context.Context, - hash share.DataHash, namespace share.Namespace, -) (share.NamespacedShares, pb.StatusCode, error) { - dah, err := srv.store.GetDAH(ctx, hash) - if err != nil { - if errors.Is(err, eds.ErrNotFound) { - return nil, pb.StatusCode_NOT_FOUND, nil - } - return nil, pb.StatusCode_INTERNAL, fmt.Errorf("retrieving DAH: %w", err) - } - - shares, err := eds.RetrieveNamespaceFromStore(ctx, srv.store, dah, namespace) - if err != nil { - return nil, pb.StatusCode_INTERNAL, fmt.Errorf("retrieving shares: %w", err) - } - - return shares, pb.StatusCode_OK, nil -} - -func (srv *Server) respondStatus( - ctx context.Context, - logger *zap.SugaredLogger, - stream network.Stream, - status pb.StatusCode, -) error { - srv.observeStatus(ctx, status) - - err := stream.SetWriteDeadline(time.Now().Add(srv.params.ServerWriteTimeout)) - if err != nil { - logger.Debugw("setting write deadline", "err", err) - } - - _, err = serde.Write(stream, &pb.GetSharesByNamespaceStatusResponse{Status: status}) - if err != nil { - return fmt.Errorf("writing response: %w", err) - } - - return nil -} - -// sendNamespacedShares encodes shares into proto messages and sends it to client -func (srv *Server) sendNamespacedShares(shares share.NamespacedShares, stream network.Stream) error { - for _, row := range shares { - row := &pb.NamespaceRowResponse{ - Shares: row.Shares, - Proof: &nmt_pb.Proof{ - Start: int64(row.Proof.Start()), - End: int64(row.Proof.End()), - Nodes: row.Proof.Nodes(), - LeafHash: row.Proof.LeafHash(), - IsMaxNamespaceIgnored: row.Proof.IsMaxNamespaceIDIgnored(), - }, - } - _, err := serde.Write(stream, row) - if err != 
nil { - return fmt.Errorf("writing nd data to stream: %w", err) - } - } - return nil -} - -func (srv *Server) observeStatus(ctx context.Context, status pb.StatusCode) { - switch { - case status == pb.StatusCode_OK: - srv.metrics.ObserveRequests(ctx, 1, p2p.StatusSuccess) - case status == pb.StatusCode_NOT_FOUND: - srv.metrics.ObserveRequests(ctx, 1, p2p.StatusNotFound) - case status == pb.StatusCode_INTERNAL: - srv.metrics.ObserveRequests(ctx, 1, p2p.StatusInternalErr) - } -} - -// validateRequest checks correctness of the request -func validateRequest(req pb.GetSharesByNamespaceRequest) error { - if err := share.Namespace(req.Namespace).ValidateForData(); err != nil { - return err - } - if len(req.RootHash) != sha256.Size { - return fmt.Errorf("incorrect root hash length: %v", len(req.RootHash)) - } - return nil -} diff --git a/share/root.go b/share/root.go new file mode 100644 index 0000000000..ee912c4df2 --- /dev/null +++ b/share/root.go @@ -0,0 +1,89 @@ +package share + +import ( + "bytes" + "crypto/sha256" + "encoding/hex" + "fmt" + "hash" + + "github.com/celestiaorg/celestia-app/v2/pkg/da" + "github.com/celestiaorg/rsmt2d" +) + +const ( + // DataHashSize is the size of the DataHash. + DataHashSize = 32 + // AxisRootSize is the size of the single root in AxisRoots. + AxisRootSize = 90 +) + +// AxisRoots represents root commitment to multiple Shares. +// In practice, it is a commitment to all the Data in a square. +type AxisRoots = da.DataAvailabilityHeader + +// DataHash is a representation of the AxisRoots hash. +type DataHash []byte + +func (dh DataHash) Validate() error { + if len(dh) != DataHashSize { + return fmt.Errorf("invalid hash size, expected 32, got %d", len(dh)) + } + return nil +} + +func (dh DataHash) String() string { + return fmt.Sprintf("%X", []byte(dh)) +} + +// IsEmptyEDS check whether DataHash corresponds to the root of an empty block EDS. 
+func (dh DataHash) IsEmptyEDS() bool { + return bytes.Equal(EmptyEDSDataHash(), dh) +} + +// NewSHA256Hasher returns a new instance of a SHA-256 hasher. +func NewSHA256Hasher() hash.Hash { + return sha256.New() +} + +// NewAxisRoots generates AxisRoots(DataAvailabilityHeader) using the +// provided extended data square. +func NewAxisRoots(eds *rsmt2d.ExtendedDataSquare) (*AxisRoots, error) { + dah, err := da.NewDataAvailabilityHeader(eds) + if err != nil { + return nil, err + } + return &dah, nil +} + +// RowsWithNamespace inspects the AxisRoots for the Namespace and provides +// a slices of Row indexes containing the namespace. +func RowsWithNamespace(root *AxisRoots, namespace Namespace) (idxs []int) { + for i, row := range root.RowRoots { + if !namespace.IsOutsideRange(row, row) { + idxs = append(idxs, i) + } + } + return +} + +// RootHashForCoordinates returns the root hash for the given coordinates. +func RootHashForCoordinates(r *AxisRoots, axisType rsmt2d.Axis, rowIdx, colIdx uint) []byte { + if axisType == rsmt2d.Row { + return r.RowRoots[rowIdx] + } + return r.ColumnRoots[colIdx] +} + +// MustDataHashFromString converts a hex string to a valid datahash. +func MustDataHashFromString(datahash string) DataHash { + dh, err := hex.DecodeString(datahash) + if err != nil { + panic(fmt.Sprintf("datahash conversion: passed string was not valid hex: %s", datahash)) + } + err = DataHash(dh).Validate() + if err != nil { + panic(fmt.Sprintf("datahash validation: passed hex string failed: %s", err)) + } + return dh +} diff --git a/share/share.go b/share/share.go index 6ad4bbf40f..23d3780a09 100644 --- a/share/share.go +++ b/share/share.go @@ -1,13 +1,13 @@ package share import ( - "bytes" "crypto/sha256" - "encoding/hex" "fmt" - "hash" "github.com/celestiaorg/celestia-app/v2/pkg/appconsts" + "github.com/celestiaorg/go-square/shares" + "github.com/celestiaorg/nmt" + "github.com/celestiaorg/rsmt2d" ) // DefaultRSMT2DCodec sets the default rsmt2d.Codec for shares. 
@@ -38,39 +38,47 @@ func GetData(s Share) []byte { return s[NamespaceSize:] } -// DataHash is a representation of the Root hash. -type DataHash []byte - -func (dh DataHash) Validate() error { - if len(dh) != 32 { - return fmt.Errorf("invalid hash size, expected 32, got %d", len(dh)) +// ValidateShare checks the size of a given share. +func ValidateShare(s Share) error { + if len(s) != Size { + return fmt.Errorf("invalid share size: %d", len(s)) } return nil } -func (dh DataHash) String() string { - return fmt.Sprintf("%X", []byte(dh)) +// TailPadding is a constant tail padding share exported for reuse +func TailPadding() Share { + return tailPadding } -// IsEmptyRoot check whether DataHash corresponds to the root of an empty block EDS. -func (dh DataHash) IsEmptyRoot() bool { - return bytes.Equal(EmptyRoot().Hash(), dh) +// ShareWithProof contains data with corresponding Merkle Proof +type ShareWithProof struct { //nolint: revive + // Share is a full data including namespace + Share + // Proof is a Merkle Proof of current share + Proof *nmt.Proof + // Axis is a type of axis against which the share proof is computed + Axis rsmt2d.Axis } -// MustDataHashFromString converts a hex string to a valid datahash. -func MustDataHashFromString(datahash string) DataHash { - dh, err := hex.DecodeString(datahash) - if err != nil { - panic(fmt.Sprintf("datahash conversion: passed string was not valid hex: %s", datahash)) - } - err = DataHash(dh).Validate() - if err != nil { - panic(fmt.Sprintf("datahash validation: passed hex string failed: %s", err)) +// Validate validates inclusion of the share under the given root CID. 
+func (s *ShareWithProof) Validate(rootHash []byte, x, y, edsSize int) bool { + isParity := x >= edsSize/2 || y >= edsSize/2 + namespace := ParitySharesNamespace + if !isParity { + namespace = GetNamespace(s.Share) } - return dh + return s.Proof.VerifyInclusion( + sha256.New(), // TODO(@Wondertan): This should be defined somewhere globally + namespace.ToNMT(), + [][]byte{s.Share}, + rootHash, + ) } -// NewSHA256Hasher returns a new instance of a SHA-256 hasher. -func NewSHA256Hasher() hash.Hash { - return sha256.New() +var tailPadding Share + +func init() { + shr := shares.TailPaddingShare() + tailPadding = shr.ToBytes() } diff --git a/share/sharetest/testing.go b/share/sharetest/testing.go index 8d9b824b69..f804e18ce9 100644 --- a/share/sharetest/testing.go +++ b/share/sharetest/testing.go @@ -38,17 +38,26 @@ func RandShares(t testing.TB, total int) []share.Share { } // RandSharesWithNamespace is same the as RandShares, but sets same namespace for all shares. -func RandSharesWithNamespace(t testing.TB, namespace share.Namespace, total int) []share.Share { +func RandSharesWithNamespace(t testing.TB, namespace share.Namespace, namespacedAmount, total int) []share.Share { if total&(total-1) != 0 { t.Errorf("total must be power of 2: %d", total) t.FailNow() } + if namespacedAmount > total { + t.Errorf("withNamespace must be less than total: %d", total) + t.FailNow() + } + shares := make([]share.Share, total) rnd := rand.New(rand.NewSource(time.Now().Unix())) //nolint:gosec for i := range shares { shr := make([]byte, share.Size) - copy(share.GetNamespace(shr), namespace) + if i < namespacedAmount { + copy(share.GetNamespace(shr), namespace) + } else { + copy(share.GetNamespace(shr), RandV0Namespace()) + } _, err := rnd.Read(share.GetData(shr)) require.NoError(t, err) shares[i] = shr diff --git a/share/shwap/eds.go b/share/shwap/eds.go new file mode 100644 index 0000000000..2d856c5317 --- /dev/null +++ b/share/shwap/eds.go @@ -0,0 +1,6 @@ +package shwap + +// EDSName is 
the name identifier for the Extended Data Square. +const EDSName = "eds_v0" + +// NOTE: There is no EDS container as it's already defined by rsmt2d and eds.Accessor interface. diff --git a/share/shwap/eds_id.go b/share/shwap/eds_id.go new file mode 100644 index 0000000000..75805f11fb --- /dev/null +++ b/share/shwap/eds_id.go @@ -0,0 +1,95 @@ +package shwap + +import ( + "encoding/binary" + "fmt" + "io" +) + +// EdsIDSize defines the byte size of the EdsID. +const EdsIDSize = 8 + +// EdsID represents a unique identifier for a row, using the height of the block +// to identify the data square in the chain. +type EdsID struct { + Height uint64 // Height specifies the block height. +} + +// NewEdsID creates a new EdsID using the given height. +func NewEdsID(height uint64) (EdsID, error) { + eid := EdsID{ + Height: height, + } + return eid, eid.Validate() +} + +// EdsIDFromBinary decodes a byte slice into an EdsID, validating the length of the data. +// It returns an error if the data slice does not match the expected size of an EdsID. +func EdsIDFromBinary(data []byte) (EdsID, error) { + if len(data) != EdsIDSize { + return EdsID{}, fmt.Errorf("invalid EdsID data length: %d != %d", len(data), EdsIDSize) + } + eid := EdsID{ + Height: binary.BigEndian.Uint64(data), + } + if err := eid.Validate(); err != nil { + return EdsID{}, fmt.Errorf("validating EdsID: %w", err) + } + + return eid, nil +} + +// Equals checks equality of EdsIDs. +func (eid *EdsID) Equals(other EdsID) bool { + return eid.Height == other.Height +} + +// ReadFrom reads the binary form of EdsID from the provided reader. 
+func (eid *EdsID) ReadFrom(r io.Reader) (int64, error) { + data := make([]byte, EdsIDSize) + n, err := io.ReadFull(r, data) + if err != nil { + return int64(n), err + } + if n != EdsIDSize { + return int64(n), fmt.Errorf("EdsID: expected %d bytes, got %d", EdsIDSize, n) + } + id, err := EdsIDFromBinary(data) + if err != nil { + return int64(n), fmt.Errorf("EdsIDFromBinary: %w", err) + } + *eid = id + return int64(n), nil +} + +// MarshalBinary encodes an EdsID into its binary form, primarily for storage or network +// transmission. +func (eid EdsID) MarshalBinary() ([]byte, error) { + data := make([]byte, 0, EdsIDSize) + return eid.appendTo(data), nil +} + +// WriteTo writes the binary form of EdsID to the provided writer. +func (eid EdsID) WriteTo(w io.Writer) (int64, error) { + data, err := eid.MarshalBinary() + if err != nil { + return 0, err + } + n, err := w.Write(data) + return int64(n), err +} + +// Validate checks the integrity of an EdsID's fields against the provided Root. +// It ensures that the EdsID is not constructed with a zero Height and that the root is not nil. +func (eid EdsID) Validate() error { + if eid.Height == 0 { + return fmt.Errorf("%w: Height == 0", ErrInvalidID) + } + return nil +} + +// appendTo helps in the binary encoding of EdsID by appending the binary form of Height to the +// given byte slice. 
+func (eid EdsID) appendTo(data []byte) []byte { + return binary.BigEndian.AppendUint64(data, eid.Height) +} diff --git a/share/shwap/eds_id_test.go b/share/shwap/eds_id_test.go new file mode 100644 index 0000000000..be6cd21537 --- /dev/null +++ b/share/shwap/eds_id_test.go @@ -0,0 +1,42 @@ +package shwap + +import ( + "bytes" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestEdsID(t *testing.T) { + id, err := NewEdsID(2) + require.NoError(t, err) + + data, err := id.MarshalBinary() + require.NoError(t, err) + + idOut, err := EdsIDFromBinary(data) + require.NoError(t, err) + assert.EqualValues(t, id, idOut) + + err = idOut.Validate() + require.NoError(t, err) + require.True(t, id.Equals(idOut)) +} + +func TestEdsIDReaderWriter(t *testing.T) { + id, err := NewEdsID(2) + require.NoError(t, err) + + buf := bytes.NewBuffer(nil) + n, err := id.WriteTo(buf) + require.NoError(t, err) + require.Equal(t, int64(EdsIDSize), n) + + eidOut := EdsID{} + n, err = eidOut.ReadFrom(buf) + require.NoError(t, err) + require.Equal(t, int64(EdsIDSize), n) + + require.EqualValues(t, id, eidOut) +} diff --git a/share/shwap/getter.go b/share/shwap/getter.go new file mode 100644 index 0000000000..8a2b3389f5 --- /dev/null +++ b/share/shwap/getter.go @@ -0,0 +1,44 @@ +package shwap + +import ( + "context" + "errors" + "fmt" + + "github.com/celestiaorg/rsmt2d" + + "github.com/celestiaorg/celestia-node/header" + "github.com/celestiaorg/celestia-node/share" +) + +var ( + // ErrOperationNotSupported is used to indicate that the operation is not supported by the + // implementation of the getter interface. + ErrOperationNotSupported = errors.New("operation is not supported") + // ErrNotFound is used to indicate that requested data could not be found. + ErrNotFound = errors.New("data not found") + // ErrInvalidID is used to indicate that an ID failed validation. 
+ ErrInvalidID = errors.New("invalid shwap ID") + // ErrOutOfBounds is used to indicate that a passed row or column index is out of bounds of the + // square size. + ErrOutOfBounds = fmt.Errorf("index out of bounds: %w", ErrInvalidID) +) + +// Getter interface provides a set of accessors for shares by the Root. +// Automatically verifies integrity of shares(exceptions possible depending on the implementation). +// +//go:generate mockgen -destination=getters/mock/getter.go -package=mock . Getter +type Getter interface { + // GetShare gets a Share by coordinates in EDS. + GetShare(ctx context.Context, header *header.ExtendedHeader, row, col int) (share.Share, error) + + // GetEDS gets the full EDS identified by the given extended header. + GetEDS(context.Context, *header.ExtendedHeader) (*rsmt2d.ExtendedDataSquare, error) + + // GetSharesByNamespace gets all shares from an EDS within the given namespace. + // Shares are returned in a row-by-row order if the namespace spans multiple rows. + // Inclusion of returned data could be verified using Verify method on NamespacedShares. + // If no shares are found for target namespace non-inclusion could be also verified by calling + // Verify method. 
+ GetSharesByNamespace(context.Context, *header.ExtendedHeader, share.Namespace) (NamespaceData, error) +} diff --git a/share/getters/cascade.go b/share/shwap/getters/cascade.go similarity index 77% rename from share/getters/cascade.go rename to share/shwap/getters/cascade.go index 3b8e8060bd..bcd36f64c0 100644 --- a/share/getters/cascade.go +++ b/share/shwap/getters/cascade.go @@ -5,6 +5,8 @@ import ( "errors" "time" + logging "github.com/ipfs/go-log/v2" + "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/trace" @@ -14,26 +16,32 @@ import ( "github.com/celestiaorg/celestia-node/libs/utils" "github.com/celestiaorg/celestia-node/share" "github.com/celestiaorg/celestia-node/share/eds/byzantine" + "github.com/celestiaorg/celestia-node/share/shwap" ) -var _ share.Getter = (*CascadeGetter)(nil) +var ( + tracer = otel.Tracer("share/getters") + log = logging.Logger("share/getters") +) + +var _ shwap.Getter = (*CascadeGetter)(nil) -// CascadeGetter implements custom share.Getter that composes multiple Getter implementations in +// CascadeGetter implements custom shwap.Getter that composes multiple Getter implementations in // "cascading" order. // // See cascade func for details on cascading. type CascadeGetter struct { - getters []share.Getter + getters []shwap.Getter } -// NewCascadeGetter instantiates a new CascadeGetter from given share.Getters with given interval. -func NewCascadeGetter(getters []share.Getter) *CascadeGetter { +// NewCascadeGetter instantiates a new CascadeGetter from given shwap.Getters with given interval. +func NewCascadeGetter(getters []shwap.Getter) *CascadeGetter { return &CascadeGetter{ getters: getters, } } -// GetShare gets a share from any of registered share.Getters in cascading order. +// GetShare gets a share from any of registered shwap.Getters in cascading order. 
func (cg *CascadeGetter) GetShare( ctx context.Context, header *header.ExtendedHeader, row, col int, ) (share.Share, error) { @@ -45,44 +53,44 @@ func (cg *CascadeGetter) GetShare( upperBound := len(header.DAH.RowRoots) if row >= upperBound || col >= upperBound { - err := share.ErrOutOfBounds + err := shwap.ErrOutOfBounds span.RecordError(err) return nil, err } - get := func(ctx context.Context, get share.Getter) (share.Share, error) { + get := func(ctx context.Context, get shwap.Getter) (share.Share, error) { return get.GetShare(ctx, header, row, col) } return cascadeGetters(ctx, cg.getters, get) } -// GetEDS gets a full EDS from any of registered share.Getters in cascading order. +// GetEDS gets a full EDS from any of registered shwap.Getters in cascading order. func (cg *CascadeGetter) GetEDS( ctx context.Context, header *header.ExtendedHeader, ) (*rsmt2d.ExtendedDataSquare, error) { ctx, span := tracer.Start(ctx, "cascade/get-eds") defer span.End() - get := func(ctx context.Context, get share.Getter) (*rsmt2d.ExtendedDataSquare, error) { + get := func(ctx context.Context, get shwap.Getter) (*rsmt2d.ExtendedDataSquare, error) { return get.GetEDS(ctx, header) } return cascadeGetters(ctx, cg.getters, get) } -// GetSharesByNamespace gets NamespacedShares from any of registered share.Getters in cascading +// GetSharesByNamespace gets NamespacedShares from any of registered shwap.Getters in cascading // order. 
func (cg *CascadeGetter) GetSharesByNamespace( ctx context.Context, header *header.ExtendedHeader, namespace share.Namespace, -) (share.NamespacedShares, error) { +) (shwap.NamespaceData, error) { ctx, span := tracer.Start(ctx, "cascade/get-shares-by-namespace", trace.WithAttributes( attribute.String("namespace", namespace.String()), )) defer span.End() - get := func(ctx context.Context, get share.Getter) (share.NamespacedShares, error) { + get := func(ctx context.Context, get shwap.Getter) (shwap.NamespaceData, error) { return get.GetSharesByNamespace(ctx, header, namespace) } @@ -99,8 +107,8 @@ func (cg *CascadeGetter) GetSharesByNamespace( // NOTE: New source attempts after interval do suspend running sources in progress. func cascadeGetters[V any]( ctx context.Context, - getters []share.Getter, - get func(context.Context, share.Getter) (V, error), + getters []shwap.Getter, + get func(context.Context, shwap.Getter) (V, error), ) (V, error) { var ( zero V @@ -134,14 +142,14 @@ func cascadeGetters[V any]( // we split the timeout between left getters // once async cascadegetter is implemented, we can remove this - getCtx, cancel := ctxWithSplitTimeout(ctx, len(getters)-i, minTimeout) + getCtx, cancel := utils.CtxWithSplitTimeout(ctx, len(getters)-i, minTimeout) val, getErr := get(getCtx, getter) cancel() if getErr == nil { return val, nil } - if errors.Is(getErr, errOperationNotSupported) { + if errors.Is(getErr, shwap.ErrOperationNotSupported) { continue } diff --git a/share/getters/cascade_test.go b/share/shwap/getters/cascade_test.go similarity index 77% rename from share/getters/cascade_test.go rename to share/shwap/getters/cascade_test.go index fc2f663bdc..a23568006f 100644 --- a/share/getters/cascade_test.go +++ b/share/shwap/getters/cascade_test.go @@ -12,8 +12,8 @@ import ( "github.com/celestiaorg/rsmt2d" "github.com/celestiaorg/celestia-node/header" - "github.com/celestiaorg/celestia-node/share" - "github.com/celestiaorg/celestia-node/share/mocks" + 
"github.com/celestiaorg/celestia-node/share/shwap" + "github.com/celestiaorg/celestia-node/share/shwap/getters/mock" ) func TestCascadeGetter(t *testing.T) { @@ -22,7 +22,7 @@ func TestCascadeGetter(t *testing.T) { const gettersN = 3 headers := make([]*header.ExtendedHeader, gettersN) - getters := make([]share.Getter, gettersN) + getters := make([]shwap.Getter, gettersN) for i := range headers { getters[i], headers[i] = TestGetter(t) } @@ -50,10 +50,10 @@ func TestCascade(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) t.Cleanup(cancel) - timeoutGetter := mocks.NewMockGetter(ctrl) - immediateFailGetter := mocks.NewMockGetter(ctrl) - successGetter := mocks.NewMockGetter(ctrl) - ctxGetter := mocks.NewMockGetter(ctrl) + timeoutGetter := mock.NewMockGetter(ctrl) + immediateFailGetter := mock.NewMockGetter(ctrl) + successGetter := mock.NewMockGetter(ctrl) + ctxGetter := mock.NewMockGetter(ctrl) timeoutGetter.EXPECT().GetEDS(gomock.Any(), gomock.Any()). DoAndReturn(func(ctx context.Context, _ *header.ExtendedHeader) (*rsmt2d.ExtendedDataSquare, error) { return nil, context.DeadlineExceeded @@ -67,43 +67,43 @@ func TestCascade(t *testing.T) { return nil, ctx.Err() }).AnyTimes() - stuckGetter := mocks.NewMockGetter(ctrl) + stuckGetter := mock.NewMockGetter(ctrl) stuckGetter.EXPECT().GetEDS(gomock.Any(), gomock.Any()). 
DoAndReturn(func(ctx context.Context, _ *header.ExtendedHeader) (*rsmt2d.ExtendedDataSquare, error) { <-ctx.Done() return nil, ctx.Err() }).AnyTimes() - get := func(ctx context.Context, get share.Getter) (*rsmt2d.ExtendedDataSquare, error) { + get := func(ctx context.Context, get shwap.Getter) (*rsmt2d.ExtendedDataSquare, error) { return get.GetEDS(ctx, nil) } t.Run("SuccessFirst", func(t *testing.T) { - getters := []share.Getter{successGetter, timeoutGetter, immediateFailGetter} + getters := []shwap.Getter{successGetter, timeoutGetter, immediateFailGetter} _, err := cascadeGetters(ctx, getters, get) assert.NoError(t, err) }) t.Run("SuccessSecond", func(t *testing.T) { - getters := []share.Getter{immediateFailGetter, successGetter} + getters := []shwap.Getter{immediateFailGetter, successGetter} _, err := cascadeGetters(ctx, getters, get) assert.NoError(t, err) }) t.Run("SuccessSecondAfterFirst", func(t *testing.T) { - getters := []share.Getter{timeoutGetter, successGetter} + getters := []shwap.Getter{timeoutGetter, successGetter} _, err := cascadeGetters(ctx, getters, get) assert.NoError(t, err) }) t.Run("SuccessAfterMultipleTimeouts", func(t *testing.T) { - getters := []share.Getter{timeoutGetter, immediateFailGetter, timeoutGetter, timeoutGetter, successGetter} + getters := []shwap.Getter{timeoutGetter, immediateFailGetter, timeoutGetter, timeoutGetter, successGetter} _, err := cascadeGetters(ctx, getters, get) assert.NoError(t, err) }) t.Run("Error", func(t *testing.T) { - getters := []share.Getter{immediateFailGetter, timeoutGetter, immediateFailGetter} + getters := []shwap.Getter{immediateFailGetter, timeoutGetter, immediateFailGetter} _, err := cascadeGetters(ctx, getters, get) assert.Error(t, err) assert.Equal(t, strings.Count(err.Error(), "\n"), 2) @@ -112,20 +112,20 @@ func TestCascade(t *testing.T) { t.Run("Context Canceled", func(t *testing.T) { ctx, cancel := context.WithCancel(ctx) cancel() - getters := []share.Getter{ctxGetter, ctxGetter, ctxGetter} + 
getters := []shwap.Getter{ctxGetter, ctxGetter, ctxGetter} _, err := cascadeGetters(ctx, getters, get) assert.Error(t, err) assert.Equal(t, strings.Count(err.Error(), "\n"), 0) }) t.Run("Single", func(t *testing.T) { - getters := []share.Getter{successGetter} + getters := []shwap.Getter{successGetter} _, err := cascadeGetters(ctx, getters, get) assert.NoError(t, err) }) t.Run("Stuck getter", func(t *testing.T) { - getters := []share.Getter{stuckGetter, successGetter} + getters := []shwap.Getter{stuckGetter, successGetter} _, err := cascadeGetters(ctx, getters, get) assert.NoError(t, err) }) diff --git a/share/mocks/getter.go b/share/shwap/getters/mock/getter.go similarity index 89% rename from share/mocks/getter.go rename to share/shwap/getters/mock/getter.go index 738e2b246c..64e04a5132 100644 --- a/share/mocks/getter.go +++ b/share/shwap/getters/mock/getter.go @@ -1,8 +1,8 @@ // Code generated by MockGen. DO NOT EDIT. -// Source: github.com/celestiaorg/celestia-node/share (interfaces: Getter) +// Source: github.com/celestiaorg/celestia-node/share/shwap (interfaces: Getter) -// Package mocks is a generated GoMock package. -package mocks +// Package mock is a generated GoMock package. +package mock import ( context "context" @@ -10,6 +10,7 @@ import ( header "github.com/celestiaorg/celestia-node/header" share "github.com/celestiaorg/celestia-node/share" + shwap "github.com/celestiaorg/celestia-node/share/shwap" rsmt2d "github.com/celestiaorg/rsmt2d" gomock "github.com/golang/mock/gomock" ) @@ -68,10 +69,10 @@ func (mr *MockGetterMockRecorder) GetShare(arg0, arg1, arg2, arg3 interface{}) * } // GetSharesByNamespace mocks base method. 
-func (m *MockGetter) GetSharesByNamespace(arg0 context.Context, arg1 *header.ExtendedHeader, arg2 share.Namespace) (share.NamespacedShares, error) { +func (m *MockGetter) GetSharesByNamespace(arg0 context.Context, arg1 *header.ExtendedHeader, arg2 share.Namespace) (shwap.NamespaceData, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "GetSharesByNamespace", arg0, arg1, arg2) - ret0, _ := ret[0].(share.NamespacedShares) + ret0, _ := ret[0].(shwap.NamespaceData) ret1, _ := ret[1].(error) return ret0, ret1 } diff --git a/share/getters/testing.go b/share/shwap/getters/testing.go similarity index 80% rename from share/getters/testing.go rename to share/shwap/getters/testing.go index 665690ffe3..3714aa5260 100644 --- a/share/getters/testing.go +++ b/share/shwap/getters/testing.go @@ -14,13 +14,14 @@ import ( "github.com/celestiaorg/celestia-node/header/headertest" "github.com/celestiaorg/celestia-node/share" "github.com/celestiaorg/celestia-node/share/eds/edstest" + "github.com/celestiaorg/celestia-node/share/shwap" ) // TestGetter provides a testing SingleEDSGetter and the root of the EDS it holds. 
-func TestGetter(t *testing.T) (share.Getter, *header.ExtendedHeader) { +func TestGetter(t *testing.T) (shwap.Getter, *header.ExtendedHeader) { eds := edstest.RandEDS(t, 8) - dah, err := share.NewRoot(eds) - eh := headertest.RandExtendedHeaderWithRoot(t, dah) + roots, err := share.NewAxisRoots(eds) + eh := headertest.RandExtendedHeaderWithRoot(t, roots) require.NoError(t, err) return &SingleEDSGetter{ EDS: eds, @@ -39,7 +40,7 @@ func (seg *SingleEDSGetter) GetShare( header *header.ExtendedHeader, row, col int, ) (share.Share, error) { - err := seg.checkRoot(header.DAH) + err := seg.checkRoots(header.DAH) if err != nil { return nil, err } @@ -51,7 +52,7 @@ func (seg *SingleEDSGetter) GetEDS( _ context.Context, header *header.ExtendedHeader, ) (*rsmt2d.ExtendedDataSquare, error) { - err := seg.checkRoot(header.DAH) + err := seg.checkRoots(header.DAH) if err != nil { return nil, err } @@ -60,17 +61,17 @@ func (seg *SingleEDSGetter) GetEDS( // GetSharesByNamespace returns NamespacedShares from a kept EDS if the correct root is given. 
func (seg *SingleEDSGetter) GetSharesByNamespace(context.Context, *header.ExtendedHeader, share.Namespace, -) (share.NamespacedShares, error) { +) (shwap.NamespaceData, error) { panic("SingleEDSGetter: GetSharesByNamespace is not implemented") } -func (seg *SingleEDSGetter) checkRoot(root *share.Root) error { +func (seg *SingleEDSGetter) checkRoots(roots *share.AxisRoots) error { dah, err := da.NewDataAvailabilityHeader(seg.EDS) if err != nil { return err } - if !root.Equals(&dah) { - return fmt.Errorf("unknown EDS: have %s, asked %s", dah.String(), root.String()) + if !roots.Equals(&dah) { + return fmt.Errorf("unknown EDS: have %s, asked %s", dah.String(), roots.String()) } return nil } diff --git a/share/shwap/namespace_data.go b/share/shwap/namespace_data.go new file mode 100644 index 0000000000..656b886f2a --- /dev/null +++ b/share/shwap/namespace_data.go @@ -0,0 +1,80 @@ +package shwap + +import ( + "errors" + "fmt" + "io" + + "github.com/celestiaorg/celestia-node/share" +) + +// NamespaceDataName is the name identifier for the namespace data container. +const NamespaceDataName = "nd_v0" + +// NamespaceData stores collections of RowNamespaceData, each representing shares and their proofs +// within a namespace. +// NOTE: NamespaceData does not have its protobuf Container representation and it's only *streamed* +// as RowNamespaceData. The protobuf might be added as need comes. +type NamespaceData []RowNamespaceData + +// Flatten combines all shares from all rows within the namespace into a single slice. +func (nd NamespaceData) Flatten() []share.Share { + var shares []share.Share + for _, row := range nd { + shares = append(shares, row.Shares...) + } + return shares } + +// Verify checks the integrity of the NamespaceData against a provided root and namespace.
+func (nd NamespaceData) Verify(root *share.AxisRoots, namespace share.Namespace) error { + rowIdxs := share.RowsWithNamespace(root, namespace) + if len(rowIdxs) != len(nd) { + return fmt.Errorf("expected %d rows, found %d rows", len(rowIdxs), len(nd)) + } + + for i, row := range nd { + if err := row.Verify(root, namespace, rowIdxs[i]); err != nil { + return fmt.Errorf("validating row: %w", err) + } + } + return nil +} + +// ReadFrom reads NamespaceData from the provided reader implementing io.ReaderFrom. +// It reads series of length-delimited RowNamespaceData until EOF draining the stream. +func (nd *NamespaceData) ReadFrom(reader io.Reader) (int64, error) { + var ndNew []RowNamespaceData + var n int64 + for { + var rnd RowNamespaceData + nn, err := rnd.ReadFrom(reader) + n += nn + if errors.Is(err, io.EOF) { + break + } + if err != nil { + return n, err + } + + ndNew = append(ndNew, rnd) + } + + // all rows have been read + *nd = ndNew + return n, nil +} + +// WriteTo writes the length-delimited protobuf of NamespaceData to the provided writer. +// implementing io.WriterTo. +func (nd NamespaceData) WriteTo(writer io.Writer) (int64, error) { + var n int64 + for _, rnd := range nd { + nn, err := rnd.WriteTo(writer) + n += nn + if err != nil { + return n, err + } + } + return n, nil +} diff --git a/share/shwap/namespace_data_id.go b/share/shwap/namespace_data_id.go new file mode 100644 index 0000000000..63b5f22948 --- /dev/null +++ b/share/shwap/namespace_data_id.go @@ -0,0 +1,120 @@ +package shwap + +import ( + "fmt" + "io" + + "github.com/celestiaorg/celestia-node/share" +) + +// NamespaceDataIDSize defines the total size of a NamespaceDataID in bytes, combining the +// size of a EdsID and the size of a Namespace. +const NamespaceDataIDSize = EdsIDSize + share.NamespaceSize + +// NamespaceDataID filters the data in the EDS by a specific namespace. +type NamespaceDataID struct { + // Embedding EdsID to include the block height. 
+ EdsID + // DataNamespace will be used to identify the data within the EDS. + DataNamespace share.Namespace +} + +// NewNamespaceDataID creates a new NamespaceDataID with the specified parameters. It +// validates the namespace and returns an error if it is invalid. +func NewNamespaceDataID(height uint64, namespace share.Namespace) (NamespaceDataID, error) { + ndid := NamespaceDataID{ + EdsID: EdsID{ + Height: height, + }, + DataNamespace: namespace, + } + + if err := ndid.Validate(); err != nil { + return NamespaceDataID{}, err + } + return ndid, nil +} + +// NamespaceDataIDFromBinary deserializes a NamespaceDataID from its binary form. It returns +// an error if the binary data's length does not match the expected size. +func NamespaceDataIDFromBinary(data []byte) (NamespaceDataID, error) { + if len(data) != NamespaceDataIDSize { + return NamespaceDataID{}, + fmt.Errorf("invalid NamespaceDataID length: expected %d, got %d", NamespaceDataIDSize, len(data)) + } + + edsID, err := EdsIDFromBinary(data[:EdsIDSize]) + if err != nil { + return NamespaceDataID{}, fmt.Errorf("error unmarshaling EDSID: %w", err) + } + + ns := share.Namespace(data[EdsIDSize:]) + ndid := NamespaceDataID{ + EdsID: edsID, + DataNamespace: ns, + } + if err := ndid.Validate(); err != nil { + return NamespaceDataID{}, err + } + return ndid, nil +} + +// Equals checks equality of NamespaceDataID. +func (ndid *NamespaceDataID) Equals(other NamespaceDataID) bool { + return ndid.EdsID.Equals(other.EdsID) && ndid.DataNamespace.Equals(other.DataNamespace) +} + +// ReadFrom reads the binary form of NamespaceDataID from the provided reader. 
+func (ndid *NamespaceDataID) ReadFrom(r io.Reader) (int64, error) { + data := make([]byte, NamespaceDataIDSize) + n, err := io.ReadFull(r, data) + if err != nil { + return int64(n), err + } + if n != NamespaceDataIDSize { + return int64(n), fmt.Errorf("NamespaceDataID: expected %d bytes, got %d", NamespaceDataIDSize, n) + } + id, err := NamespaceDataIDFromBinary(data) + if err != nil { + return int64(n), fmt.Errorf("NamespaceDataIDFromBinary: %w", err) + } + *ndid = id + return int64(n), nil +} + +// MarshalBinary encodes NamespaceDataID into binary form. +// NOTE: Proto is avoided because +// * Its size is not deterministic which is required for IPLD. +// * No support for uint16 +func (ndid NamespaceDataID) MarshalBinary() ([]byte, error) { + data := make([]byte, 0, NamespaceDataIDSize) + return ndid.appendTo(data), nil +} + +// WriteTo writes the binary form of NamespaceDataID to the provided writer. +func (ndid NamespaceDataID) WriteTo(w io.Writer) (int64, error) { + data, err := ndid.MarshalBinary() + if err != nil { + return 0, err + } + n, err := w.Write(data) + return int64(n), err +} + +// Validate checks if the NamespaceDataID is valid. It checks the validity of the EdsID and the +// DataNamespace. +func (ndid NamespaceDataID) Validate() error { + if err := ndid.EdsID.Validate(); err != nil { + return fmt.Errorf("validating RowID: %w", err) + } + if err := ndid.DataNamespace.ValidateForData(); err != nil { + return fmt.Errorf("%w: validating DataNamespace: %w", ErrInvalidID, err) + } + return nil +} + +// appendTo helps in appending the binary form of DataNamespace to the serialized RowID data. +func (ndid NamespaceDataID) appendTo(data []byte) []byte { + data = ndid.EdsID.appendTo(data) + return append(data, ndid.DataNamespace...) 
+} diff --git a/share/shwap/namespace_data_id_test.go b/share/shwap/namespace_data_id_test.go new file mode 100644 index 0000000000..94b0a39703 --- /dev/null +++ b/share/shwap/namespace_data_id_test.go @@ -0,0 +1,48 @@ +package shwap + +import ( + "bytes" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/celestiaorg/celestia-node/share/sharetest" +) + +func TestNamespaceDataID(t *testing.T) { + ns := sharetest.RandV0Namespace() + + id, err := NewNamespaceDataID(1, ns) + require.NoError(t, err) + + data, err := id.MarshalBinary() + require.NoError(t, err) + + sidOut, err := NamespaceDataIDFromBinary(data) + require.NoError(t, err) + assert.EqualValues(t, id, sidOut) + + err = sidOut.Validate() + require.NoError(t, err) + require.True(t, id.Equals(sidOut)) +} + +func TestNamespaceDataIDReaderWriter(t *testing.T) { + ns := sharetest.RandV0Namespace() + + id, err := NewNamespaceDataID(1, ns) + require.NoError(t, err) + + buf := bytes.NewBuffer(nil) + n, err := id.WriteTo(buf) + require.NoError(t, err) + require.Equal(t, int64(NamespaceDataIDSize), n) + + ndidOut := NamespaceDataID{} + n, err = ndidOut.ReadFrom(buf) + require.NoError(t, err) + require.Equal(t, int64(NamespaceDataIDSize), n) + + require.EqualValues(t, id, ndidOut) +} diff --git a/share/shwap/p2p/bitswap/bitswap.go b/share/shwap/p2p/bitswap/bitswap.go new file mode 100644 index 0000000000..f63efe4012 --- /dev/null +++ b/share/shwap/p2p/bitswap/bitswap.go @@ -0,0 +1,163 @@ +package bitswap + +import ( + "context" + "errors" + "fmt" + "time" + + "github.com/ipfs/boxo/bitswap/client" + "github.com/ipfs/boxo/bitswap/network" + "github.com/ipfs/boxo/bitswap/server" + "github.com/ipfs/boxo/blockstore" + blocks "github.com/ipfs/go-block-format" + delay "github.com/ipfs/go-ipfs-delay" + routinghelpers "github.com/libp2p/go-libp2p-routing-helpers" + "github.com/libp2p/go-libp2p/core/host" + "github.com/libp2p/go-libp2p/core/protocol" +) + +// Client 
constants +const ( + // simulateDontHaves emulates DONT_HAVE message from a peer after 5 second timeout. + // This protects us from unresponsive/slow peers. + // TODO(@Wondertan): PR to bitswap to make this timeout configurable + // Higher timeout increases the probability of successful reconstruction + simulateDontHaves = true + // providerSearchDelay defines the initial delay before Bitswap client starts aggressive + // broadcasting of WANTs to all the peers. We offset this for longer than the default to minimize + // unnecessary broadcasting as in most cases we already have peers connected with needed data on + // a new request. + providerSearchDelay = time.Second * 10 + // rebroadcastDelay is similar to the providerSearchDelay, but it targets DHT/ContentRouting + // peer discovery and a gentle broadcast of a single random live WANT to all connected peers. + // Considering no DHT usage and broadcasting configured by providerSearchDelay, we set + // rebroadcastDelay to max value, effectively disabling it + rebroadcastDelay = 1<<63 - 1 +) + +// Server constants +const ( + // providesEnabled dictates Bitswap Server not to provide content to DHT/ContentRouting as we don't use it + providesEnabled = false + // sendDontHaves prevents Bitswap Server from sending DONT_HAVEs while keeping peers on hold instead: + // * Clients simulate DONT_HAVEs after timeout anyway + // * Servers may not have data immediately and this gives an opportunity to subscribe + // * This is necessary for reconstruction. See https://github.com/celestiaorg/celestia-node/issues/732 + sendDontHaves = false + // maxServerWantListsPerPeer defines the limit for maximum possible cached wants/requests per peer + // in the Bitswap. Exceeding this limit will cause Bitswap server to drop requested wants leaving + // client stuck for sometime. + // Thus, we make the limit a bit generous, so we minimize the chances of this happening. 
+ // This is relevant until https://github.com/ipfs/boxo/pull/629#discussion_r1653362485 is fixed. + maxServerWantListsPerPeer = 8096 + // targetMessageSize defines how much data Bitswap will aim to pack within a single message, before + // splitting it up in multiple. Bitswap first looks up the size of the requested data across + // multiple requests and only after reads up the data in portions one-by-one targeting the + // targetMessageSize. + // + // Bigger number will speed transfers up if reading data from disk is fast. In our case, the + // Bitswap's size lookup via [Blockstore] will already cause underlying cache to keep the data, + // so reading up data is fast, and we can aim to pack as much as we can. + targetMessageSize = 1 << 20 // 1MB + // outstandingBytesPerPeer limits number of bytes queued for work for a peer across multiple requests. + // We set it to be equal to targetMessageSize * N, so there can max N messages being prepared for + // a peer at once. + outstandingBytesPerPeer = targetMessageSize * 4 +) + +// NewNetwork constructs Bitswap network for Shwap protocol composition. +func NewNetwork(host host.Host, prefix protocol.ID) network.BitSwapNetwork { + prefix = shwapProtocolID(prefix) + net := network.NewFromIpfsHost( + host, + routinghelpers.Null{}, + network.Prefix(prefix), + network.SupportedProtocols([]protocol.ID{protocolID}), + ) + return net +} + +// NewClient constructs a Bitswap client with parameters optimized for Shwap protocol composition. +// Meant to be used by Full and Light nodes. 
+func NewClient( + ctx context.Context, + net network.BitSwapNetwork, + bstore blockstore.Blockstore, +) *client.Client { + opts := []client.Option{ + client.SetSimulateDontHavesOnTimeout(simulateDontHaves), + client.ProviderSearchDelay(providerSearchDelay), + client.RebroadcastDelay(delay.Fixed(rebroadcastDelay)), + // Prevents Has calls to Blockstore for metric that counts duplicates + // Unnecessary for our use case, so we can save some disk lookups. + client.WithoutDuplicatedBlockStats(), + } + return client.New( + ctx, + net, + bstore, + opts..., + ) +} + +// NewServer construct a Bitswap server with parameters optimized for Shwap protocol composition. +// Meant to be used by Full nodes. +func NewServer( + ctx context.Context, + net network.BitSwapNetwork, + bstore blockstore.Blockstore, +) *server.Server { + opts := []server.Option{ + server.ProvideEnabled(providesEnabled), + server.SetSendDontHaves(sendDontHaves), + server.MaxQueuedWantlistEntriesPerPeer(maxServerWantListsPerPeer), + server.WithTargetMessageSize(targetMessageSize), + server.MaxOutstandingBytesPerPeer(outstandingBytesPerPeer), + } + return server.New(ctx, net, bstore, opts...) +} + +type Bitswap struct { + *client.Client + *server.Server +} + +func New( + ctx context.Context, + net network.BitSwapNetwork, + bstore blockstore.Blockstore, +) *Bitswap { + return &Bitswap{ + Client: NewClient(ctx, net, bstore), + Server: NewServer(ctx, net, bstore), + } +} + +func (bs *Bitswap) NotifyNewBlocks(ctx context.Context, blks ...blocks.Block) error { + return errors.Join( + bs.Client.NotifyNewBlocks(ctx, blks...), + bs.Server.NotifyNewBlocks(ctx, blks...), + ) +} + +func (bs *Bitswap) Close() error { + return errors.Join( + bs.Client.Close(), + bs.Server.Close(), + ) +} + +// TODO(@Wondertan): We have to use the protocol defined by Bitswap here +// +// due to a little bug. Bitswap allows setting custom protocols, but +// they have to be either one of the switch. 
+// https://github.com/ipfs/boxo/blob/dfd4a53ba828a368cec8d61c3fe12969ac6aa94c/bitswap/network/ipfs_impl.go#L250-L266 +var protocolID = network.ProtocolBitswap + +func shwapProtocolID(network protocol.ID) protocol.ID { + if network == "" { + return "" + } + return protocol.ID(fmt.Sprintf("%s/shwap", network)) +} diff --git a/share/shwap/p2p/bitswap/block.go b/share/shwap/p2p/bitswap/block.go new file mode 100644 index 0000000000..940afb5bc2 --- /dev/null +++ b/share/shwap/p2p/bitswap/block.go @@ -0,0 +1,38 @@ +package bitswap + +import ( + "context" + + "github.com/ipfs/go-cid" + logger "github.com/ipfs/go-log/v2" + + "github.com/celestiaorg/celestia-node/share" + "github.com/celestiaorg/celestia-node/share/eds" +) + +var log = logger.Logger("shwap/bitswap") + +// Block represents Bitswap compatible generalization over Shwap containers. +// All Shwap containers must have a registered wrapper +// implementing the interface in order to be compatible with Bitswap. +// NOTE: This is not a Blockchain block, but an IPFS/Bitswap block. +type Block interface { + // CID returns Shwap ID of the Block formatted as CID. + CID() cid.Cid + // Height reports the Height of the Shwap container behind the Block. + Height() uint64 + + // Populate fills up the Block with the Shwap container getting it out of the EDS + // Accessor. + Populate(context.Context, eds.Accessor) error + // Marshal serializes bytes of the Shwap Container the Block holds. + // MUST exclude the Shwap ID. + Marshal() ([]byte, error) + // UnmarshalFn returns a closure that unmarshals the Block with the Shwap container. + // Unmarshalling involves data validation against the given AxisRoots. + UnmarshalFn(*share.AxisRoots) UnmarshalFn +} + +// UnmarshalFn is a closure produced by a Block that unmarshals and validates +// the given serialized bytes of a Shwap container with ID and populates the Block with it on success.
+type UnmarshalFn func(container, id []byte) error diff --git a/share/shwap/p2p/bitswap/block_fetch.go b/share/shwap/p2p/bitswap/block_fetch.go new file mode 100644 index 0000000000..7d7dcd1c90 --- /dev/null +++ b/share/shwap/p2p/bitswap/block_fetch.go @@ -0,0 +1,271 @@ +package bitswap + +import ( + "context" + "crypto/sha256" + "fmt" + "sync" + + "github.com/ipfs/boxo/blockstore" + "github.com/ipfs/boxo/exchange" + blocks "github.com/ipfs/go-block-format" + "github.com/ipfs/go-cid" + + "github.com/celestiaorg/celestia-node/share" +) + +// WithFetcher instructs [Fetch] to use the given Fetcher. +// Useful for reusable Fetcher sessions. +func WithFetcher(session exchange.Fetcher) FetchOption { + return func(options *fetchOptions) { + options.Session = session + } +} + +// WithStore instructs [Fetch] to store all the fetched Blocks into the given Blockstore. +func WithStore(store blockstore.Blockstore) FetchOption { + return func(options *fetchOptions) { + options.Store = store + } +} + +// Fetch fetches and populates given Blocks using Fetcher wrapping Bitswap. +// +// Validates Block against the given AxisRoots and skips Blocks that are already populated. +// Gracefully synchronize identical Blocks requested simultaneously. +// Blocks until either context is canceled or all Blocks are fetched and populated. +func Fetch( + ctx context.Context, + exchg exchange.Interface, + root *share.AxisRoots, + blks []Block, + opts ...FetchOption, +) error { + var from, to int + for to < len(blks) { + from, to = to, to+maxServerWantListsPerPeer + if to >= len(blks) { + to = len(blks) + } + + err := fetch(ctx, exchg, root, blks[from:to], opts...) + if err != nil { + return err + } + } + + return ctx.Err() +} + +// fetch fetches given Blocks. +// See [Fetch] for detailed description. 
+func fetch( + ctx context.Context, + exchg exchange.Interface, + root *share.AxisRoots, + blks []Block, + opts ...FetchOption, +) error { + var options fetchOptions + for _, opt := range opts { + opt(&options) + } + + fetcher := options.getFetcher(exchg) + cids := make([]cid.Cid, 0, len(blks)) + duplicates := make(map[cid.Cid]Block) + for _, blk := range blks { + cid := blk.CID() // memoize CID for reuse as it ain't free + cids = append(cids, cid) + + // store the UnmarshalFn s.t. hasher can access it + // and fill in the Block + unmarshalFn := blk.UnmarshalFn(root) + _, exists := unmarshalFns.LoadOrStore(cid, &unmarshalEntry{UnmarshalFn: unmarshalFn}) + if exists { + // the unmarshalFn has already been stored for the cid + // means there is ongoing fetch happening for the same cid + duplicates[cid] = blk // so mark the Block as duplicate + } else { + // cleanup are by the original requester and + // only after we are sure we got the block + defer unmarshalFns.Delete(cid) + } + } + + blkCh, err := fetcher.GetBlocks(ctx, cids) + if err != nil { + return fmt.Errorf("requesting Bitswap blocks: %w", err) + } + + for bitswapBlk := range blkCh { // GetBlocks closes blkCh on ctx cancellation + // NOTE: notification for duplicates is on purpose and to cover a flaky case + // It's harmless in practice to do additional notifications in case of duplicates + if err := exchg.NotifyNewBlocks(ctx, bitswapBlk); err != nil { + log.Error("failed to notify the new Bitswap block: %s", err) + } + + blk, ok := duplicates[bitswapBlk.Cid()] + if ok { + // uncommon duplicate case: concurrent fetching of the same block. + // The block hasn't been invoked inside hasher verification, + // so we have to unmarshal it ourselves. 
+ unmarshalFn := blk.UnmarshalFn(root) + err := unmarshal(unmarshalFn, bitswapBlk.RawData()) + if err != nil { + // this means verification succeeded in the hasher but failed here + // this case should never happen in practice + // and if so something is really wrong + panic(fmt.Sprintf("unmarshaling duplicate block: %s", err)) + } + // NOTE: This approach has a downside that we redo deserialization and computationally + // expensive computation for as many duplicates. We tried solutions that doesn't have this + // problem, but they are *much* more complex. Considering this a rare edge-case the tradeoff + // towards simplicity has been made. + continue + } + // common case: the block was populated by the hasher + // so store it if requested + err := options.store(ctx, bitswapBlk) + if err != nil { + log.Error("failed to store the new Bitswap block: %s", err) + } + } + + return ctx.Err() +} + +// unmarshal unmarshalls the Shwap Container data into a Block with the given UnmarshalFn +func unmarshal(unmarshalFn UnmarshalFn, data []byte) error { + cid, containerData, err := unmarshalProto(data) + if err != nil { + return err + } + + id, err := extractFromCID(cid) + if err != nil { + return err + } + + err = unmarshalFn(containerData, id) + if err != nil { + return fmt.Errorf("verifying and unmarshalling container data: %w", err) + } + + return nil +} + +// unmarshalFns exist to communicate between Fetch and hasher, and it's global as a necessity +// +// Fetch registers UnmarshalFNs that hasher then uses to validate and unmarshal Block responses coming +// through Bitswap +// +// Bitswap does not provide *stateful* verification out of the box and by default +// messages are verified by their respective MultiHashes that are registered globally. +// For every Block type there is a global hasher registered that accesses stored UnmarshalFn once a +// message is received. 
It then uses UnmarshalFn to validate and fill in the respective Block +// +// sync.Map is used to minimize contention for disjoint keys +var unmarshalFns sync.Map + +// unmarshalEntry wraps UnmarshalFn with a mutex to protect it from concurrent access. +type unmarshalEntry struct { + sync.Mutex + UnmarshalFn +} + +// hasher implements hash.Hash to be registered as custom multihash +// hasher is the *hack* to inject custom verification logic into Bitswap +type hasher struct { + // IDSize of the respective Shwap container + IDSize int // to be set during hasher registration + + sum []byte +} + +func (h *hasher) Write(data []byte) (int, error) { + err := h.write(data) + if err != nil { + err = fmt.Errorf("hasher: %w", err) + log.Error(err) + return 0, fmt.Errorf("shwap/bitswap: %w", err) + } + + return len(data), nil +} + +func (h *hasher) write(data []byte) error { + cid, container, err := unmarshalProto(data) + if err != nil { + return fmt.Errorf("unmarshalling proto: %w", err) + } + + // get ID out of CID while validating it + id, err := extractFromCID(cid) + if err != nil { + return err + } + + // get registered UnmarshalFn and use it to check data validity and + // pass it to Fetch caller + val, ok := unmarshalFns.Load(cid) + if !ok { + return fmt.Errorf("no unmarshallers registered for %s", cid.String()) + } + entry := val.(*unmarshalEntry) + + // ensure UnmarshalFn is synchronized + // NOTE: Bitswap may call hasher.Write concurrently, which may call unmarshal concurrently + // thus we need this synchronization.
entry.Lock() + err = entry.UnmarshalFn(container, id) + entry.Unlock() + if err != nil { + return fmt.Errorf("verifying and unmarshalling container data: %w", err) + }
cancel() + + const items = 128 + bstore, cids := testBlockstore(ctx, t, items) + + t.Run("WithBlockstore", func(t *testing.T) { + exchange := newExchange(ctx, t, bstore) + + blks := make([]Block, 0, cids.Len()) + _ = cids.ForEach(func(c cid.Cid) error { + blk, err := newEmptyTestBlock(c) + require.NoError(t, err) + blks = append(blks, blk) + return nil + }) + + bstore := blockstore.NewBlockstore(ds.NewMapDatastore()) + err := Fetch(ctx, exchange, nil, blks, WithStore(bstore)) + require.NoError(t, err) + + for _, blk := range blks { + ok, err := bstore.Has(ctx, blk.CID()) + require.NoError(t, err) + require.True(t, ok) + } + }) + + t.Run("WithFetcher", func(t *testing.T) { + exchange := newExchange(ctx, t, bstore) + + blks := make([]Block, 0, cids.Len()) + _ = cids.ForEach(func(c cid.Cid) error { + blk, err := newEmptyTestBlock(c) + require.NoError(t, err) + blks = append(blks, blk) + return nil + }) + + session := exchange.NewSession(ctx) + fetcher := &testFetcher{Embedded: session} + err := Fetch(ctx, exchange, nil, blks, WithFetcher(fetcher)) + require.NoError(t, err) + require.Equal(t, len(blks), fetcher.Fetched) + }) +} + +func TestFetch_Duplicates(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), time.Second*5) + defer cancel() + + const items = 128 + bstore, cids := testBlockstore(ctx, t, items) + exchange := newExchange(ctx, t, bstore) + + var wg sync.WaitGroup + for i := range items { + blks := make([]Block, 0, cids.Len()) + _ = cids.ForEach(func(c cid.Cid) error { + blk, err := newEmptyTestBlock(c) + require.NoError(t, err) + blks = append(blks, blk) + return nil + }) + + wg.Add(1) + go func(i int) { + rint := rand.IntN(10) + // this sleep ensures fetches aren't started simultaneously, allowing to check for edge-cases + time.Sleep(time.Millisecond * time.Duration(rint)) + + err := Fetch(ctx, exchange, nil, blks) + assert.NoError(t, err) + wg.Done() + }(i) + } + wg.Wait() + + var entries int + unmarshalFns.Range(func(key, _ any) 
bool { + unmarshalFns.Delete(key) + entries++ + return true + }) + require.Zero(t, entries) +} + +func newExchangeOverEDS(ctx context.Context, t *testing.T, rsmt2d *rsmt2d.ExtendedDataSquare) exchange.SessionExchange { + bstore := &Blockstore{ + Getter: testAccessorGetter{ + AccessorStreamer: &eds.Rsmt2D{ExtendedDataSquare: rsmt2d}, + }, + } + return newExchange(ctx, t, bstore) +} + +func newExchange(ctx context.Context, t *testing.T, bstore blockstore.Blockstore) exchange.SessionExchange { + net, err := mocknet.FullMeshLinked(3) + require.NoError(t, err) + + newServer(ctx, net.Hosts()[0], bstore) + newServer(ctx, net.Hosts()[1], bstore) + + client := newClient(ctx, net.Hosts()[2], bstore) + + err = net.ConnectAllButSelf() + require.NoError(t, err) + return client +} + +func newServer(ctx context.Context, host host.Host, store blockstore.Blockstore) { + net := NewNetwork(host, "test") + server := NewServer( + ctx, + net, + store, + ) + net.Start(server) +} + +func newClient(ctx context.Context, host host.Host, store blockstore.Blockstore) *client.Client { + net := NewNetwork(host, "test") + client := NewClient(ctx, net, store) + net.Start(client) + return client +} + +type testAccessorGetter struct { + eds.AccessorStreamer +} + +func (t testAccessorGetter) GetByHeight(context.Context, uint64) (eds.AccessorStreamer, error) { + return t.AccessorStreamer, nil +} + +type testFetcher struct { + Fetched int + + Embedded exchange.Fetcher +} + +func (t *testFetcher) GetBlock(context.Context, cid.Cid) (blocks.Block, error) { + panic("not implemented") +} + +func (t *testFetcher) GetBlocks(ctx context.Context, cids []cid.Cid) (<-chan blocks.Block, error) { + t.Fetched += len(cids) + return t.Embedded.GetBlocks(ctx, cids) +} diff --git a/share/shwap/p2p/bitswap/block_proto.go b/share/shwap/p2p/bitswap/block_proto.go new file mode 100644 index 0000000000..8cab645705 --- /dev/null +++ b/share/shwap/p2p/bitswap/block_proto.go @@ -0,0 +1,46 @@ +package bitswap + +import ( + "fmt" 
+ + "github.com/ipfs/go-cid" + + bitswappb "github.com/celestiaorg/celestia-node/share/shwap/p2p/bitswap/pb" +) + +// marshalProto wraps the given Block in composition protobuf and marshals it. +func marshalProto(blk Block) ([]byte, error) { + containerData, err := blk.Marshal() + if err != nil { + return nil, fmt.Errorf("marshaling Shwap container: %w", err) + } + + blkProto := bitswappb.Block{ + Cid: blk.CID().Bytes(), + Container: containerData, + } + + blkData, err := blkProto.Marshal() + if err != nil { + return nil, fmt.Errorf("marshaling Bitswap Block protobuf: %w", err) + } + + return blkData, nil +} + +// unmarshalProto unwraps given data from composition protobuf and provides +// inner CID and serialized container data. +func unmarshalProto(data []byte) (cid.Cid, []byte, error) { + var blk bitswappb.Block + err := blk.Unmarshal(data) + if err != nil { + return cid.Undef, nil, fmt.Errorf("unmarshalling protobuf block: %w", err) + } + + cid, err := cid.Cast(blk.Cid) + if err != nil { + return cid, nil, fmt.Errorf("casting cid: %w", err) + } + + return cid, blk.Container, nil +} diff --git a/share/shwap/p2p/bitswap/block_registry.go b/share/shwap/p2p/bitswap/block_registry.go new file mode 100644 index 0000000000..94a53bc7b9 --- /dev/null +++ b/share/shwap/p2p/bitswap/block_registry.go @@ -0,0 +1,49 @@ +package bitswap + +import ( + "fmt" + "hash" + + "github.com/ipfs/go-cid" + mh "github.com/multiformats/go-multihash" +) + +// EmptyBlock constructs an empty Block with type in the given CID. +func EmptyBlock(cid cid.Cid) (Block, error) { + spec, ok := specRegistry[cid.Prefix().MhType] + if !ok { + return nil, fmt.Errorf("unsupported Block type: %v", cid.Prefix().MhType) + } + + blk, err := spec.builder(cid) + if err != nil { + return nil, fmt.Errorf("failed to build a Block for %s: %w", spec.String(), err) + } + + return blk, nil +} + +// registerBlock registers the new Block type and multihash for it. 
+func registerBlock(mhcode, codec uint64, idSize int, bldrFn func(cid.Cid) (Block, error)) { + mh.Register(mhcode, func() hash.Hash { + return &hasher{IDSize: idSize} + }) + specRegistry[mhcode] = blockSpec{ + idSize: idSize, + codec: codec, + builder: bldrFn, + } +} + +// blockSpec holds constant metadata about particular Block types. +type blockSpec struct { + idSize int + codec uint64 + builder func(cid.Cid) (Block, error) +} + +func (spec *blockSpec) String() string { + return fmt.Sprintf("BlockSpec{IDSize: %d, Codec: %d}", spec.idSize, spec.codec) +} + +var specRegistry = make(map[uint64]blockSpec) diff --git a/share/shwap/p2p/bitswap/block_store.go b/share/shwap/p2p/bitswap/block_store.go new file mode 100644 index 0000000000..b1a5f1554e --- /dev/null +++ b/share/shwap/p2p/bitswap/block_store.go @@ -0,0 +1,114 @@ +package bitswap + +import ( + "context" + "errors" + "fmt" + + blocks "github.com/ipfs/go-block-format" + "github.com/ipfs/go-cid" + ipld "github.com/ipfs/go-ipld-format" + + "github.com/celestiaorg/celestia-node/share/eds" + "github.com/celestiaorg/celestia-node/store" +) + +// AccessorGetter abstracts storage system that indexes and manages multiple eds.AccessorGetter by +// network height. +type AccessorGetter interface { + // GetByHeight returns an Accessor by its height. + GetByHeight(ctx context.Context, height uint64) (eds.AccessorStreamer, error) +} + +// Blockstore implements generalized Bitswap compatible storage over Shwap containers +// that operates with Block and accesses data through AccessorGetter. 
+type Blockstore struct { + Getter AccessorGetter +} + +func (b *Blockstore) getBlock(ctx context.Context, cid cid.Cid) (blocks.Block, error) { + blk, err := EmptyBlock(cid) + if err != nil { + return nil, err + } + + acc, err := b.Getter.GetByHeight(ctx, blk.Height()) + if errors.Is(err, store.ErrNotFound) { + log.Debugf("no EDS Accessor for height %v found", blk.Height()) + return nil, ipld.ErrNotFound{Cid: cid} + } + if err != nil { + return nil, fmt.Errorf("getting EDS Accessor for height %v: %w", blk.Height(), err) + } + defer func() { + if err := acc.Close(); err != nil { + log.Warnf("failed to close EDS accessor for height %v: %s", blk.Height(), err) + } + }() + + if err = blk.Populate(ctx, acc); err != nil { + return nil, fmt.Errorf("failed to populate Shwap Block on height %v: %w", blk.Height(), err) + } + + return convertBitswap(blk) +} + +func (b *Blockstore) Get(ctx context.Context, cid cid.Cid) (blocks.Block, error) { + blk, err := b.getBlock(ctx, cid) + if err != nil { + return nil, err + } + + return blk, nil +} + +func (b *Blockstore) GetSize(ctx context.Context, cid cid.Cid) (int, error) { + // TODO(@Wondertan): There must be a way to derive size without reading, proving, serializing and + // allocating Sample's block.Block or we could do hashing + // NOTE:Bitswap uses GetSize also to determine if we have content stored or not + // so simply returning constant size is not an option + blk, err := b.Get(ctx, cid) + if err != nil { + return 0, err + } + return len(blk.RawData()), nil +} + +func (b *Blockstore) Has(ctx context.Context, cid cid.Cid) (bool, error) { + _, err := b.Get(ctx, cid) + if err != nil { + return false, err + } + return true, nil +} + +func (b *Blockstore) Put(context.Context, blocks.Block) error { + panic("not implemented") +} + +func (b *Blockstore) PutMany(context.Context, []blocks.Block) error { + panic("not implemented") +} + +func (b *Blockstore) DeleteBlock(context.Context, cid.Cid) error { + panic("not implemented") +} + 
+func (b *Blockstore) AllKeysChan(context.Context) (<-chan cid.Cid, error) { panic("not implemented") } + +func (b *Blockstore) HashOnRead(bool) { panic("not implemented") } + +// convertBitswap converts and marshals Block to Bitswap Block. +func convertBitswap(blk Block) (blocks.Block, error) { + protoData, err := marshalProto(blk) + if err != nil { + return nil, fmt.Errorf("failed to wrap Block with proto: %w", err) + } + + bitswapBlk, err := blocks.NewBlockWithCid(protoData, blk.CID()) + if err != nil { + return nil, fmt.Errorf("assembling Bitswap block: %w", err) + } + + return bitswapBlk, nil +} diff --git a/share/shwap/p2p/bitswap/block_test.go b/share/shwap/p2p/bitswap/block_test.go new file mode 100644 index 0000000000..39bb55bc14 --- /dev/null +++ b/share/shwap/p2p/bitswap/block_test.go @@ -0,0 +1,112 @@ +package bitswap + +import ( + "context" + crand "crypto/rand" + "encoding/binary" + "testing" + "time" + + "github.com/ipfs/boxo/blockstore" + "github.com/ipfs/go-cid" + ds "github.com/ipfs/go-datastore" + dssync "github.com/ipfs/go-datastore/sync" + "github.com/stretchr/testify/require" + + "github.com/celestiaorg/celestia-node/share" + "github.com/celestiaorg/celestia-node/share/eds" +) + +const ( + testCodec = 0x9999 + testMultihashCode = 0x9999 + testIDSize = 2 +) + +func init() { + registerBlock( + testMultihashCode, + testCodec, + testIDSize, + func(cid cid.Cid) (Block, error) { + return newEmptyTestBlock(cid) + }, + ) +} + +func testBlockstore(ctx context.Context, t *testing.T, items int) (blockstore.Blockstore, *cid.Set) { + bstore := blockstore.NewBlockstore(dssync.MutexWrap(ds.NewMapDatastore())) + + cids := cid.NewSet() + for i := range items { + blk := newTestBlock(i) + bitswapBlk, err := convertBitswap(blk) + require.NoError(t, err) + err = bstore.Put(ctx, bitswapBlk) + require.NoError(t, err) + cids.Add(blk.CID()) + } + return bstore, cids +} + +type testID uint16 + +func (t testID) MarshalBinary() (data []byte, err error) { + data = 
binary.BigEndian.AppendUint16(data, uint16(t)) + return data, nil +} + +func (t *testID) UnmarshalBinary(data []byte) error { + *t = testID(binary.BigEndian.Uint16(data)) + return nil +} + +type testBlock struct { + id testID + data []byte +} + +func newTestBlock(id int) *testBlock { + bytes := make([]byte, 256) + _, _ = crand.Read(bytes) + return &testBlock{id: testID(id), data: bytes} +} + +func newEmptyTestBlock(cid cid.Cid) (*testBlock, error) { + idData, err := extractFromCID(cid) + if err != nil { + return nil, err + } + + var id testID + err = id.UnmarshalBinary(idData) + if err != nil { + return nil, err + } + + return &testBlock{id: id}, nil +} + +func (t *testBlock) CID() cid.Cid { + return encodeToCID(t.id, testMultihashCode, testCodec) +} + +func (t *testBlock) Height() uint64 { + return 1 +} + +func (t *testBlock) Populate(context.Context, eds.Accessor) error { + return nil // noop +} + +func (t *testBlock) Marshal() ([]byte, error) { + return t.data, nil +} + +func (t *testBlock) UnmarshalFn(*share.AxisRoots) UnmarshalFn { + return func(bytes, _ []byte) error { + t.data = bytes + time.Sleep(time.Millisecond * 1) + return nil + } +} diff --git a/share/shwap/p2p/bitswap/cid.go b/share/shwap/p2p/bitswap/cid.go new file mode 100644 index 0000000000..3c5531a7fd --- /dev/null +++ b/share/shwap/p2p/bitswap/cid.go @@ -0,0 +1,53 @@ +package bitswap + +import ( + "encoding" + "fmt" + + "github.com/ipfs/go-cid" + mh "github.com/multiformats/go-multihash" +) + +// extractFromCID retrieves Shwap ID out of the CID. +func extractFromCID(cid cid.Cid) ([]byte, error) { + if err := validateCID(cid); err != nil { + return nil, fmt.Errorf("invalid cid %s: %w", cid, err) + } + // mhPrefixSize is the size of the multihash prefix that used to cut it off. + const mhPrefixSize = 4 + return cid.Hash()[mhPrefixSize:], nil +} + +// encodeToCID encodes Shwap ID into the CID. 
+func encodeToCID(bm encoding.BinaryMarshaler, mhcode, codec uint64) cid.Cid { + data, err := bm.MarshalBinary() + if err != nil { + panic(fmt.Errorf("marshaling for CID: %w", err)) + } + + buf, err := mh.Encode(data, mhcode) + if err != nil { + panic(fmt.Errorf("encoding to CID: %w", err)) + } + + return cid.NewCidV1(codec, buf) +} + +// validateCID checks correctness of the CID. +func validateCID(cid cid.Cid) error { + prefix := cid.Prefix() + spec, ok := specRegistry[prefix.MhType] + if !ok { + return fmt.Errorf("unsupported multihash type %d", prefix.MhType) + } + + if prefix.Codec != spec.codec { + return fmt.Errorf("invalid CID codec %d", prefix.Codec) + } + + if prefix.MhLength != spec.idSize { + return fmt.Errorf("invalid multihash length %d", prefix.MhLength) + } + + return nil +} diff --git a/share/shwap/p2p/bitswap/getter.go b/share/shwap/p2p/bitswap/getter.go new file mode 100644 index 0000000000..9c55650a44 --- /dev/null +++ b/share/shwap/p2p/bitswap/getter.go @@ -0,0 +1,272 @@ +package bitswap + +import ( + "context" + "fmt" + + "github.com/ipfs/boxo/blockstore" + "github.com/ipfs/boxo/exchange" + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/codes" + "go.opentelemetry.io/otel/trace" + + "github.com/celestiaorg/celestia-app/v2/pkg/wrapper" + "github.com/celestiaorg/rsmt2d" + + "github.com/celestiaorg/celestia-node/header" + "github.com/celestiaorg/celestia-node/pruner" + "github.com/celestiaorg/celestia-node/share" + "github.com/celestiaorg/celestia-node/share/shwap" +) + +var tracer = otel.Tracer("shwap/bitswap") + +// Getter implements share.Getter. +type Getter struct { + exchange exchange.SessionExchange + bstore blockstore.Blockstore + availWndw pruner.AvailabilityWindow + + availableSession exchange.Fetcher + archivalSession exchange.Fetcher + + cancel context.CancelFunc +} + +// NewGetter constructs a new Getter. 
+func NewGetter( + exchange exchange.SessionExchange, + bstore blockstore.Blockstore, + availWndw pruner.AvailabilityWindow, +) *Getter { + return &Getter{exchange: exchange, bstore: bstore, availWndw: availWndw} +} + +// Start kicks off internal fetching sessions. +// +// We keep Bitswap sessions for the whole Getter lifespan: +// - Sessions retain useful heuristics about peers, like TTFB +// - Sessions prefer peers that previously served us related content. +// +// So reusing session is expected to improve fetching performance. +// +// There are two sessions for archival and available data, so archival node peers aren't mixed +// with regular full node peers. +func (g *Getter) Start() { + ctx, cancel := context.WithCancel(context.Background()) + g.availableSession = g.exchange.NewSession(ctx) + g.archivalSession = g.exchange.NewSession(ctx) + g.cancel = cancel +} + +// Stop shuts down Getter's internal fetching session. +func (g *Getter) Stop() { + g.cancel() +} + +// GetShares uses [SampleBlock] and [Fetch] to get and verify samples for given coordinates. +// TODO(@Wondertan): Rework API to get coordinates as a single param to make it ergonomic. 
+func (g *Getter) GetShares( + ctx context.Context, + hdr *header.ExtendedHeader, + rowIdxs, colIdxs []int, +) ([]share.Share, error) { + if len(rowIdxs) != len(colIdxs) { + return nil, fmt.Errorf("row indecies and col indices must be same length") + } + + if len(rowIdxs) == 0 { + return nil, fmt.Errorf("empty coordinates") + } + + ctx, span := tracer.Start(ctx, "get-shares") + defer span.End() + + blks := make([]Block, len(rowIdxs)) + for i, rowIdx := range rowIdxs { + sid, err := NewEmptySampleBlock(hdr.Height(), rowIdx, colIdxs[i], len(hdr.DAH.RowRoots)) + if err != nil { + span.RecordError(err) + span.SetStatus(codes.Error, "NewEmptySampleBlock") + return nil, err + } + + blks[i] = sid + } + + ses := g.session(ctx, hdr) + err := Fetch(ctx, g.exchange, hdr.DAH, blks, WithStore(g.bstore), WithFetcher(ses)) + if err != nil { + span.RecordError(err) + span.SetStatus(codes.Error, "Fetch") + return nil, err + } + + shares := make([]share.Share, len(blks)) + for i, blk := range blks { + shares[i] = blk.(*SampleBlock).Container.Share + } + + span.SetStatus(codes.Ok, "") + return shares, nil +} + +// GetShare uses [GetShare] to fetch and verify single share by the given coordinates. +func (g *Getter) GetShare( + ctx context.Context, + hdr *header.ExtendedHeader, + row, col int, +) (share.Share, error) { + shrs, err := g.GetShares(ctx, hdr, []int{row}, []int{col}) + if err != nil { + return nil, err + } + + if len(shrs) != 1 { + return nil, fmt.Errorf("expected 1 share row, got %d", len(shrs)) + } + + return shrs[0], nil +} + +// GetEDS uses [RowBlock] and [Fetch] to get half of the first EDS quadrant(ODS) and +// recomputes the whole EDS from it. +// We fetch the ODS or Q1 to ensure better compatibility with archival nodes that only +// store ODS and do not recompute other quadrants. 
+func (g *Getter) GetEDS( + ctx context.Context, + hdr *header.ExtendedHeader, +) (*rsmt2d.ExtendedDataSquare, error) { + ctx, span := tracer.Start(ctx, "get-eds") + defer span.End() + + sqrLn := len(hdr.DAH.RowRoots) + blks := make([]Block, sqrLn/2) + for i := 0; i < sqrLn/2; i++ { + blk, err := NewEmptyRowBlock(hdr.Height(), i, sqrLn) + if err != nil { + span.RecordError(err) + span.SetStatus(codes.Error, "NewEmptyRowBlock") + return nil, err + } + + blks[i] = blk + } + + ses := g.session(ctx, hdr) + err := Fetch(ctx, g.exchange, hdr.DAH, blks, WithFetcher(ses)) + if err != nil { + span.RecordError(err) + span.SetStatus(codes.Error, "Fetch") + return nil, err + } + + rows := make([]shwap.Row, len(blks)) + for i, blk := range blks { + rows[i] = blk.(*RowBlock).Container + } + + square, err := edsFromRows(hdr.DAH, rows) + if err != nil { + span.RecordError(err) + span.SetStatus(codes.Error, "edsFromRows") + return nil, err + } + + span.SetStatus(codes.Ok, "") + return square, nil +} + +// GetSharesByNamespace uses [RowNamespaceDataBlock] and [Fetch] to get all the data +// by the given namespace. If data spans over multiple rows, the request is split into +// parallel RowNamespaceDataID requests per each row and then assembled back into NamespaceData. 
+func (g *Getter) GetSharesByNamespace( + ctx context.Context, + hdr *header.ExtendedHeader, + ns share.Namespace, +) (shwap.NamespaceData, error) { + if err := ns.ValidateForData(); err != nil { + return nil, err + } + + ctx, span := tracer.Start(ctx, "get-shares-by-namespace") + defer span.End() + + rowIdxs := share.RowsWithNamespace(hdr.DAH, ns) + blks := make([]Block, len(rowIdxs)) + for i, rowNdIdx := range rowIdxs { + rndblk, err := NewEmptyRowNamespaceDataBlock(hdr.Height(), rowNdIdx, ns, len(hdr.DAH.RowRoots)) + if err != nil { + span.RecordError(err) + span.SetStatus(codes.Error, "NewEmptyRowNamespaceDataBlock") + return nil, err + } + blks[i] = rndblk + } + + ses := g.session(ctx, hdr) + err := Fetch(ctx, g.exchange, hdr.DAH, blks, WithFetcher(ses)) + if err != nil { + span.RecordError(err) + span.SetStatus(codes.Error, "Fetch") + return nil, err + } + + nsShrs := make(shwap.NamespaceData, len(blks)) + for i, blk := range blks { + rnd := blk.(*RowNamespaceDataBlock).Container + nsShrs[i] = shwap.RowNamespaceData{ + Shares: rnd.Shares, + Proof: rnd.Proof, + } + } + + span.SetStatus(codes.Ok, "") + return nsShrs, nil +} + +// session decides which fetching session to use for the given header. +func (g *Getter) session(ctx context.Context, hdr *header.ExtendedHeader) exchange.Fetcher { + session := g.archivalSession + + isWithinAvailability := pruner.IsWithinAvailabilityWindow(hdr.Time(), g.availWndw) + if isWithinAvailability { + session = g.availableSession + } + + trace.SpanFromContext(ctx).SetAttributes(attribute.Bool("within_availability", isWithinAvailability)) + return session +} + +// edsFromRows imports given Rows and computes EDS out of them, assuming enough Rows were provided. +// It is designed to reuse Row halves computed during verification on [Fetch] level. 
+func edsFromRows(roots *share.AxisRoots, rows []shwap.Row) (*rsmt2d.ExtendedDataSquare, error) { + shrs := make([]share.Share, len(roots.RowRoots)*len(roots.RowRoots)) + for i, row := range rows { + rowShrs, err := row.Shares() + if err != nil { + return nil, fmt.Errorf("decoding Shares out of Row: %w", err) + } + + for j, shr := range rowShrs { + shrs[j+(i*len(roots.RowRoots))] = shr + } + } + + square, err := rsmt2d.ImportExtendedDataSquare( + shrs, + share.DefaultRSMT2DCodec(), + wrapper.NewConstructor(uint64(len(roots.RowRoots)/2)), + ) + if err != nil { + return nil, fmt.Errorf("importing EDS: %w", err) + } + + err = square.Repair(roots.RowRoots, roots.ColumnRoots) + if err != nil { + return nil, fmt.Errorf("repairing EDS: %w", err) + } + + return square, nil +} diff --git a/share/shwap/p2p/bitswap/getter_test.go b/share/shwap/p2p/bitswap/getter_test.go new file mode 100644 index 0000000000..757c34d7a0 --- /dev/null +++ b/share/shwap/p2p/bitswap/getter_test.go @@ -0,0 +1,27 @@ +package bitswap + +import ( + "testing" + + "github.com/stretchr/testify/require" + + "github.com/celestiaorg/celestia-node/share" + "github.com/celestiaorg/celestia-node/share/eds/edstest" + "github.com/celestiaorg/celestia-node/share/shwap" +) + +func TestEDSFromRows(t *testing.T) { + edsIn := edstest.RandEDS(t, 8) + roots, err := share.NewAxisRoots(edsIn) + require.NoError(t, err) + + rows := make([]shwap.Row, edsIn.Width()/2) + for i := range edsIn.Width() / 2 { + rowShrs := edsIn.Row(i)[:edsIn.Width()/2] + rows[i] = shwap.NewRow(rowShrs, shwap.Left) + } + + edsOut, err := edsFromRows(roots, rows) + require.NoError(t, err) + require.True(t, edsIn.Equals(edsOut)) +} diff --git a/share/shwap/p2p/bitswap/pb/bitswap.pb.go b/share/shwap/p2p/bitswap/pb/bitswap.pb.go new file mode 100644 index 0000000000..a84077e9ef --- /dev/null +++ b/share/shwap/p2p/bitswap/pb/bitswap.pb.go @@ -0,0 +1,372 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. 
+// source: share/shwap/p2p/bitswap/pb/bitswap.proto + +package pb + +import ( + fmt "fmt" + proto "github.com/gogo/protobuf/proto" + io "io" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +type Block struct { + Cid []byte `protobuf:"bytes,1,opt,name=cid,proto3" json:"cid,omitempty"` + Container []byte `protobuf:"bytes,2,opt,name=container,proto3" json:"container,omitempty"` +} + +func (m *Block) Reset() { *m = Block{} } +func (m *Block) String() string { return proto.CompactTextString(m) } +func (*Block) ProtoMessage() {} +func (*Block) Descriptor() ([]byte, []int) { + return fileDescriptor_09fd4e2ff1d5ce94, []int{0} +} +func (m *Block) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Block) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Block.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Block) XXX_Merge(src proto.Message) { + xxx_messageInfo_Block.Merge(m, src) +} +func (m *Block) XXX_Size() int { + return m.Size() +} +func (m *Block) XXX_DiscardUnknown() { + xxx_messageInfo_Block.DiscardUnknown(m) +} + +var xxx_messageInfo_Block proto.InternalMessageInfo + +func (m *Block) GetCid() []byte { + if m != nil { + return m.Cid + } + return nil +} + +func (m *Block) GetContainer() []byte { + if m != nil { + return m.Container + } + return nil +} + +func init() { + proto.RegisterType((*Block)(nil), 
"bitswap.Block") +} + +func init() { + proto.RegisterFile("share/shwap/p2p/bitswap/pb/bitswap.proto", fileDescriptor_09fd4e2ff1d5ce94) +} + +var fileDescriptor_09fd4e2ff1d5ce94 = []byte{ + // 171 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xd2, 0x28, 0xce, 0x48, 0x2c, + 0x4a, 0xd5, 0x2f, 0xce, 0x28, 0x4f, 0x2c, 0xd0, 0x2f, 0x30, 0x2a, 0xd0, 0x4f, 0xca, 0x2c, 0x29, + 0x06, 0xb3, 0x93, 0x60, 0x4c, 0xbd, 0x82, 0xa2, 0xfc, 0x92, 0x7c, 0x21, 0x76, 0x28, 0x57, 0xc9, + 0x9c, 0x8b, 0xd5, 0x29, 0x27, 0x3f, 0x39, 0x5b, 0x48, 0x80, 0x8b, 0x39, 0x39, 0x33, 0x45, 0x82, + 0x51, 0x81, 0x51, 0x83, 0x27, 0x08, 0xc4, 0x14, 0x92, 0xe1, 0xe2, 0x4c, 0xce, 0xcf, 0x2b, 0x49, + 0xcc, 0xcc, 0x4b, 0x2d, 0x92, 0x60, 0x02, 0x8b, 0x23, 0x04, 0x9c, 0x22, 0x4f, 0x3c, 0x92, 0x63, + 0xbc, 0xf0, 0x48, 0x8e, 0xf1, 0xc1, 0x23, 0x39, 0xc6, 0x09, 0x8f, 0xe5, 0x18, 0x2e, 0x3c, 0x96, + 0x63, 0xb8, 0xf1, 0x58, 0x8e, 0x21, 0xca, 0x3e, 0x3d, 0xb3, 0x24, 0xa3, 0x34, 0x49, 0x2f, 0x39, + 0x3f, 0x57, 0x3f, 0x39, 0x35, 0x27, 0xb5, 0xb8, 0x24, 0x33, 0x31, 0xbf, 0x28, 0x1d, 0xce, 0xd6, + 0xcd, 0xcb, 0x4f, 0x01, 0x39, 0x12, 0x97, 0x53, 0x93, 0xd8, 0xc0, 0x6e, 0x34, 0x06, 0x04, 0x00, + 0x00, 0xff, 0xff, 0xe7, 0x9c, 0x32, 0xc5, 0xcf, 0x00, 0x00, 0x00, +} + +func (m *Block) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Block) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Block) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Container) > 0 { + i -= len(m.Container) + copy(dAtA[i:], m.Container) + i = encodeVarintBitswap(dAtA, i, uint64(len(m.Container))) + i-- + dAtA[i] = 0x12 + } + if len(m.Cid) > 0 { + i -= len(m.Cid) + copy(dAtA[i:], m.Cid) + i = 
encodeVarintBitswap(dAtA, i, uint64(len(m.Cid))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func encodeVarintBitswap(dAtA []byte, offset int, v uint64) int { + offset -= sovBitswap(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *Block) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Cid) + if l > 0 { + n += 1 + l + sovBitswap(uint64(l)) + } + l = len(m.Container) + if l > 0 { + n += 1 + l + sovBitswap(uint64(l)) + } + return n +} + +func sovBitswap(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozBitswap(x uint64) (n int) { + return sovBitswap(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *Block) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBitswap + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Block: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Block: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Cid", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBitswap + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthBitswap + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthBitswap + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + 
m.Cid = append(m.Cid[:0], dAtA[iNdEx:postIndex]...) + if m.Cid == nil { + m.Cid = []byte{} + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Container", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBitswap + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthBitswap + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthBitswap + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Container = append(m.Container[:0], dAtA[iNdEx:postIndex]...) + if m.Container == nil { + m.Container = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipBitswap(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthBitswap + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipBitswap(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowBitswap + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowBitswap + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowBitswap + } + if iNdEx >= l { + return 0, 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthBitswap + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupBitswap + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthBitswap + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthBitswap = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowBitswap = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupBitswap = fmt.Errorf("proto: unexpected end of group") +) diff --git a/share/shwap/p2p/bitswap/pb/bitswap.proto b/share/shwap/p2p/bitswap/pb/bitswap.proto new file mode 100644 index 0000000000..3ba19aa49c --- /dev/null +++ b/share/shwap/p2p/bitswap/pb/bitswap.proto @@ -0,0 +1,9 @@ +// Defined in CIP-19 https://github.com/celestiaorg/CIPs/blob/82aeb7dfc472105a11babffd548c730c899a3d24/cips/cip-19.md +syntax = "proto3"; +package bitswap; +option go_package = "github.com/celestiaorg/celestia-node/share/shwap/p2p/bitswap/pb"; + +message Block { + bytes cid = 1; + bytes container = 2; +} diff --git a/share/shwap/p2p/bitswap/row_block.go b/share/shwap/p2p/bitswap/row_block.go new file mode 100644 index 0000000000..41ff19e791 --- /dev/null +++ b/share/shwap/p2p/bitswap/row_block.go @@ -0,0 +1,127 @@ +package bitswap + +import ( + "context" + "fmt" + + "github.com/ipfs/go-cid" + + "github.com/celestiaorg/rsmt2d" + + "github.com/celestiaorg/celestia-node/share" + "github.com/celestiaorg/celestia-node/share/eds" + "github.com/celestiaorg/celestia-node/share/shwap" + shwappb "github.com/celestiaorg/celestia-node/share/shwap/pb" +) + +const ( + // rowCodec is a CID codec used for row Bitswap requests over Namespaced Merkle Tree. 
+ rowCodec = 0x7800 + + // rowMultihashCode is the multihash code for custom axis sampling multihash function. + rowMultihashCode = 0x7801 +) + +func init() { + registerBlock( + rowMultihashCode, + rowCodec, + shwap.RowIDSize, + func(cid cid.Cid) (Block, error) { + return EmptyRowBlockFromCID(cid) + }, + ) +} + +// RowBlock is a Bitswap compatible block for Shwap's Row container. +type RowBlock struct { + ID shwap.RowID + + Container shwap.Row +} + +// NewEmptyRowBlock constructs a new empty RowBlock. +func NewEmptyRowBlock(height uint64, rowIdx, edsSize int) (*RowBlock, error) { + id, err := shwap.NewRowID(height, rowIdx, edsSize) + if err != nil { + return nil, err + } + + return &RowBlock{ID: id}, nil +} + +// EmptyRowBlockFromCID constructs an empty RowBlock out of the CID. +func EmptyRowBlockFromCID(cid cid.Cid) (*RowBlock, error) { + ridData, err := extractFromCID(cid) + if err != nil { + return nil, err + } + + rid, err := shwap.RowIDFromBinary(ridData) + if err != nil { + return nil, fmt.Errorf("while unmarhaling RowBlock: %w", err) + } + return &RowBlock{ID: rid}, nil +} + +func (rb *RowBlock) CID() cid.Cid { + return encodeToCID(rb.ID, rowMultihashCode, rowCodec) +} + +func (rb *RowBlock) Height() uint64 { + return rb.ID.Height +} + +func (rb *RowBlock) Marshal() ([]byte, error) { + if rb.Container.IsEmpty() { + return nil, fmt.Errorf("cannot marshal empty RowBlock") + } + + container := rb.Container.ToProto() + containerData, err := container.Marshal() + if err != nil { + return nil, fmt.Errorf("marshaling RowBlock container: %w", err) + } + + return containerData, nil +} + +func (rb *RowBlock) Populate(ctx context.Context, eds eds.Accessor) error { + half, err := eds.AxisHalf(ctx, rsmt2d.Row, rb.ID.RowIndex) + if err != nil { + return fmt.Errorf("accessing Row AxisHalf: %w", err) + } + + rb.Container = half.ToRow() + return nil +} + +func (rb *RowBlock) UnmarshalFn(root *share.AxisRoots) UnmarshalFn { + return func(cntrData, idData []byte) error { + if 
!rb.Container.IsEmpty() { + return nil + } + + rid, err := shwap.RowIDFromBinary(idData) + if err != nil { + return fmt.Errorf("unmarhaling RowID: %w", err) + } + + if !rb.ID.Equals(rid) { + return fmt.Errorf("requested %+v doesnt match given %+v", rb.ID, rid) + } + + var row shwappb.Row + if err := row.Unmarshal(cntrData); err != nil { + return fmt.Errorf("unmarshaling Row for %+v: %w", rb.ID, err) + } + + cntr := shwap.RowFromProto(&row) + if err := cntr.Verify(root, rb.ID.RowIndex); err != nil { + return fmt.Errorf("validating Row for %+v: %w", rb.ID, err) + } + + rb.Container = cntr + return nil + } +} diff --git a/share/shwap/p2p/bitswap/row_block_test.go b/share/shwap/p2p/bitswap/row_block_test.go new file mode 100644 index 0000000000..f609683da5 --- /dev/null +++ b/share/shwap/p2p/bitswap/row_block_test.go @@ -0,0 +1,38 @@ +package bitswap + +import ( + "context" + "testing" + "time" + + "github.com/stretchr/testify/require" + + "github.com/celestiaorg/celestia-node/share" + "github.com/celestiaorg/celestia-node/share/eds/edstest" +) + +func TestRow_FetchRoundtrip(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), time.Second*5) + defer cancel() + + eds := edstest.RandEDS(t, 4) + root, err := share.NewAxisRoots(eds) + require.NoError(t, err) + exchange := newExchangeOverEDS(ctx, t, eds) + + blks := make([]Block, eds.Width()) + for i := range blks { + blk, err := NewEmptyRowBlock(1, i, len(root.RowRoots)) + require.NoError(t, err) + blks[i] = blk + } + + err = Fetch(ctx, exchange, root, blks) + require.NoError(t, err) + + for _, blk := range blks { + row := blk.(*RowBlock) + err = row.Container.Verify(root, row.ID.RowIndex) + require.NoError(t, err) + } +} diff --git a/share/shwap/p2p/bitswap/row_namespace_data_block.go b/share/shwap/p2p/bitswap/row_namespace_data_block.go new file mode 100644 index 0000000000..312f231027 --- /dev/null +++ b/share/shwap/p2p/bitswap/row_namespace_data_block.go @@ -0,0 +1,131 @@ +package bitswap + 
+import ( + "context" + "fmt" + + "github.com/ipfs/go-cid" + + "github.com/celestiaorg/celestia-node/share" + "github.com/celestiaorg/celestia-node/share/eds" + "github.com/celestiaorg/celestia-node/share/shwap" + shwappb "github.com/celestiaorg/celestia-node/share/shwap/pb" +) + +const ( + // rowNamespaceDataCodec is a CID codec used for data Bitswap requests over Namespaced Merkle Tree. + rowNamespaceDataCodec = 0x7820 + + // rowNamespaceDataMultihashCode is the multihash code for data multihash function. + rowNamespaceDataMultihashCode = 0x7821 +) + +func init() { + registerBlock( + rowNamespaceDataMultihashCode, + rowNamespaceDataCodec, + shwap.RowNamespaceDataIDSize, + func(cid cid.Cid) (Block, error) { + return EmptyRowNamespaceDataBlockFromCID(cid) + }, + ) +} + +// RowNamespaceDataBlock is a Bitswap compatible block for Shwap's RowNamespaceData container. +type RowNamespaceDataBlock struct { + ID shwap.RowNamespaceDataID + + Container shwap.RowNamespaceData +} + +// NewEmptyRowNamespaceDataBlock constructs a new empty RowNamespaceDataBlock. +func NewEmptyRowNamespaceDataBlock( + height uint64, + rowIdx int, + namespace share.Namespace, + edsSize int, +) (*RowNamespaceDataBlock, error) { + id, err := shwap.NewRowNamespaceDataID(height, rowIdx, namespace, edsSize) + if err != nil { + return nil, err + } + + return &RowNamespaceDataBlock{ID: id}, nil +} + +// EmptyRowNamespaceDataBlockFromCID constructs an empty RowNamespaceDataBlock out of the CID. 
+func EmptyRowNamespaceDataBlockFromCID(cid cid.Cid) (*RowNamespaceDataBlock, error) { + rndidData, err := extractFromCID(cid) + if err != nil { + return nil, err + } + + rndid, err := shwap.RowNamespaceDataIDFromBinary(rndidData) + if err != nil { + return nil, fmt.Errorf("unmarhalling RowNamespaceDataBlock: %w", err) + } + + return &RowNamespaceDataBlock{ID: rndid}, nil +} + +func (rndb *RowNamespaceDataBlock) CID() cid.Cid { + return encodeToCID(rndb.ID, rowNamespaceDataMultihashCode, rowNamespaceDataCodec) +} + +func (rndb *RowNamespaceDataBlock) Height() uint64 { + return rndb.ID.Height +} + +func (rndb *RowNamespaceDataBlock) Marshal() ([]byte, error) { + if rndb.Container.IsEmpty() { + return nil, fmt.Errorf("cannot marshal empty RowNamespaceDataBlock") + } + + container := rndb.Container.ToProto() + containerData, err := container.Marshal() + if err != nil { + return nil, fmt.Errorf("marshaling RowNamespaceDataBlock container: %w", err) + } + + return containerData, nil +} + +func (rndb *RowNamespaceDataBlock) Populate(ctx context.Context, eds eds.Accessor) error { + rnd, err := eds.RowNamespaceData(ctx, rndb.ID.DataNamespace, rndb.ID.RowIndex) + if err != nil { + return fmt.Errorf("accessing RowNamespaceData: %w", err) + } + + rndb.Container = rnd + return nil +} + +func (rndb *RowNamespaceDataBlock) UnmarshalFn(root *share.AxisRoots) UnmarshalFn { + return func(cntrData, idData []byte) error { + if !rndb.Container.IsEmpty() { + return nil + } + + rndid, err := shwap.RowNamespaceDataIDFromBinary(idData) + if err != nil { + return fmt.Errorf("unmarhaling RowNamespaceDataID: %w", err) + } + + if !rndb.ID.Equals(rndid) { + return fmt.Errorf("requested %+v doesnt match given %+v", rndb.ID, rndid) + } + + var rnd shwappb.RowNamespaceData + if err := rnd.Unmarshal(cntrData); err != nil { + return fmt.Errorf("unmarshaling RowNamespaceData for %+v: %w", rndb.ID, err) + } + + cntr := shwap.RowNamespaceDataFromProto(&rnd) + if err := cntr.Verify(root, 
rndb.ID.DataNamespace, rndb.ID.RowIndex); err != nil { + return fmt.Errorf("validating RowNamespaceData for %+v: %w", rndb.ID, err) + } + + rndb.Container = cntr + return nil + } +} diff --git a/share/shwap/p2p/bitswap/row_namespace_data_block_test.go b/share/shwap/p2p/bitswap/row_namespace_data_block_test.go new file mode 100644 index 0000000000..27deee8823 --- /dev/null +++ b/share/shwap/p2p/bitswap/row_namespace_data_block_test.go @@ -0,0 +1,39 @@ +package bitswap + +import ( + "context" + "testing" + "time" + + "github.com/stretchr/testify/require" + + "github.com/celestiaorg/celestia-node/share" + "github.com/celestiaorg/celestia-node/share/eds/edstest" + "github.com/celestiaorg/celestia-node/share/sharetest" +) + +func TestRowNamespaceData_FetchRoundtrip(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), time.Second*5) + defer cancel() + + namespace := sharetest.RandV0Namespace() + eds, root := edstest.RandEDSWithNamespace(t, namespace, 64, 16) + exchange := newExchangeOverEDS(ctx, t, eds) + + rowIdxs := share.RowsWithNamespace(root, namespace) + blks := make([]Block, len(rowIdxs)) + for i, rowIdx := range rowIdxs { + blk, err := NewEmptyRowNamespaceDataBlock(1, rowIdx, namespace, len(root.RowRoots)) + require.NoError(t, err) + blks[i] = blk + } + + err := Fetch(ctx, exchange, root, blks) + require.NoError(t, err) + + for _, blk := range blks { + rnd := blk.(*RowNamespaceDataBlock) + err = rnd.Container.Verify(root, rnd.ID.DataNamespace, rnd.ID.RowIndex) + require.NoError(t, err) + } +} diff --git a/share/shwap/p2p/bitswap/sample_block.go b/share/shwap/p2p/bitswap/sample_block.go new file mode 100644 index 0000000000..c96c5f4d3f --- /dev/null +++ b/share/shwap/p2p/bitswap/sample_block.go @@ -0,0 +1,126 @@ +package bitswap + +import ( + "context" + "fmt" + + "github.com/ipfs/go-cid" + + "github.com/celestiaorg/celestia-node/share" + "github.com/celestiaorg/celestia-node/share/eds" + "github.com/celestiaorg/celestia-node/share/shwap" + 
shwappb "github.com/celestiaorg/celestia-node/share/shwap/pb" +) + +const ( + // sampleCodec is a CID codec used for share sampling Bitswap requests over Namespaced + // Merkle Tree. + sampleCodec = 0x7810 + + // sampleMultihashCode is the multihash code for share sampling multihash function. + sampleMultihashCode = 0x7811 +) + +func init() { + registerBlock( + sampleMultihashCode, + sampleCodec, + shwap.SampleIDSize, + func(cid cid.Cid) (Block, error) { + return EmptySampleBlockFromCID(cid) + }, + ) +} + +// SampleBlock is a Bitswap compatible block for Shwap's Sample container. +type SampleBlock struct { + ID shwap.SampleID + Container shwap.Sample +} + +// NewEmptySampleBlock constructs a new empty SampleBlock. +func NewEmptySampleBlock(height uint64, rowIdx, colIdx, edsSize int) (*SampleBlock, error) { + id, err := shwap.NewSampleID(height, rowIdx, colIdx, edsSize) + if err != nil { + return nil, err + } + + return &SampleBlock{ID: id}, nil +} + +// EmptySampleBlockFromCID constructs an empty SampleBlock out of the CID. 
+func EmptySampleBlockFromCID(cid cid.Cid) (*SampleBlock, error) { + sidData, err := extractFromCID(cid) + if err != nil { + return nil, err + } + + sid, err := shwap.SampleIDFromBinary(sidData) + if err != nil { + return nil, fmt.Errorf("while unmarhaling SampleBlock: %w", err) + } + + return &SampleBlock{ID: sid}, nil +} + +func (sb *SampleBlock) CID() cid.Cid { + return encodeToCID(sb.ID, sampleMultihashCode, sampleCodec) +} + +func (sb *SampleBlock) Height() uint64 { + return sb.ID.Height +} + +func (sb *SampleBlock) Marshal() ([]byte, error) { + if sb.Container.IsEmpty() { + return nil, fmt.Errorf("cannot marshal empty SampleBlock") + } + + container := sb.Container.ToProto() + containerData, err := container.Marshal() + if err != nil { + return nil, fmt.Errorf("marshaling SampleBlock container: %w", err) + } + + return containerData, nil +} + +func (sb *SampleBlock) Populate(ctx context.Context, eds eds.Accessor) error { + smpl, err := eds.Sample(ctx, sb.ID.RowIndex, sb.ID.ShareIndex) + if err != nil { + return fmt.Errorf("accessing Sample: %w", err) + } + + sb.Container = smpl + return nil +} + +func (sb *SampleBlock) UnmarshalFn(root *share.AxisRoots) UnmarshalFn { + return func(cntrData, idData []byte) error { + if !sb.Container.IsEmpty() { + return nil + } + + sid, err := shwap.SampleIDFromBinary(idData) + if err != nil { + return fmt.Errorf("unmarhaling SampleID: %w", err) + } + + if !sb.ID.Equals(sid) { + return fmt.Errorf("requested %+v doesnt match given %+v", sb.ID, sid) + } + + var sample shwappb.Sample + if err := sample.Unmarshal(cntrData); err != nil { + return fmt.Errorf("unmarshaling Sample for %+v: %w", sb.ID, err) + } + + cntr := shwap.SampleFromProto(&sample) + if err := cntr.Verify(root, sb.ID.RowIndex, sb.ID.ShareIndex); err != nil { + return fmt.Errorf("validating Sample for %+v: %w", sb.ID, err) + } + + sb.Container = cntr + return nil + } +} diff --git a/share/shwap/p2p/bitswap/sample_block_test.go 
b/share/shwap/p2p/bitswap/sample_block_test.go new file mode 100644 index 0000000000..2a28e7e4c9 --- /dev/null +++ b/share/shwap/p2p/bitswap/sample_block_test.go @@ -0,0 +1,41 @@ +package bitswap + +import ( + "context" + "testing" + "time" + + "github.com/stretchr/testify/require" + + "github.com/celestiaorg/celestia-node/share" + "github.com/celestiaorg/celestia-node/share/eds/edstest" +) + +func TestSample_FetchRoundtrip(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), time.Second*5) + defer cancel() + + eds := edstest.RandEDS(t, 32) + root, err := share.NewAxisRoots(eds) + require.NoError(t, err) + exchange := newExchangeOverEDS(ctx, t, eds) + + width := int(eds.Width()) + blks := make([]Block, 0, width*width) + for x := 0; x < width; x++ { + for y := 0; y < width; y++ { + blk, err := NewEmptySampleBlock(1, x, y, len(root.RowRoots)) + require.NoError(t, err) + blks = append(blks, blk) + } + } + + err = Fetch(ctx, exchange, root, blks) + require.NoError(t, err) + + for _, sample := range blks { + blk := sample.(*SampleBlock) + err = blk.Container.Verify(root, blk.ID.RowIndex, blk.ID.ShareIndex) + require.NoError(t, err) + } +} diff --git a/share/p2p/discovery/backoff.go b/share/shwap/p2p/discovery/backoff.go similarity index 100% rename from share/p2p/discovery/backoff.go rename to share/shwap/p2p/discovery/backoff.go diff --git a/share/p2p/discovery/backoff_test.go b/share/shwap/p2p/discovery/backoff_test.go similarity index 100% rename from share/p2p/discovery/backoff_test.go rename to share/shwap/p2p/discovery/backoff_test.go diff --git a/share/shwap/p2p/discovery/dht.go b/share/shwap/p2p/discovery/dht.go new file mode 100644 index 0000000000..b3f9351c7c --- /dev/null +++ b/share/shwap/p2p/discovery/dht.go @@ -0,0 +1,38 @@ +package discovery + +import ( + "context" + "fmt" + "time" + + "github.com/ipfs/go-datastore" + dht "github.com/libp2p/go-libp2p-kad-dht" + "github.com/libp2p/go-libp2p/core/host" + 
"github.com/libp2p/go-libp2p/core/peer" + "github.com/libp2p/go-libp2p/core/protocol" +) + +const ( + defaultRoutingRefreshPeriod = time.Minute +) + +// PeerRouting provides constructor for PeerRouting over DHT. +// Basically, this provides a way to discover peer addresses by respecting public keys. +func NewDHT( + ctx context.Context, + prefix string, + bootsrappers []peer.AddrInfo, + host host.Host, + dataStore datastore.Batching, + mode dht.ModeOpt, +) (*dht.IpfsDHT, error) { + opts := []dht.Option{ + dht.BootstrapPeers(bootsrappers...), + dht.ProtocolPrefix(protocol.ID(fmt.Sprintf("/celestia/%s", prefix))), + dht.Datastore(dataStore), + dht.RoutingTableRefreshPeriod(defaultRoutingRefreshPeriod), + dht.Mode(mode), + } + + return dht.New(ctx, host, opts...) +} diff --git a/share/p2p/discovery/discovery.go b/share/shwap/p2p/discovery/discovery.go similarity index 89% rename from share/p2p/discovery/discovery.go rename to share/shwap/p2p/discovery/discovery.go index fe99815d94..54a6e4dd10 100644 --- a/share/p2p/discovery/discovery.go +++ b/share/shwap/p2p/discovery/discovery.go @@ -42,8 +42,11 @@ var discoveryRetryTimeout = retryTimeout // Discovery combines advertise and discover services and allows to store discovered nodes. // TODO: The code here gets horribly hairy, so we should refactor this at some point type Discovery struct { - // Tag is used as rendezvous point for discovery service - tag string + // tag is used to specify topic for discovery + tag string + // topic contains tag and current version of discovery protocol and is used as + // rendezvous point for discovery service + topic string set *limitedSet host host.Host disc discovery.Discovery @@ -78,6 +81,7 @@ func NewDiscovery( params *Parameters, h host.Host, d discovery.Discovery, + prefix string, tag string, opts ...Option, ) (*Discovery, error) { @@ -91,6 +95,7 @@ func NewDiscovery( o := newOptions(opts...) 
return &Discovery{ tag: tag, + topic: fmt.Sprintf("/%s/%s", prefix, tag), set: newLimitedSet(params.PeersLimit), host: h, disc: d, @@ -116,11 +121,11 @@ func (d *Discovery) Start(context.Context) error { go d.connector.GC(ctx) if d.advertise { - log.Infow("advertising to topic", "topic", d.tag) + log.Infow("advertising to topic", "topic", d.topic) go d.Advertise(ctx) } - log.Infow("started discovery", "topic", d.tag) + log.Infow("started discovery", "topic", d.topic) return nil } @@ -147,7 +152,7 @@ func (d *Discovery) Discard(id peer.ID) bool { return false } - d.host.ConnManager().Unprotect(id, d.tag) + d.host.ConnManager().Unprotect(id, d.topic) d.connector.Backoff(id) d.set.Remove(id) d.onUpdatedPeers(id, false) @@ -170,14 +175,14 @@ func (d *Discovery) Advertise(ctx context.Context) { timer := time.NewTimer(d.params.AdvertiseInterval) defer timer.Stop() for { - log.Infof("advertising to topic %s", d.tag) - _, err := d.disc.Advertise(ctx, d.tag) + log.Infof("advertising to topic %s", d.topic) + _, err := d.disc.Advertise(ctx, d.topic) d.metrics.observeAdvertise(ctx, err) if err != nil { if ctx.Err() != nil { return } - log.Warnw("error advertising", "rendezvous", d.tag, "err", err) + log.Warnw("error advertising", "rendezvous", d.topic, "err", err) // we don't want retry indefinitely in busy loop // internal discovery mechanism may need some time before attempts @@ -195,7 +200,7 @@ func (d *Discovery) Advertise(ctx context.Context) { } } - log.Infof("successfully advertised to topic %s", d.tag) + log.Infof("successfully advertised to topic %s", d.topic) if !timer.Stop() { <-timer.C } @@ -232,7 +237,7 @@ func (d *Discovery) discoveryLoop(ctx context.Context) { log.Warnf( "Potentially degraded connectivity, unable to discover the desired amount of %s peers in %v. "+ "Number of peers discovered: %d. 
Required: %d.", - d.tag, logInterval, d.set.Size(), d.set.Limit(), + d.topic, logInterval, d.set.Size(), d.set.Limit(), ) } // Do not break the loop; just continue @@ -270,13 +275,13 @@ func (d *Discovery) discover(ctx context.Context) bool { size := d.set.Size() want := d.set.Limit() - size if want == 0 { - log.Debugw("reached soft peer limit, skipping discovery", "topic", d.tag, "size", size) + log.Debugw("reached soft peer limit, skipping discovery", "topic", d.topic, "size", size) return true } // TODO @renaynay: eventually, have a mechanism to catch if wanted amount of peers // has not been discovered in X amount of time so that users are warned of degraded // FN connectivity. - log.Debugw("discovering peers", "topic", d.tag, "want", want) + log.Debugw("discovering peers", "topic", d.topic, "want", want) // we use errgroup as it provide limits var wg errgroup.Group @@ -290,9 +295,9 @@ func (d *Discovery) discover(ctx context.Context) bool { findCancel() }() - peers, err := d.disc.FindPeers(findCtx, d.tag) + peers, err := d.disc.FindPeers(findCtx, d.topic) if err != nil { - log.Error("unable to start discovery", "topic", d.tag, "err", err) + log.Error("unable to start discovery", "topic", d.topic, "err", err) return false } @@ -317,12 +322,12 @@ func (d *Discovery) discover(ctx context.Context) bool { } size := d.set.Size() - log.Debugw("found peer", "topic", d.tag, "peer", peer.ID.String(), "found_amount", size) + log.Debugw("found peer", "topic", d.topic, "peer", peer.ID.String(), "found_amount", size) if size < d.set.Limit() { return nil } - log.Infow("discovered wanted peers", "topic", d.tag, "amount", size) + log.Infow("discovered wanted peers", "topic", d.topic, "amount", size) findCancel() // stop discovery when we are done return nil }) @@ -333,7 +338,7 @@ func (d *Discovery) discover(ctx context.Context) bool { isEnoughPeers := d.set.Size() >= d.set.Limit() d.metrics.observeFindPeers(ctx, isEnoughPeers) - log.Debugw("discovery finished", "topic", 
d.tag, "discovered_wanted", isEnoughPeers) + log.Debugw("discovery finished", "topic", d.topic, "discovered_wanted", isEnoughPeers) return isEnoughPeers } } @@ -385,7 +390,7 @@ func (d *Discovery) handleDiscoveredPeer(ctx context.Context, peer peer.AddrInfo // NOTE: This is does not protect from remote killing the connection. // In the future, we should design a protocol that keeps bidirectional agreement on whether // connection should be kept or not, similar to mesh link in GossipSub. - d.host.ConnManager().Protect(peer.ID, d.tag) + d.host.ConnManager().Protect(peer.ID, d.topic) return true } diff --git a/share/p2p/discovery/discovery_test.go b/share/shwap/p2p/discovery/discovery_test.go similarity index 98% rename from share/p2p/discovery/discovery_test.go rename to share/shwap/p2p/discovery/discovery_test.go index 8fb31de922..f900d38557 100644 --- a/share/p2p/discovery/discovery_test.go +++ b/share/shwap/p2p/discovery/discovery_test.go @@ -21,6 +21,7 @@ import ( const ( fullNodesTag = "full" + version = "v1" ) func TestDiscovery(t *testing.T) { @@ -168,7 +169,7 @@ func (t *testnet) startNewDiscovery( tag string, opts ...Option, ) *Discovery { - disc, err := NewDiscovery(params, hst, routingDisc, tag, opts...) + disc, err := NewDiscovery(params, hst, routingDisc, version, tag, opts...) 
require.NoError(t.T, err) err = disc.Start(t.ctx) require.NoError(t.T, err) diff --git a/share/p2p/discovery/metrics.go b/share/shwap/p2p/discovery/metrics.go similarity index 100% rename from share/p2p/discovery/metrics.go rename to share/shwap/p2p/discovery/metrics.go diff --git a/share/p2p/discovery/options.go b/share/shwap/p2p/discovery/options.go similarity index 100% rename from share/p2p/discovery/options.go rename to share/shwap/p2p/discovery/options.go diff --git a/share/p2p/discovery/set.go b/share/shwap/p2p/discovery/set.go similarity index 100% rename from share/p2p/discovery/set.go rename to share/shwap/p2p/discovery/set.go diff --git a/share/p2p/discovery/set_test.go b/share/shwap/p2p/discovery/set_test.go similarity index 100% rename from share/p2p/discovery/set_test.go rename to share/shwap/p2p/discovery/set_test.go diff --git a/share/p2p/doc.go b/share/shwap/p2p/shrex/doc.go similarity index 88% rename from share/p2p/doc.go rename to share/shwap/p2p/shrex/doc.go index 991ddf94db..9654532842 100644 --- a/share/p2p/doc.go +++ b/share/shwap/p2p/shrex/doc.go @@ -1,4 +1,4 @@ -// Package p2p provides p2p functionality that powers the share exchange protocols used by celestia-node. +// Package shrex provides functionality that powers the share exchange protocols used by celestia-node. // The available protocols are: // // - shrexsub : a floodsub-based pubsub protocol that is used to broadcast/subscribe to the event @@ -15,4 +15,4 @@ // and is primarily used by `getters.ShrexGetter` in share/getters/shrex.go. // // Find out more about each protocol in their respective sub-packages. -package p2p +package shrex diff --git a/share/shwap/p2p/shrex/error.go b/share/shwap/p2p/shrex/error.go new file mode 100644 index 0000000000..d32c3c85b2 --- /dev/null +++ b/share/shwap/p2p/shrex/error.go @@ -0,0 +1,16 @@ +package shrex + +import "errors" + +// ErrorContains reports whether any error in err's tree matches any error in targets tree. 
+func ErrorContains(err, target error) bool { + if errors.Is(err, target) || target == nil { + return true + } + + target = errors.Unwrap(target) + if target == nil { + return false + } + return ErrorContains(err, target) +} diff --git a/share/shwap/p2p/shrex/error_test.go b/share/shwap/p2p/shrex/error_test.go new file mode 100644 index 0000000000..d9b343931c --- /dev/null +++ b/share/shwap/p2p/shrex/error_test.go @@ -0,0 +1,112 @@ +package shrex + +import ( + "errors" + "fmt" + "testing" + + "github.com/stretchr/testify/assert" +) + +func Test_ErrorContains(t *testing.T) { + err1 := errors.New("1") + err2 := errors.New("2") + + w1 := func(err error) error { + return fmt.Errorf("wrap1: %w", err) + } + w2 := func(err error) error { + return fmt.Errorf("wrap1: %w", err) + } + + type args struct { + err error + target error + } + tests := []struct { + name string + args args + want bool + }{ + { + "nil err", + args{ + err: nil, + target: err1, + }, + false, + }, + { + "nil target", + args{ + err: err1, + target: nil, + }, + true, + }, + { + "errors.Is true", + args{ + err: w1(err1), + target: err1, + }, + true, + }, + { + "errors.Is false", + args{ + err: w1(err1), + target: err2, + }, + false, + }, + { + "same wrap but different base error", + args{ + err: w1(err1), + target: w1(err2), + }, + false, + }, + { + "both wrapped true", + args{ + err: w1(err1), + target: w2(err1), + }, + true, + }, + { + "both wrapped false", + args{ + err: w1(err1), + target: w2(err2), + }, + false, + }, + { + "multierr first in slice", + args{ + err: errors.Join(w1(err1), w2(err2)), + target: w2(err1), + }, + true, + }, + { + "multierr second in slice", + args{ + err: errors.Join(w1(err1), w2(err2)), + target: w1(err2), + }, + true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + assert.Equalf(t, + tt.want, + ErrorContains(tt.args.err, tt.args.target), + "ErrorContains(%v, %v)", tt.args.err, tt.args.target) + }) + } +} diff --git a/share/p2p/errors.go 
b/share/shwap/p2p/shrex/errors.go similarity index 98% rename from share/p2p/errors.go rename to share/shwap/p2p/shrex/errors.go index cb7b596f47..79ff0ed2b2 100644 --- a/share/p2p/errors.go +++ b/share/shwap/p2p/shrex/errors.go @@ -1,4 +1,4 @@ -package p2p +package shrex import ( "errors" diff --git a/share/p2p/metrics.go b/share/shwap/p2p/shrex/metrics.go similarity index 99% rename from share/p2p/metrics.go rename to share/shwap/p2p/shrex/metrics.go index 55aefda81d..9d5c605139 100644 --- a/share/p2p/metrics.go +++ b/share/shwap/p2p/shrex/metrics.go @@ -1,4 +1,4 @@ -package p2p +package shrex import ( "context" diff --git a/share/p2p/middleware.go b/share/shwap/p2p/shrex/middleware.go similarity index 98% rename from share/p2p/middleware.go rename to share/shwap/p2p/shrex/middleware.go index df0a690af7..c53a996eec 100644 --- a/share/p2p/middleware.go +++ b/share/shwap/p2p/shrex/middleware.go @@ -1,4 +1,4 @@ -package p2p +package shrex import ( "sync/atomic" diff --git a/share/p2p/params.go b/share/shwap/p2p/shrex/params.go similarity index 94% rename from share/p2p/params.go rename to share/shwap/p2p/shrex/params.go index 6636e38fc5..a5c0d5e3e2 100644 --- a/share/p2p/params.go +++ b/share/shwap/p2p/shrex/params.go @@ -1,4 +1,4 @@ -package p2p +package shrex import ( "fmt" @@ -7,6 +7,9 @@ import ( "github.com/libp2p/go-libp2p/core/protocol" ) +// protocolString is the protocol string for the shrex protocol. +const ProtocolString = "/shrex/v0.1.0/" + // Parameters is the set of parameters that must be configured for the shrex/eds protocol. type Parameters struct { // ServerReadTimeout sets the timeout for reading messages from the stream. diff --git a/share/shwap/p2p/shrex/pb/shrex.pb.go b/share/shwap/p2p/shrex/pb/shrex.pb.go new file mode 100644 index 0000000000..88322b1985 --- /dev/null +++ b/share/shwap/p2p/shrex/pb/shrex.pb.go @@ -0,0 +1,336 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. 
+// source: share/shwap/p2p/shrex/pb/shrex.proto + +package pb + +import ( + fmt "fmt" + proto "github.com/gogo/protobuf/proto" + io "io" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +type Status int32 + +const ( + Status_INVALID Status = 0 + Status_OK Status = 1 + Status_NOT_FOUND Status = 2 + Status_INTERNAL Status = 3 +) + +var Status_name = map[int32]string{ + 0: "INVALID", + 1: "OK", + 2: "NOT_FOUND", + 3: "INTERNAL", +} + +var Status_value = map[string]int32{ + "INVALID": 0, + "OK": 1, + "NOT_FOUND": 2, + "INTERNAL": 3, +} + +func (x Status) String() string { + return proto.EnumName(Status_name, int32(x)) +} + +func (Status) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_68dfd78ade756110, []int{0} +} + +type Response struct { + Status Status `protobuf:"varint,1,opt,name=status,proto3,enum=Status" json:"status,omitempty"` +} + +func (m *Response) Reset() { *m = Response{} } +func (m *Response) String() string { return proto.CompactTextString(m) } +func (*Response) ProtoMessage() {} +func (*Response) Descriptor() ([]byte, []int) { + return fileDescriptor_68dfd78ade756110, []int{0} +} +func (m *Response) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Response) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Response.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Response) XXX_Merge(src 
proto.Message) { + xxx_messageInfo_Response.Merge(m, src) +} +func (m *Response) XXX_Size() int { + return m.Size() +} +func (m *Response) XXX_DiscardUnknown() { + xxx_messageInfo_Response.DiscardUnknown(m) +} + +var xxx_messageInfo_Response proto.InternalMessageInfo + +func (m *Response) GetStatus() Status { + if m != nil { + return m.Status + } + return Status_INVALID +} + +func init() { + proto.RegisterEnum("Status", Status_name, Status_value) + proto.RegisterType((*Response)(nil), "Response") +} + +func init() { + proto.RegisterFile("share/shwap/p2p/shrex/pb/shrex.proto", fileDescriptor_68dfd78ade756110) +} + +var fileDescriptor_68dfd78ade756110 = []byte{ + // 218 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x52, 0x29, 0xce, 0x48, 0x2c, + 0x4a, 0xd5, 0x2f, 0xce, 0x28, 0x4f, 0x2c, 0xd0, 0x2f, 0x30, 0x2a, 0xd0, 0x2f, 0xce, 0x28, 0x4a, + 0xad, 0xd0, 0x2f, 0x48, 0x82, 0x30, 0xf4, 0x0a, 0x8a, 0xf2, 0x4b, 0xf2, 0x95, 0xb4, 0xb9, 0x38, + 0x82, 0x52, 0x8b, 0x0b, 0xf2, 0xf3, 0x8a, 0x53, 0x85, 0xe4, 0xb9, 0xd8, 0x8a, 0x4b, 0x12, 0x4b, + 0x4a, 0x8b, 0x25, 0x18, 0x15, 0x18, 0x35, 0xf8, 0x8c, 0xd8, 0xf5, 0x82, 0xc1, 0xdc, 0x20, 0xa8, + 0xb0, 0x96, 0x15, 0x17, 0x1b, 0x44, 0x44, 0x88, 0x9b, 0x8b, 0xdd, 0xd3, 0x2f, 0xcc, 0xd1, 0xc7, + 0xd3, 0x45, 0x80, 0x41, 0x88, 0x8d, 0x8b, 0xc9, 0xdf, 0x5b, 0x80, 0x51, 0x88, 0x97, 0x8b, 0xd3, + 0xcf, 0x3f, 0x24, 0xde, 0xcd, 0x3f, 0xd4, 0xcf, 0x45, 0x80, 0x49, 0x88, 0x87, 0x8b, 0xc3, 0xd3, + 0x2f, 0xc4, 0x35, 0xc8, 0xcf, 0xd1, 0x47, 0x80, 0xd9, 0x29, 0xfc, 0xc4, 0x23, 0x39, 0xc6, 0x0b, + 0x8f, 0xe4, 0x18, 0x1f, 0x3c, 0x92, 0x63, 0x9c, 0xf0, 0x58, 0x8e, 0xe1, 0xc2, 0x63, 0x39, 0x86, + 0x1b, 0x8f, 0xe5, 0x18, 0xa2, 0x6c, 0xd3, 0x33, 0x4b, 0x32, 0x4a, 0x93, 0xf4, 0x92, 0xf3, 0x73, + 0xf5, 0x93, 0x53, 0x73, 0x52, 0x8b, 0x4b, 0x32, 0x13, 0xf3, 0x8b, 0xd2, 0xe1, 0x6c, 0xdd, 0xbc, + 0xfc, 0x14, 0x90, 0x3f, 0xb0, 0xfb, 0x26, 0x89, 0x0d, 0xec, 0x11, 0x63, 0x40, 0x00, 0x00, 0x00, + 0xff, 
0xff, 0xd8, 0xb0, 0x4b, 0x58, 0xf0, 0x00, 0x00, 0x00, +} + +func (m *Response) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Response) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Response) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Status != 0 { + i = encodeVarintShrex(dAtA, i, uint64(m.Status)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func encodeVarintShrex(dAtA []byte, offset int, v uint64) int { + offset -= sovShrex(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *Response) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Status != 0 { + n += 1 + sovShrex(uint64(m.Status)) + } + return n +} + +func sovShrex(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozShrex(x uint64) (n int) { + return sovShrex(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *Response) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowShrex + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Response: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Response: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) 
+ } + m.Status = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowShrex + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Status |= Status(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipShrex(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthShrex + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipShrex(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowShrex + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowShrex + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowShrex + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthShrex + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupShrex + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthShrex + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthShrex = fmt.Errorf("proto: 
negative length found during unmarshaling") + ErrIntOverflowShrex = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupShrex = fmt.Errorf("proto: unexpected end of group") +) diff --git a/share/p2p/shrexeds/pb/extended_data_square.proto b/share/shwap/p2p/shrex/pb/shrex.proto similarity index 62% rename from share/p2p/shrexeds/pb/extended_data_square.proto rename to share/shwap/p2p/shrex/pb/shrex.proto index 63750962e9..7b3a57227d 100644 --- a/share/p2p/shrexeds/pb/extended_data_square.proto +++ b/share/shwap/p2p/shrex/pb/shrex.proto @@ -1,8 +1,6 @@ syntax = "proto3"; -message EDSRequest { - bytes hash = 1; // identifies the requested EDS. -} +option go_package = "github.com/celestiaorg/celestia-node/share/shwap/p2p/shrex/pb"; enum Status { INVALID = 0; @@ -11,6 +9,6 @@ enum Status { INTERNAL = 3; // internal server error } -message EDSResponse { +message Response { Status status = 1; } diff --git a/share/p2p/peers/doc.go b/share/shwap/p2p/shrex/peers/doc.go similarity index 100% rename from share/p2p/peers/doc.go rename to share/shwap/p2p/shrex/peers/doc.go diff --git a/share/p2p/peers/manager.go b/share/shwap/p2p/shrex/peers/manager.go similarity index 99% rename from share/p2p/peers/manager.go rename to share/shwap/p2p/shrex/peers/manager.go index 0d2f6ac42b..857c5ee937 100644 --- a/share/p2p/peers/manager.go +++ b/share/shwap/p2p/shrex/peers/manager.go @@ -21,7 +21,7 @@ import ( "github.com/celestiaorg/celestia-node/header" "github.com/celestiaorg/celestia-node/share" - "github.com/celestiaorg/celestia-node/share/p2p/shrexsub" + "github.com/celestiaorg/celestia-node/share/shwap/p2p/shrex/shrexsub" ) const ( diff --git a/share/p2p/peers/manager_test.go b/share/shwap/p2p/shrex/peers/manager_test.go similarity index 98% rename from share/p2p/peers/manager_test.go rename to share/shwap/p2p/shrex/peers/manager_test.go index 2a465dc59a..c18a2c340c 100644 --- a/share/p2p/peers/manager_test.go +++ b/share/shwap/p2p/shrex/peers/manager_test.go @@ -22,12 
+22,12 @@ import ( "github.com/celestiaorg/celestia-node/header" "github.com/celestiaorg/celestia-node/share" - "github.com/celestiaorg/celestia-node/share/p2p/discovery" - "github.com/celestiaorg/celestia-node/share/p2p/shrexsub" + "github.com/celestiaorg/celestia-node/share/shwap/p2p/discovery" + "github.com/celestiaorg/celestia-node/share/shwap/p2p/shrex/shrexsub" ) func TestManager(t *testing.T) { - t.Run("Validate pool by headerSub", func(t *testing.T) { + t.Run("Verify pool by headerSub", func(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), time.Second*5) t.Cleanup(cancel) @@ -48,7 +48,7 @@ func TestManager(t *testing.T) { stopManager(t, manager) }) - t.Run("Validate pool by shrex.Getter", func(t *testing.T) { + t.Run("Verify pool by shrex.Getter", func(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), time.Second*5) t.Cleanup(cancel) @@ -76,6 +76,7 @@ func TestManager(t *testing.T) { t.Cleanup(cancel) // create headerSub mock + h := testHeader() headerSub := newSubLock(h, nil) @@ -363,6 +364,7 @@ func TestIntegration(t *testing.T) { t.Run("get peer from discovery", func(t *testing.T) { fullNodesTag := "fullNodes" + version := "v1" nw, err := mocknet.FullMeshConnected(3) require.NoError(t, err) ctx, cancel := context.WithTimeout(context.Background(), time.Second*10) @@ -398,6 +400,7 @@ func TestIntegration(t *testing.T) { params, bnHost, routingdisc.NewRoutingDiscovery(bnRouter), + version, fullNodesTag, ) require.NoError(t, err) @@ -434,6 +437,7 @@ func TestIntegration(t *testing.T) { params, fnHost, routingdisc.NewRoutingDiscovery(fnRouter), + version, fullNodesTag, discovery.WithOnPeersUpdate(fnPeerManager.UpdateNodePool), discovery.WithOnPeersUpdate(checkDiscoveredPeer), diff --git a/share/p2p/peers/metrics.go b/share/shwap/p2p/shrex/peers/metrics.go similarity index 99% rename from share/p2p/peers/metrics.go rename to share/shwap/p2p/shrex/peers/metrics.go index b28b263127..d401d6a4fc 100644 --- 
a/share/p2p/peers/metrics.go +++ b/share/shwap/p2p/shrex/peers/metrics.go @@ -13,7 +13,7 @@ import ( "go.opentelemetry.io/otel/metric" "github.com/celestiaorg/celestia-node/libs/utils" - "github.com/celestiaorg/celestia-node/share/p2p/shrexsub" + "github.com/celestiaorg/celestia-node/share/shwap/p2p/shrex/shrexsub" ) const ( diff --git a/share/p2p/peers/options.go b/share/shwap/p2p/shrex/peers/options.go similarity index 97% rename from share/p2p/peers/options.go rename to share/shwap/p2p/shrex/peers/options.go index 2970dd2465..e268550853 100644 --- a/share/p2p/peers/options.go +++ b/share/shwap/p2p/shrex/peers/options.go @@ -7,7 +7,7 @@ import ( libhead "github.com/celestiaorg/go-header" "github.com/celestiaorg/celestia-node/header" - "github.com/celestiaorg/celestia-node/share/p2p/shrexsub" + "github.com/celestiaorg/celestia-node/share/shwap/p2p/shrex/shrexsub" ) type Parameters struct { diff --git a/share/p2p/peers/pool.go b/share/shwap/p2p/shrex/peers/pool.go similarity index 100% rename from share/p2p/peers/pool.go rename to share/shwap/p2p/shrex/peers/pool.go diff --git a/share/p2p/peers/pool_test.go b/share/shwap/p2p/shrex/peers/pool_test.go similarity index 100% rename from share/p2p/peers/pool_test.go rename to share/shwap/p2p/shrex/peers/pool_test.go diff --git a/share/p2p/peers/timedqueue.go b/share/shwap/p2p/shrex/peers/timedqueue.go similarity index 100% rename from share/p2p/peers/timedqueue.go rename to share/shwap/p2p/shrex/peers/timedqueue.go diff --git a/share/p2p/peers/timedqueue_test.go b/share/shwap/p2p/shrex/peers/timedqueue_test.go similarity index 100% rename from share/p2p/peers/timedqueue_test.go rename to share/shwap/p2p/shrex/peers/timedqueue_test.go diff --git a/share/p2p/recovery.go b/share/shwap/p2p/shrex/recovery.go similarity index 96% rename from share/p2p/recovery.go rename to share/shwap/p2p/shrex/recovery.go index b214969399..67bcb98d73 100644 --- a/share/p2p/recovery.go +++ b/share/shwap/p2p/shrex/recovery.go @@ -1,4 +1,4 @@ 
-package p2p +package shrex import ( "fmt" diff --git a/share/getters/shrex.go b/share/shwap/p2p/shrex/shrex_getter/shrex.go similarity index 74% rename from share/getters/shrex.go rename to share/shwap/p2p/shrex/shrex_getter/shrex.go index c59f5dbf6c..6d9d25115f 100644 --- a/share/getters/shrex.go +++ b/share/shwap/p2p/shrex/shrex_getter/shrex.go @@ -1,4 +1,4 @@ -package getters +package shrex_getter //nolint:revive,stylecheck // underscore in pkg name will be fixed with shrex refactoring import ( "context" @@ -6,6 +6,7 @@ import ( "fmt" "time" + logging "github.com/ipfs/go-log/v2" libpeer "github.com/libp2p/go-libp2p/core/peer" "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/attribute" @@ -18,14 +19,20 @@ import ( "github.com/celestiaorg/celestia-node/libs/utils" "github.com/celestiaorg/celestia-node/pruner" "github.com/celestiaorg/celestia-node/share" - "github.com/celestiaorg/celestia-node/share/ipld" - "github.com/celestiaorg/celestia-node/share/p2p" - "github.com/celestiaorg/celestia-node/share/p2p/peers" - "github.com/celestiaorg/celestia-node/share/p2p/shrexeds" - "github.com/celestiaorg/celestia-node/share/p2p/shrexnd" + "github.com/celestiaorg/celestia-node/share/shwap" + "github.com/celestiaorg/celestia-node/share/shwap/p2p/shrex" + "github.com/celestiaorg/celestia-node/share/shwap/p2p/shrex/peers" + "github.com/celestiaorg/celestia-node/share/shwap/p2p/shrex/shrexeds" + "github.com/celestiaorg/celestia-node/share/shwap/p2p/shrex/shrexnd" ) -var _ share.Getter = (*ShrexGetter)(nil) +var ( + tracer = otel.Tracer("shrex/getter") + meter = otel.Meter("shrex/getter") + log = logging.Logger("shrex/getter") +) + +var _ shwap.Getter = (*Getter)(nil) const ( // defaultMinRequestTimeout value is set according to observed time taken by healthy peer to @@ -34,8 +41,6 @@ const ( defaultMinAttemptsCount = 3 ) -var meter = otel.Meter("shrex/getter") - type metrics struct { edsAttempts metric.Int64Histogram ndAttempts metric.Int64Histogram @@ -61,7 +66,7 @@ func 
(m *metrics) recordNDAttempt(ctx context.Context, attemptCount int, success attribute.Bool("success", success))) } -func (sg *ShrexGetter) WithMetrics() error { +func (sg *Getter) WithMetrics() error { edsAttemptHistogram, err := meter.Int64Histogram( "getters_shrex_eds_attempts_per_request", metric.WithDescription("Number of attempts per shrex/eds request"), @@ -85,8 +90,8 @@ func (sg *ShrexGetter) WithMetrics() error { return nil } -// ShrexGetter is a share.Getter that uses the shrex/eds and shrex/nd protocol to retrieve shares. -type ShrexGetter struct { +// Getter is a share.Getter that uses the shrex/eds and shrex/nd protocol to retrieve shares. +type Getter struct { edsClient *shrexeds.Client ndClient *shrexnd.Client @@ -104,14 +109,14 @@ type ShrexGetter struct { metrics *metrics } -func NewShrexGetter( +func NewGetter( edsClient *shrexeds.Client, ndClient *shrexnd.Client, fullPeerManager *peers.Manager, archivalManager *peers.Manager, availWindow pruner.AvailabilityWindow, -) *ShrexGetter { - s := &ShrexGetter{ +) *Getter { + s := &Getter{ edsClient: edsClient, ndClient: ndClient, fullPeerManager: fullPeerManager, @@ -124,7 +129,7 @@ func NewShrexGetter( return s } -func (sg *ShrexGetter) Start(ctx context.Context) error { +func (sg *Getter) Start(ctx context.Context) error { err := sg.fullPeerManager.Start(ctx) if err != nil { return err @@ -132,7 +137,7 @@ func (sg *ShrexGetter) Start(ctx context.Context) error { return sg.archivalPeerManager.Start(ctx) } -func (sg *ShrexGetter) Stop(ctx context.Context) error { +func (sg *Getter) Stop(ctx context.Context) error { err := sg.fullPeerManager.Stop(ctx) if err != nil { return err @@ -140,11 +145,11 @@ func (sg *ShrexGetter) Stop(ctx context.Context) error { return sg.archivalPeerManager.Stop(ctx) } -func (sg *ShrexGetter) GetShare(context.Context, *header.ExtendedHeader, int, int) (share.Share, error) { - return nil, fmt.Errorf("getter/shrex: GetShare %w", errOperationNotSupported) +func (sg *Getter) 
GetShare(context.Context, *header.ExtendedHeader, int, int) (share.Share, error) { + return nil, fmt.Errorf("getter/shrex: GetShare %w", shwap.ErrOperationNotSupported) } -func (sg *ShrexGetter) GetEDS(ctx context.Context, header *header.ExtendedHeader) (*rsmt2d.ExtendedDataSquare, error) { +func (sg *Getter) GetEDS(ctx context.Context, header *header.ExtendedHeader) (*rsmt2d.ExtendedDataSquare, error) { var err error ctx, span := tracer.Start(ctx, "shrex/get-eds") defer func() { @@ -152,8 +157,8 @@ func (sg *ShrexGetter) GetEDS(ctx context.Context, header *header.ExtendedHeader }() // short circuit if the data root is empty - if header.DAH.Equals(share.EmptyRoot()) { - return share.EmptyExtendedDataSquare(), nil + if header.DAH.Equals(share.EmptyEDSRoots()) { + return share.EmptyEDS(), nil } var attempt int @@ -176,8 +181,8 @@ func (sg *ShrexGetter) GetEDS(ctx context.Context, header *header.ExtendedHeader } reqStart := time.Now() - reqCtx, cancel := ctxWithSplitTimeout(ctx, sg.minAttemptsCount-attempt+1, sg.minRequestTimeout) - eds, getErr := sg.edsClient.RequestEDS(reqCtx, header.DAH.Hash(), peer) + reqCtx, cancel := utils.CtxWithSplitTimeout(ctx, sg.minAttemptsCount-attempt+1, sg.minRequestTimeout) + eds, getErr := sg.edsClient.RequestEDS(reqCtx, header.DAH, header.Height(), peer) cancel() switch { case getErr == nil: @@ -187,16 +192,16 @@ func (sg *ShrexGetter) GetEDS(ctx context.Context, header *header.ExtendedHeader case errors.Is(getErr, context.DeadlineExceeded), errors.Is(getErr, context.Canceled): setStatus(peers.ResultCooldownPeer) - case errors.Is(getErr, p2p.ErrNotFound): - getErr = share.ErrNotFound + case errors.Is(getErr, shrex.ErrNotFound): + getErr = shwap.ErrNotFound setStatus(peers.ResultCooldownPeer) - case errors.Is(getErr, p2p.ErrInvalidResponse): + case errors.Is(getErr, shrex.ErrInvalidResponse): setStatus(peers.ResultBlacklistPeer) default: setStatus(peers.ResultCooldownPeer) } - if !ErrorContains(err, getErr) { + if 
!shrex.ErrorContains(err, getErr) { err = errors.Join(err, getErr) } log.Debugw("eds: request failed", @@ -208,11 +213,11 @@ func (sg *ShrexGetter) GetEDS(ctx context.Context, header *header.ExtendedHeader } } -func (sg *ShrexGetter) GetSharesByNamespace( +func (sg *Getter) GetSharesByNamespace( ctx context.Context, header *header.ExtendedHeader, namespace share.Namespace, -) (share.NamespacedShares, error) { +) (shwap.NamespaceData, error) { if err := namespace.ValidateForData(); err != nil { return nil, err } @@ -229,9 +234,9 @@ func (sg *ShrexGetter) GetSharesByNamespace( // verify that the namespace could exist inside the roots before starting network requests dah := header.DAH - roots := ipld.FilterRootByNamespace(dah, namespace) - if len(roots) == 0 { - return []share.NamespacedRow{}, nil + rowIdxs := share.RowsWithNamespace(dah, namespace) + if len(rowIdxs) == 0 { + return shwap.NamespaceData{}, nil } for { @@ -254,8 +259,8 @@ func (sg *ShrexGetter) GetSharesByNamespace( } reqStart := time.Now() - reqCtx, cancel := ctxWithSplitTimeout(ctx, sg.minAttemptsCount-attempt+1, sg.minRequestTimeout) - nd, getErr := sg.ndClient.RequestND(reqCtx, dah, namespace, peer) + reqCtx, cancel := utils.CtxWithSplitTimeout(ctx, sg.minAttemptsCount-attempt+1, sg.minRequestTimeout) + nd, getErr := sg.ndClient.RequestND(reqCtx, header.Height(), namespace, peer) cancel() switch { case getErr == nil: @@ -271,16 +276,16 @@ func (sg *ShrexGetter) GetSharesByNamespace( case errors.Is(getErr, context.DeadlineExceeded), errors.Is(getErr, context.Canceled): setStatus(peers.ResultCooldownPeer) - case errors.Is(getErr, p2p.ErrNotFound): - getErr = share.ErrNotFound + case errors.Is(getErr, shrex.ErrNotFound): + getErr = shwap.ErrNotFound setStatus(peers.ResultCooldownPeer) - case errors.Is(getErr, p2p.ErrInvalidResponse): + case errors.Is(getErr, shrex.ErrInvalidResponse): setStatus(peers.ResultBlacklistPeer) default: setStatus(peers.ResultCooldownPeer) } - if !ErrorContains(err, getErr) { 
+ if !shrex.ErrorContains(err, getErr) { err = errors.Join(err, getErr) } log.Debugw("nd: request failed", @@ -293,7 +298,7 @@ func (sg *ShrexGetter) GetSharesByNamespace( } } -func (sg *ShrexGetter) getPeer( +func (sg *Getter) getPeer( ctx context.Context, header *header.ExtendedHeader, ) (libpeer.ID, peers.DoneFunc, error) { diff --git a/share/getters/shrex_test.go b/share/shwap/p2p/shrex/shrex_getter/shrex_test.go similarity index 73% rename from share/getters/shrex_test.go rename to share/shwap/p2p/shrex/shrex_getter/shrex_test.go index a474e1e618..05dc01103c 100644 --- a/share/getters/shrex_test.go +++ b/share/shwap/p2p/shrex/shrex_getter/shrex_test.go @@ -1,9 +1,10 @@ -package getters +package shrex_getter //nolint:stylecheck // underscore in pkg name will be fixed with shrex refactoring import ( "context" "encoding/binary" "errors" + "sync/atomic" "testing" "time" @@ -24,14 +25,14 @@ import ( "github.com/celestiaorg/celestia-node/pruner/full" "github.com/celestiaorg/celestia-node/pruner/light" "github.com/celestiaorg/celestia-node/share" - "github.com/celestiaorg/celestia-node/share/eds" "github.com/celestiaorg/celestia-node/share/eds/edstest" - "github.com/celestiaorg/celestia-node/share/ipld" - "github.com/celestiaorg/celestia-node/share/p2p/peers" - "github.com/celestiaorg/celestia-node/share/p2p/shrexeds" - "github.com/celestiaorg/celestia-node/share/p2p/shrexnd" - "github.com/celestiaorg/celestia-node/share/p2p/shrexsub" "github.com/celestiaorg/celestia-node/share/sharetest" + "github.com/celestiaorg/celestia-node/share/shwap" + "github.com/celestiaorg/celestia-node/share/shwap/p2p/shrex/peers" + "github.com/celestiaorg/celestia-node/share/shwap/p2p/shrex/shrexeds" + "github.com/celestiaorg/celestia-node/share/shwap/p2p/shrex/shrexnd" + "github.com/celestiaorg/celestia-node/share/shwap/p2p/shrex/shrexsub" + "github.com/celestiaorg/celestia-node/store" ) func TestShrexGetter(t *testing.T) { @@ -46,8 +47,6 @@ func TestShrexGetter(t *testing.T) { // launch 
eds store and put test data into it edsStore, err := newStore(t) require.NoError(t, err) - err = edsStore.Start(ctx) - require.NoError(t, err) ndClient, _ := newNDClientServer(ctx, t, edsStore, srvHost, clHost) edsClient, _ := newEDSClientServer(ctx, t, edsStore, srvHost, clHost) @@ -60,26 +59,34 @@ func TestShrexGetter(t *testing.T) { archivalPeerManager, err := testManager(ctx, clHost, sub) require.NoError(t, err) - getter := NewShrexGetter(edsClient, ndClient, fullPeerManager, archivalPeerManager, light.Window) + getter := NewGetter(edsClient, ndClient, fullPeerManager, archivalPeerManager, light.Window) require.NoError(t, getter.Start(ctx)) + height := atomic.Uint64{} + height.Add(1) + t.Run("ND_Available, total data size > 1mb", func(t *testing.T) { ctx, cancel := context.WithTimeout(ctx, time.Second*10) t.Cleanup(cancel) // generate test data + size := 64 namespace := sharetest.RandV0Namespace() - randEDS, dah := edstest.RandEDSWithNamespace(t, namespace, 64) - eh := headertest.RandExtendedHeaderWithRoot(t, dah) - require.NoError(t, edsStore.Put(ctx, dah.Hash(), randEDS)) + height := height.Add(1) + randEDS, roots := edstest.RandEDSWithNamespace(t, namespace, size*size, size) + eh := headertest.RandExtendedHeaderWithRoot(t, roots) + eh.RawHeader.Height = int64(height) + + err = edsStore.PutODSQ4(ctx, roots, height, randEDS) + require.NoError(t, err) fullPeerManager.Validate(ctx, srvHost.ID(), shrexsub.Notification{ - DataHash: dah.Hash(), - Height: 1, + DataHash: roots.Hash(), + Height: height, }) got, err := getter.GetSharesByNamespace(ctx, eh, namespace) require.NoError(t, err) - require.NoError(t, got.Verify(dah, namespace)) + require.NoError(t, got.Verify(roots, namespace)) }) t.Run("ND_err_not_found", func(t *testing.T) { @@ -87,15 +94,18 @@ func TestShrexGetter(t *testing.T) { t.Cleanup(cancel) // generate test data - _, dah, namespace := generateTestEDS(t) - eh := headertest.RandExtendedHeaderWithRoot(t, dah) + height := height.Add(1) + _, roots, 
namespace := generateTestEDS(t) + eh := headertest.RandExtendedHeaderWithRoot(t, roots) + eh.RawHeader.Height = int64(height) + fullPeerManager.Validate(ctx, srvHost.ID(), shrexsub.Notification{ - DataHash: dah.Hash(), - Height: 1, + DataHash: roots.Hash(), + Height: height, }) _, err := getter.GetSharesByNamespace(ctx, eh, namespace) - require.ErrorIs(t, err, share.ErrNotFound) + require.ErrorIs(t, err, shwap.ErrNotFound) }) t.Run("ND_namespace_not_included", func(t *testing.T) { @@ -103,37 +113,41 @@ func TestShrexGetter(t *testing.T) { t.Cleanup(cancel) // generate test data - eds, dah, maxNamespace := generateTestEDS(t) - eh := headertest.RandExtendedHeaderWithRoot(t, dah) - require.NoError(t, edsStore.Put(ctx, dah.Hash(), eds)) + height := height.Add(1) + eds, roots, maxNamespace := generateTestEDS(t) + eh := headertest.RandExtendedHeaderWithRoot(t, roots) + eh.RawHeader.Height = int64(height) + + err = edsStore.PutODSQ4(ctx, roots, height, eds) + require.NoError(t, err) fullPeerManager.Validate(ctx, srvHost.ID(), shrexsub.Notification{ - DataHash: dah.Hash(), - Height: 1, + DataHash: roots.Hash(), + Height: height, }) // namespace inside root range nID, err := addToNamespace(maxNamespace, -1) require.NoError(t, err) // check for namespace to be between max and min namespace in root - require.Len(t, ipld.FilterRootByNamespace(dah, nID), 1) + require.Len(t, share.RowsWithNamespace(roots, nID), 1) emptyShares, err := getter.GetSharesByNamespace(ctx, eh, nID) require.NoError(t, err) // no shares should be returned require.Nil(t, emptyShares.Flatten()) - require.Nil(t, emptyShares.Verify(dah, nID)) + require.Nil(t, emptyShares.Verify(roots, nID)) // namespace outside root range nID, err = addToNamespace(maxNamespace, 1) require.NoError(t, err) // check for namespace to be not in root - require.Len(t, ipld.FilterRootByNamespace(dah, nID), 0) + require.Len(t, share.RowsWithNamespace(roots, nID), 0) emptyShares, err = getter.GetSharesByNamespace(ctx, eh, nID) 
require.NoError(t, err) // no shares should be returned require.Nil(t, emptyShares.Flatten()) - require.Nil(t, emptyShares.Verify(dah, nID)) + require.Nil(t, emptyShares.Verify(roots, nID)) }) t.Run("ND_namespace_not_in_dah", func(t *testing.T) { @@ -141,24 +155,28 @@ func TestShrexGetter(t *testing.T) { t.Cleanup(cancel) // generate test data - eds, dah, maxNamespace := generateTestEDS(t) - eh := headertest.RandExtendedHeaderWithRoot(t, dah) - require.NoError(t, edsStore.Put(ctx, dah.Hash(), eds)) + eds, roots, maxNamespace := generateTestEDS(t) + height := height.Add(1) + eh := headertest.RandExtendedHeaderWithRoot(t, roots) + eh.RawHeader.Height = int64(height) + + err = edsStore.PutODSQ4(ctx, roots, height, eds) + require.NoError(t, err) fullPeerManager.Validate(ctx, srvHost.ID(), shrexsub.Notification{ - DataHash: dah.Hash(), - Height: 1, + DataHash: roots.Hash(), + Height: height, }) namespace, err := addToNamespace(maxNamespace, 1) require.NoError(t, err) // check for namespace to be not in root - require.Len(t, ipld.FilterRootByNamespace(dah, namespace), 0) + require.Len(t, share.RowsWithNamespace(roots, namespace), 0) emptyShares, err := getter.GetSharesByNamespace(ctx, eh, namespace) require.NoError(t, err) // no shares should be returned require.Empty(t, emptyShares.Flatten()) - require.Nil(t, emptyShares.Verify(dah, namespace)) + require.Nil(t, emptyShares.Verify(roots, namespace)) }) t.Run("EDS_Available", func(t *testing.T) { @@ -166,12 +184,16 @@ func TestShrexGetter(t *testing.T) { t.Cleanup(cancel) // generate test data - randEDS, dah, _ := generateTestEDS(t) - eh := headertest.RandExtendedHeaderWithRoot(t, dah) - require.NoError(t, edsStore.Put(ctx, dah.Hash(), randEDS)) + randEDS, roots, _ := generateTestEDS(t) + height := height.Add(1) + eh := headertest.RandExtendedHeaderWithRoot(t, roots) + eh.RawHeader.Height = int64(height) + + err = edsStore.PutODSQ4(ctx, roots, height, randEDS) + require.NoError(t, err) fullPeerManager.Validate(ctx, 
srvHost.ID(), shrexsub.Notification{ - DataHash: dah.Hash(), - Height: 1, + DataHash: roots.Hash(), + Height: height, }) got, err := getter.GetEDS(ctx, eh) @@ -183,11 +205,14 @@ func TestShrexGetter(t *testing.T) { ctx, cancel := context.WithTimeout(ctx, time.Second) // generate test data - _, dah, _ := generateTestEDS(t) - eh := headertest.RandExtendedHeaderWithRoot(t, dah) + _, roots, _ := generateTestEDS(t) + height := height.Add(1) + eh := headertest.RandExtendedHeaderWithRoot(t, roots) + eh.RawHeader.Height = int64(height) + fullPeerManager.Validate(ctx, srvHost.ID(), shrexsub.Notification{ - DataHash: dah.Hash(), - Height: 1, + DataHash: roots.Hash(), + Height: height, }) cancel() @@ -200,15 +225,18 @@ func TestShrexGetter(t *testing.T) { t.Cleanup(cancel) // generate test data - _, dah, _ := generateTestEDS(t) - eh := headertest.RandExtendedHeaderWithRoot(t, dah) + _, roots, _ := generateTestEDS(t) + height := height.Add(1) + eh := headertest.RandExtendedHeaderWithRoot(t, roots) + eh.RawHeader.Height = int64(height) + fullPeerManager.Validate(ctx, srvHost.ID(), shrexsub.Notification{ - DataHash: dah.Hash(), - Height: 1, + DataHash: roots.Hash(), + Height: height, }) _, err := getter.GetEDS(ctx, eh) - require.ErrorIs(t, err, share.ErrNotFound) + require.ErrorIs(t, err, shwap.ErrNotFound) }) // tests getPeer's ability to route requests based on whether @@ -225,7 +253,9 @@ func TestShrexGetter(t *testing.T) { getter.archivalPeerManager.UpdateNodePool(archivalPeer.ID(), true) getter.fullPeerManager.UpdateNodePool(fullPeer.ID(), true) + height := height.Add(1) eh := headertest.RandExtendedHeader(t) + eh.RawHeader.Height = int64(height) // historical data expects an archival peer eh.RawHeader.Time = time.Now().Add(-(time.Duration(full.Window) + time.Second)) @@ -241,19 +271,18 @@ func TestShrexGetter(t *testing.T) { }) } -func newStore(t *testing.T) (*eds.Store, error) { +func newStore(t *testing.T) (*store.Store, error) { t.Helper() - ds := 
ds_sync.MutexWrap(datastore.NewMapDatastore()) - return eds.NewStore(eds.DefaultParameters(), t.TempDir(), ds) + return store.NewStore(store.DefaultParameters(), t.TempDir()) } -func generateTestEDS(t *testing.T) (*rsmt2d.ExtendedDataSquare, *share.Root, share.Namespace) { +func generateTestEDS(t *testing.T) (*rsmt2d.ExtendedDataSquare, *share.AxisRoots, share.Namespace) { eds := edstest.RandEDS(t, 4) - dah, err := share.NewRoot(eds) + roots, err := share.NewAxisRoots(eds) require.NoError(t, err) - max := nmt.MaxNamespace(dah.RowRoots[(len(dah.RowRoots))/2-1], share.NamespaceSize) - return eds, dah, max + max := nmt.MaxNamespace(roots.RowRoots[(len(roots.RowRoots))/2-1], share.NamespaceSize) + return eds, roots, max } func testManager( @@ -279,7 +308,7 @@ func testManager( } func newNDClientServer( - ctx context.Context, t *testing.T, edsStore *eds.Store, srvHost, clHost host.Host, + ctx context.Context, t *testing.T, edsStore *store.Store, srvHost, clHost host.Host, ) (*shrexnd.Client, *shrexnd.Server) { params := shrexnd.DefaultParameters() @@ -299,7 +328,7 @@ func newNDClientServer( } func newEDSClientServer( - ctx context.Context, t *testing.T, edsStore *eds.Store, srvHost, clHost host.Host, + ctx context.Context, t *testing.T, edsStore *store.Store, srvHost, clHost host.Host, ) (*shrexeds.Client, *shrexeds.Server) { params := shrexeds.DefaultParameters() diff --git a/share/p2p/shrexeds/client.go b/share/shwap/p2p/shrex/shrexeds/client.go similarity index 65% rename from share/p2p/shrexeds/client.go rename to share/shwap/p2p/shrex/shrexeds/client.go index d56e0e20f5..5239c407f9 100644 --- a/share/p2p/shrexeds/client.go +++ b/share/shwap/p2p/shrex/shrexeds/client.go @@ -16,10 +16,12 @@ import ( "github.com/celestiaorg/go-libp2p-messenger/serde" "github.com/celestiaorg/rsmt2d" + "github.com/celestiaorg/celestia-node/libs/utils" "github.com/celestiaorg/celestia-node/share" "github.com/celestiaorg/celestia-node/share/eds" - 
"github.com/celestiaorg/celestia-node/share/p2p" - pb "github.com/celestiaorg/celestia-node/share/p2p/shrexeds/pb" + "github.com/celestiaorg/celestia-node/share/shwap" + "github.com/celestiaorg/celestia-node/share/shwap/p2p/shrex" + shrexpb "github.com/celestiaorg/celestia-node/share/shwap/p2p/shrex/pb" ) // Client is responsible for requesting EDSs for blocksync over the ShrEx/EDS protocol. @@ -28,7 +30,7 @@ type Client struct { protocolID protocol.ID host host.Host - metrics *p2p.Metrics + metrics *shrex.Metrics } // NewClient creates a new ShrEx/EDS client. @@ -40,23 +42,27 @@ func NewClient(params *Parameters, host host.Host) (*Client, error) { return &Client{ params: params, host: host, - protocolID: p2p.ProtocolID(params.NetworkID(), protocolString), + protocolID: shrex.ProtocolID(params.NetworkID(), protocolString), }, nil } // RequestEDS requests the ODS from the given peers and returns the EDS upon success. func (c *Client) RequestEDS( ctx context.Context, - dataHash share.DataHash, + root *share.AxisRoots, + height uint64, peer peer.ID, ) (*rsmt2d.ExtendedDataSquare, error) { - eds, err := c.doRequest(ctx, dataHash, peer) + eds, err := c.doRequest(ctx, root, height, peer) if err == nil { return eds, nil } - log.Debugw("client: eds request to peer failed", "peer", peer.String(), "hash", dataHash.String(), "error", err) + log.Debugw("client: eds request to peer failed", + "height", height, + "peer", peer.String(), + "error", err) if errors.Is(err, context.DeadlineExceeded) || errors.Is(err, context.Canceled) { - c.metrics.ObserveRequests(ctx, 1, p2p.StatusTimeout) + c.metrics.ObserveRequests(ctx, 1, shrex.StatusTimeout) return nil, err } // some net.Errors also mean the context deadline was exceeded, but yamux/mocknet do not @@ -64,14 +70,14 @@ func (c *Client) RequestEDS( var ne net.Error if errors.As(err, &ne) && ne.Timeout() { if deadline, _ := ctx.Deadline(); deadline.Before(time.Now()) { - c.metrics.ObserveRequests(ctx, 1, p2p.StatusTimeout) + 
c.metrics.ObserveRequests(ctx, 1, shrex.StatusTimeout) return nil, context.DeadlineExceeded } } - if !errors.Is(err, p2p.ErrNotFound) { + if !errors.Is(err, shrex.ErrNotFound) { log.Warnw("client: eds request to peer failed", "peer", peer.String(), - "hash", dataHash.String(), + "height", height, "err", err) } @@ -80,35 +86,39 @@ func (c *Client) RequestEDS( func (c *Client) doRequest( ctx context.Context, - dataHash share.DataHash, + root *share.AxisRoots, + height uint64, to peer.ID, ) (*rsmt2d.ExtendedDataSquare, error) { streamOpenCtx, cancel := context.WithTimeout(ctx, c.params.ServerReadTimeout) defer cancel() stream, err := c.host.NewStream(streamOpenCtx, to, c.protocolID) if err != nil { - return nil, fmt.Errorf("failed to open stream: %w", err) + return nil, fmt.Errorf("open stream: %w", err) } - defer stream.Close() + defer utils.CloseAndLog(log, "client", stream) c.setStreamDeadlines(ctx, stream) - - req := &pb.EDSRequest{Hash: dataHash} - // request ODS - log.Debugw("client: requesting ods", "hash", dataHash.String(), "peer", to.String()) - _, err = serde.Write(stream, req) + log.Debugw("client: requesting ods", + "height", height, + "peer", to.String()) + id, err := shwap.NewEdsID(height) + if err != nil { + return nil, fmt.Errorf("create request: %w", err) + } + _, err = id.WriteTo(stream) if err != nil { - stream.Reset() //nolint:errcheck - return nil, fmt.Errorf("failed to write request to stream: %w", err) + return nil, fmt.Errorf("write request to stream: %w", err) } + err = stream.CloseWrite() if err != nil { - log.Debugw("client: error closing write", "err", err) + log.Warnw("client: error closing write", "err", err) } // read and parse status from peer - resp := new(pb.EDSResponse) + resp := new(shrexpb.Response) err = stream.SetReadDeadline(time.Now().Add(c.params.ServerReadTimeout)) if err != nil { log.Debugw("client: failed to set read deadline for reading status", "err", err) @@ -117,35 +127,33 @@ func (c *Client) doRequest( if err != nil { 
// server closes the stream here if we are rate limited if errors.Is(err, io.EOF) { - c.metrics.ObserveRequests(ctx, 1, p2p.StatusRateLimited) - return nil, p2p.ErrNotFound + c.metrics.ObserveRequests(ctx, 1, shrex.StatusRateLimited) + return nil, shrex.ErrNotFound } - stream.Reset() //nolint:errcheck - return nil, fmt.Errorf("failed to read status from stream: %w", err) + return nil, fmt.Errorf("read status from stream: %w", err) } - switch resp.Status { - case pb.Status_OK: + case shrexpb.Status_OK: // reset stream deadlines to original values, since read deadline was changed during status read c.setStreamDeadlines(ctx, stream) // use header and ODS bytes to construct EDS and verify it against dataHash - eds, err := eds.ReadEDS(ctx, stream, dataHash) + eds, err := eds.ReadAccessor(ctx, stream, root) if err != nil { - return nil, fmt.Errorf("failed to read eds from ods bytes: %w", err) + return nil, fmt.Errorf("read eds from stream: %w", err) } - c.metrics.ObserveRequests(ctx, 1, p2p.StatusSuccess) - return eds, nil - case pb.Status_NOT_FOUND: - c.metrics.ObserveRequests(ctx, 1, p2p.StatusNotFound) - return nil, p2p.ErrNotFound - case pb.Status_INVALID: + c.metrics.ObserveRequests(ctx, 1, shrex.StatusSuccess) + return eds.ExtendedDataSquare, nil + case shrexpb.Status_NOT_FOUND: + c.metrics.ObserveRequests(ctx, 1, shrex.StatusNotFound) + return nil, shrex.ErrNotFound + case shrexpb.Status_INVALID: log.Debug("client: invalid request") fallthrough - case pb.Status_INTERNAL: + case shrexpb.Status_INTERNAL: fallthrough default: - c.metrics.ObserveRequests(ctx, 1, p2p.StatusInternalErr) - return nil, p2p.ErrInvalidResponse + c.metrics.ObserveRequests(ctx, 1, shrex.StatusInternalErr) + return nil, shrex.ErrInvalidResponse } } diff --git a/share/p2p/shrexeds/doc.go b/share/shwap/p2p/shrex/shrexeds/doc.go similarity index 100% rename from share/p2p/shrexeds/doc.go rename to share/shwap/p2p/shrex/shrexeds/doc.go diff --git a/share/p2p/shrexeds/exchange_test.go 
b/share/shwap/p2p/shrex/shrexeds/exchange_test.go similarity index 65% rename from share/p2p/shrexeds/exchange_test.go rename to share/shwap/p2p/shrex/shrexeds/exchange_test.go index 9155be6dec..89e1dafa1c 100644 --- a/share/p2p/shrexeds/exchange_test.go +++ b/share/shwap/p2p/shrex/shrexeds/exchange_test.go @@ -6,8 +6,6 @@ import ( "testing" "time" - "github.com/ipfs/go-datastore" - ds_sync "github.com/ipfs/go-datastore/sync" libhost "github.com/libp2p/go-libp2p/core/host" "github.com/libp2p/go-libp2p/core/network" mocknet "github.com/libp2p/go-libp2p/p2p/net/mock" @@ -15,9 +13,10 @@ import ( "github.com/stretchr/testify/require" "github.com/celestiaorg/celestia-node/share" - "github.com/celestiaorg/celestia-node/share/eds" "github.com/celestiaorg/celestia-node/share/eds/edstest" - "github.com/celestiaorg/celestia-node/share/p2p" + "github.com/celestiaorg/celestia-node/share/shwap" + "github.com/celestiaorg/celestia-node/share/shwap/p2p/shrex" + "github.com/celestiaorg/celestia-node/store" ) func TestExchange_RequestEDS(t *testing.T) { @@ -25,21 +24,19 @@ func TestExchange_RequestEDS(t *testing.T) { t.Cleanup(cancel) store, client, server := makeExchange(t) - err := store.Start(ctx) - require.NoError(t, err) - - err = server.Start(ctx) + err := server.Start(ctx) require.NoError(t, err) // Testcase: EDS is immediately available t.Run("EDS_Available", func(t *testing.T) { eds := edstest.RandEDS(t, 4) - dah, err := share.NewRoot(eds) + roots, err := share.NewAxisRoots(eds) require.NoError(t, err) - err = store.Put(ctx, dah.Hash(), eds) + height := uint64(1) + err = store.PutODSQ4(ctx, roots, height, eds) require.NoError(t, err) - requestedEDS, err := client.RequestEDS(ctx, dah.Hash(), server.host.ID()) + requestedEDS, err := client.RequestEDS(ctx, roots, height, server.host.ID()) assert.NoError(t, err) assert.Equal(t, eds.Flattened(), requestedEDS.Flattened()) }) @@ -47,19 +44,20 @@ func TestExchange_RequestEDS(t *testing.T) { // Testcase: EDS is unavailable 
initially, but is found after multiple requests t.Run("EDS_AvailableAfterDelay", func(t *testing.T) { eds := edstest.RandEDS(t, 4) - dah, err := share.NewRoot(eds) + roots, err := share.NewAxisRoots(eds) require.NoError(t, err) + height := uint64(666) lock := make(chan struct{}) go func() { <-lock - err = store.Put(ctx, dah.Hash(), eds) + err := store.PutODSQ4(ctx, roots, height, eds) require.NoError(t, err) lock <- struct{}{} }() - requestedEDS, err := client.RequestEDS(ctx, dah.Hash(), server.host.ID()) - assert.ErrorIs(t, err, p2p.ErrNotFound) + requestedEDS, err := client.RequestEDS(ctx, roots, height, server.host.ID()) + assert.ErrorIs(t, err, shrex.ErrNotFound) assert.Nil(t, requestedEDS) // unlock write @@ -67,16 +65,17 @@ func TestExchange_RequestEDS(t *testing.T) { // wait for write to finish <-lock - requestedEDS, err = client.RequestEDS(ctx, dah.Hash(), server.host.ID()) + requestedEDS, err = client.RequestEDS(ctx, roots, height, server.host.ID()) assert.NoError(t, err) assert.Equal(t, eds.Flattened(), requestedEDS.Flattened()) }) // Testcase: Invalid request excludes peer from round-robin, stopping request t.Run("EDS_InvalidRequest", func(t *testing.T) { - dataHash := []byte("invalid") - requestedEDS, err := client.RequestEDS(ctx, dataHash, server.host.ID()) - assert.ErrorContains(t, err, "stream reset") + emptyRoot := share.EmptyEDSRoots() + height := uint64(0) + requestedEDS, err := client.RequestEDS(ctx, emptyRoot, height, server.host.ID()) + assert.ErrorIs(t, err, shwap.ErrInvalidID) assert.Nil(t, requestedEDS) }) @@ -84,17 +83,17 @@ func TestExchange_RequestEDS(t *testing.T) { timeoutCtx, cancel := context.WithTimeout(ctx, time.Second) t.Cleanup(cancel) eds := edstest.RandEDS(t, 4) - dah, err := share.NewRoot(eds) + roots, err := share.NewAxisRoots(eds) require.NoError(t, err) - _, err = client.RequestEDS(timeoutCtx, dah.Hash(), server.host.ID()) - require.ErrorIs(t, err, p2p.ErrNotFound) + height := uint64(668) + _, err = 
client.RequestEDS(timeoutCtx, roots, height, server.host.ID()) + require.ErrorIs(t, err, shrex.ErrNotFound) }) // Testcase: Concurrency limit reached t.Run("EDS_concurrency_limit", func(t *testing.T) { - store, client, server := makeExchange(t) + _, client, server := makeExchange(t) - require.NoError(t, store.Start(ctx)) require.NoError(t, server.Start(ctx)) ctx, cancel := context.WithTimeout(ctx, time.Second) @@ -115,34 +114,25 @@ func TestExchange_RequestEDS(t *testing.T) { t.Fatal("timeout") } } - middleware := p2p.NewMiddleware(rateLimit) + middleware := shrex.NewMiddleware(rateLimit) server.host.SetStreamHandler(server.protocolID, middleware.RateLimitHandler(mockHandler)) // take server concurrency slots with blocked requests + emptyRoot := share.EmptyEDSRoots() for i := 0; i < rateLimit; i++ { go func(i int) { - client.RequestEDS(ctx, nil, server.host.ID()) //nolint:errcheck + client.RequestEDS(ctx, emptyRoot, 1, server.host.ID()) //nolint:errcheck }(i) } // wait until all server slots are taken wg.Wait() - _, err = client.RequestEDS(ctx, nil, server.host.ID()) - require.ErrorIs(t, err, p2p.ErrNotFound) + _, err = client.RequestEDS(ctx, emptyRoot, 1, server.host.ID()) + require.ErrorIs(t, err, shrex.ErrNotFound) }) } -func newStore(t *testing.T) *eds.Store { - t.Helper() - - storeCfg := eds.DefaultParameters() - ds := ds_sync.MutexWrap(datastore.NewMapDatastore()) - store, err := eds.NewStore(storeCfg, t.TempDir(), ds) - require.NoError(t, err) - return store -} - func createMocknet(t *testing.T, amount int) []libhost.Host { t.Helper() @@ -152,9 +142,10 @@ func createMocknet(t *testing.T, amount int) []libhost.Host { return net.Hosts() } -func makeExchange(t *testing.T) (*eds.Store, *Client, *Server) { +func makeExchange(t *testing.T) (*store.Store, *Client, *Server) { t.Helper() - store := newStore(t) + store, err := store.NewStore(store.DefaultParameters(), t.TempDir()) + require.NoError(t, err) hosts := createMocknet(t, 2) client, err := 
NewClient(DefaultParameters(), hosts[0]) diff --git a/share/p2p/shrexeds/params.go b/share/shwap/p2p/shrex/shrexeds/params.go similarity index 73% rename from share/p2p/shrexeds/params.go rename to share/shwap/p2p/shrex/shrexeds/params.go index 795cb313ed..c5d76fecb2 100644 --- a/share/p2p/shrexeds/params.go +++ b/share/shwap/p2p/shrex/shrexeds/params.go @@ -5,16 +5,17 @@ import ( logging "github.com/ipfs/go-log/v2" - "github.com/celestiaorg/celestia-node/share/p2p" + "github.com/celestiaorg/celestia-node/share/shwap" + "github.com/celestiaorg/celestia-node/share/shwap/p2p/shrex" ) -const protocolString = "/shrex/eds/v0.0.1" +const protocolString = shrex.ProtocolString + shwap.EDSName var log = logging.Logger("shrex/eds") // Parameters is the set of parameters that must be configured for the shrex/eds protocol. type Parameters struct { - *p2p.Parameters + *shrex.Parameters // BufferSize defines the size of the buffer used for writing an ODS over the stream. BufferSize uint64 @@ -22,7 +23,7 @@ type Parameters struct { func DefaultParameters() *Parameters { return &Parameters{ - Parameters: p2p.DefaultParameters(), + Parameters: shrex.DefaultParameters(), BufferSize: 32 * 1024, } } @@ -36,7 +37,7 @@ func (p *Parameters) Validate() error { } func (c *Client) WithMetrics() error { - metrics, err := p2p.InitClientMetrics("eds") + metrics, err := shrex.InitClientMetrics("eds") if err != nil { return fmt.Errorf("shrex/eds: init Metrics: %w", err) } @@ -45,7 +46,7 @@ func (c *Client) WithMetrics() error { } func (s *Server) WithMetrics() error { - metrics, err := p2p.InitServerMetrics("eds") + metrics, err := shrex.InitServerMetrics("eds") if err != nil { return fmt.Errorf("shrex/eds: init Metrics: %w", err) } diff --git a/share/shwap/p2p/shrex/shrexeds/server.go b/share/shwap/p2p/shrex/shrexeds/server.go new file mode 100644 index 0000000000..0367e7d82e --- /dev/null +++ b/share/shwap/p2p/shrex/shrexeds/server.go @@ -0,0 +1,194 @@ +package shrexeds + +import ( + "context" 
+ "errors" + "fmt" + "io" + "time" + + "github.com/libp2p/go-libp2p/core/host" + "github.com/libp2p/go-libp2p/core/network" + "github.com/libp2p/go-libp2p/core/protocol" + "go.uber.org/zap" + + "github.com/celestiaorg/go-libp2p-messenger/serde" + + "github.com/celestiaorg/celestia-node/libs/utils" + "github.com/celestiaorg/celestia-node/share/eds" + "github.com/celestiaorg/celestia-node/share/shwap" + "github.com/celestiaorg/celestia-node/share/shwap/p2p/shrex" + shrexpb "github.com/celestiaorg/celestia-node/share/shwap/p2p/shrex/pb" + "github.com/celestiaorg/celestia-node/store" +) + +// Server is responsible for serving ODSs for blocksync over the ShrEx/EDS protocol. +type Server struct { + cancel context.CancelFunc + + host host.Host + protocolID protocol.ID + + store *store.Store + + params *Parameters + middleware *shrex.Middleware + metrics *shrex.Metrics +} + +// NewServer creates a new ShrEx/EDS server. +func NewServer(params *Parameters, host host.Host, store *store.Store) (*Server, error) { + if err := params.Validate(); err != nil { + return nil, fmt.Errorf("shrex-eds: server creation failed: %w", err) + } + + return &Server{ + host: host, + store: store, + protocolID: shrex.ProtocolID(params.NetworkID(), protocolString), + params: params, + middleware: shrex.NewMiddleware(params.ConcurrencyLimit), + }, nil +} + +func (s *Server) Start(context.Context) error { + ctx, cancel := context.WithCancel(context.Background()) + s.cancel = cancel + + s.host.SetStreamHandler(s.protocolID, s.middleware.RateLimitHandler(s.streamHandler(ctx))) + return nil +} + +func (s *Server) Stop(context.Context) error { + defer s.cancel() + s.host.RemoveStreamHandler(s.protocolID) + return nil +} + +func (s *Server) observeRateLimitedRequests() { + numRateLimited := s.middleware.DrainCounter() + if numRateLimited > 0 { + s.metrics.ObserveRequests(context.Background(), numRateLimited, shrex.StatusRateLimited) + } +} + +func (s *Server) streamHandler(ctx context.Context) 
network.StreamHandler { + return func(stream network.Stream) { + err := s.handleEDS(ctx, stream) + if err != nil { + stream.Reset() //nolint:errcheck + return + } + s.metrics.ObserveRequests(ctx, 1, shrex.StatusSuccess) + if err = stream.Close(); err != nil { + log.Debugw("server: closing stream", "err", err) + } + } +} + +func (s *Server) handleEDS(ctx context.Context, stream network.Stream) error { + logger := log.With("peer", stream.Conn().RemotePeer().String()) + logger.Debug("server: handling eds request") + // observe rate limited requests is draining the counter for rate limited requests + // since last handleStream call. This is not optimal observing strategy, but it is + // good enough for now. Will be improved in shrex unification PR. + s.observeRateLimitedRequests() + + // read request from stream to get the dataHash for store lookup + id, err := s.readRequest(logger, stream) + if err != nil { + logger.Warnw("server: reading request from stream", "err", err) + return err + } + + logger = logger.With("height", id.Height) + + ctx, cancel := context.WithTimeout(ctx, s.params.HandleRequestTimeout) + defer cancel() + + // determine whether the EDS is available in our store + // we do not close the reader, so that other requests will not need to re-open the file. + // closing is handled by the LRU cache. 
+ file, err := s.store.GetByHeight(ctx, id.Height) + var status shrexpb.Status + switch { + case err == nil: + defer utils.CloseAndLog(logger, "file", file) + status = shrexpb.Status_OK + case errors.Is(err, store.ErrNotFound): + logger.Warnw("server: request height not found") + s.metrics.ObserveRequests(ctx, 1, shrex.StatusNotFound) + status = shrexpb.Status_NOT_FOUND + case err != nil: + logger.Errorw("server: get file", "err", err) + status = shrexpb.Status_INTERNAL + } + + // inform the client of our status + err = s.writeStatus(logger, status, stream) + if err != nil { + logger.Warnw("server: writing status to stream", "err", err) + return err + } + // if we cannot serve the EDS, we are already done + if status != shrexpb.Status_OK { + return nil + } + + // start streaming the ODS to the client + err = s.writeODS(logger, file, stream) + if err != nil { + logger.Warnw("server: writing ods to stream", "err", err) + return err + } + return nil +} + +func (s *Server) readRequest(logger *zap.SugaredLogger, stream network.Stream) (shwap.EdsID, error) { + err := stream.SetReadDeadline(time.Now().Add(s.params.ServerReadTimeout)) + if err != nil { + logger.Debugw("server: set read deadline", "err", err) + } + + edsID := shwap.EdsID{} + _, err = edsID.ReadFrom(stream) + if err != nil { + return shwap.EdsID{}, fmt.Errorf("reading request: %w", err) + } + err = stream.CloseRead() + if err != nil { + logger.Warnw("server: closing read", "err", err) + } + return edsID, nil +} + +func (s *Server) writeStatus(logger *zap.SugaredLogger, status shrexpb.Status, stream network.Stream) error { + err := stream.SetWriteDeadline(time.Now().Add(s.params.ServerWriteTimeout)) + if err != nil { + logger.Debugw("server: set write deadline", "err", err) + } + + resp := &shrexpb.Response{Status: status} + _, err = serde.Write(stream, resp) + return err +} + +func (s *Server) writeODS(logger *zap.SugaredLogger, streamer eds.Streamer, stream network.Stream) error { + reader, err := 
streamer.Reader() + if err != nil { + return fmt.Errorf("getting ODS reader: %w", err) + } + err = stream.SetWriteDeadline(time.Now().Add(s.params.ServerWriteTimeout)) + if err != nil { + logger.Debugw("server: set write deadline", "err", err) + } + + buf := make([]byte, s.params.BufferSize) + n, err := io.CopyBuffer(stream, reader, buf) + if err != nil { + return fmt.Errorf("written: %v, writing ODS bytes: %w", n, err) + } + + logger.Debugw("server: wrote ODS", "bytes", n) + return nil +} diff --git a/share/p2p/shrexnd/client.go b/share/shwap/p2p/shrex/shrexnd/client.go similarity index 56% rename from share/p2p/shrexnd/client.go rename to share/shwap/p2p/shrex/shrexnd/client.go index 4fb6c8904b..a7029a348a 100644 --- a/share/p2p/shrexnd/client.go +++ b/share/shwap/p2p/shrex/shrexnd/client.go @@ -14,11 +14,12 @@ import ( "github.com/libp2p/go-libp2p/core/protocol" "github.com/celestiaorg/go-libp2p-messenger/serde" - "github.com/celestiaorg/nmt" + "github.com/celestiaorg/celestia-node/libs/utils" "github.com/celestiaorg/celestia-node/share" - "github.com/celestiaorg/celestia-node/share/p2p" - pb "github.com/celestiaorg/celestia-node/share/p2p/shrexnd/pb" + "github.com/celestiaorg/celestia-node/share/shwap" + "github.com/celestiaorg/celestia-node/share/shwap/p2p/shrex" + shrexpb "github.com/celestiaorg/celestia-node/share/shwap/p2p/shrex/pb" ) // Client implements client side of shrex/nd protocol to obtain namespaced shares data from remote @@ -28,7 +29,7 @@ type Client struct { protocolID protocol.ID host host.Host - metrics *p2p.Metrics + metrics *shrex.Metrics } // NewClient creates a new shrEx/nd client @@ -39,29 +40,29 @@ func NewClient(params *Parameters, host host.Host) (*Client, error) { return &Client{ host: host, - protocolID: p2p.ProtocolID(params.NetworkID(), protocolString), + protocolID: shrex.ProtocolID(params.NetworkID(), protocolString), params: params, }, nil } // RequestND requests namespaced data from the given peer.
-// Returns NamespacedShares with unverified inclusion proofs against the share.Root. +// Returns NamespaceData with unverified inclusion proofs against the share.Root. func (c *Client) RequestND( ctx context.Context, - root *share.Root, + height uint64, namespace share.Namespace, peer peer.ID, -) (share.NamespacedShares, error) { +) (shwap.NamespaceData, error) { if err := namespace.ValidateForData(); err != nil { return nil, err } - shares, err := c.doRequest(ctx, root, namespace, peer) + shares, err := c.doRequest(ctx, height, namespace, peer) if err == nil { return shares, nil } if errors.Is(err, context.DeadlineExceeded) || errors.Is(err, context.Canceled) { - c.metrics.ObserveRequests(ctx, 1, p2p.StatusTimeout) + c.metrics.ObserveRequests(ctx, 1, shrex.StatusTimeout) return nil, err } // some net.Errors also mean the context deadline was exceeded, but yamux/mocknet do not @@ -69,11 +70,11 @@ func (c *Client) RequestND( var ne net.Error if errors.As(err, &ne) && ne.Timeout() { if deadline, _ := ctx.Deadline(); deadline.Before(time.Now()) { - c.metrics.ObserveRequests(ctx, 1, p2p.StatusTimeout) + c.metrics.ObserveRequests(ctx, 1, shrex.StatusTimeout) return nil, context.DeadlineExceeded } } - if !errors.Is(err, p2p.ErrNotFound) && !errors.Is(err, p2p.ErrRateLimited) { + if !errors.Is(err, shrex.ErrNotFound) && !errors.Is(err, shrex.ErrRateLimited) { log.Warnw("client-nd: peer returned err", "err", err) } return nil, err @@ -81,51 +82,60 @@ func (c *Client) doRequest( ctx context.Context, - root *share.Root, + height uint64, namespace share.Namespace, peerID peer.ID, -) (share.NamespacedShares, error) { - stream, err := c.host.NewStream(ctx, peerID, c.protocolID) +) (shwap.NamespaceData, error) { + streamOpenCtx, cancel := context.WithTimeout(ctx, c.params.ServerReadTimeout) + defer cancel() + stream, err := c.host.NewStream(streamOpenCtx, peerID, c.protocolID) if err != nil { return nil, err } - defer stream.Close() + defer
utils.CloseAndLog(log, "client", stream) c.setStreamDeadlines(ctx, stream) - req := &pb.GetSharesByNamespaceRequest{ - RootHash: root.Hash(), - Namespace: namespace, + req, err := shwap.NewNamespaceDataID(height, namespace) + if err != nil { + return nil, fmt.Errorf("client-nd: creating request: %w", err) } - _, err = serde.Write(stream, req) + _, err = req.WriteTo(stream) if err != nil { - c.metrics.ObserveRequests(ctx, 1, p2p.StatusSendReqErr) - stream.Reset() //nolint:errcheck + c.metrics.ObserveRequests(ctx, 1, shrex.StatusSendReqErr) return nil, fmt.Errorf("client-nd: writing request: %w", err) } err = stream.CloseWrite() if err != nil { - log.Debugw("client-nd: closing write side of the stream", "err", err) + log.Warnw("client-nd: closing write side of the stream", "err", err) } if err := c.readStatus(ctx, stream); err != nil { + c.metrics.ObserveRequests(ctx, 1, shrex.StatusReadRespErr) return nil, err } - return c.readNamespacedShares(ctx, stream) + + nd := shwap.NamespaceData{} + _, err = nd.ReadFrom(stream) + if err != nil { + c.metrics.ObserveRequests(ctx, 1, shrex.StatusReadRespErr) + return nil, err + } + return nd, nil } func (c *Client) readStatus(ctx context.Context, stream network.Stream) error { - var resp pb.GetSharesByNamespaceStatusResponse + var resp shrexpb.Response _, err := serde.Read(stream, &resp) if err != nil { // server is overloaded and closed the stream if errors.Is(err, io.EOF) { - c.metrics.ObserveRequests(ctx, 1, p2p.StatusRateLimited) - return p2p.ErrRateLimited + c.metrics.ObserveRequests(ctx, 1, shrex.StatusRateLimited) + return shrex.ErrRateLimited } - c.metrics.ObserveRequests(ctx, 1, p2p.StatusReadRespErr) + c.metrics.ObserveRequests(ctx, 1, shrex.StatusReadRespErr) stream.Reset() //nolint:errcheck return fmt.Errorf("client-nd: reading status response: %w", err) } @@ -133,49 +143,6 @@ func (c *Client) readStatus(ctx context.Context, stream network.Stream) error { return c.convertStatusToErr(ctx, resp.Status) } -// 
readNamespacedShares converts proto Rows to share.NamespacedShares -func (c *Client) readNamespacedShares( - ctx context.Context, - stream network.Stream, -) (share.NamespacedShares, error) { - var shares share.NamespacedShares - for { - var row pb.NamespaceRowResponse - _, err := serde.Read(stream, &row) - if err != nil { - if errors.Is(err, io.EOF) { - // all data is received and steam is closed by server - return shares, nil - } - c.metrics.ObserveRequests(ctx, 1, p2p.StatusReadRespErr) - return nil, err - } - var proof nmt.Proof - if row.Proof != nil { - if len(row.Shares) != 0 { - proof = nmt.NewInclusionProof( - int(row.Proof.Start), - int(row.Proof.End), - row.Proof.Nodes, - row.Proof.IsMaxNamespaceIgnored, - ) - } else { - proof = nmt.NewAbsenceProof( - int(row.Proof.Start), - int(row.Proof.End), - row.Proof.Nodes, - row.Proof.LeafHash, - row.Proof.IsMaxNamespaceIgnored, - ) - } - } - shares = append(shares, share.NamespacedRow{ - Shares: row.Shares, - Proof: &proof, - }) - } -} - func (c *Client) setStreamDeadlines(ctx context.Context, stream network.Stream) { // set read/write deadline to use context deadline if it exists deadline, ok := ctx.Deadline() @@ -204,20 +171,20 @@ func (c *Client) setStreamDeadlines(ctx context.Context, stream network.Stream) } } -func (c *Client) convertStatusToErr(ctx context.Context, status pb.StatusCode) error { +func (c *Client) convertStatusToErr(ctx context.Context, status shrexpb.Status) error { switch status { - case pb.StatusCode_OK: - c.metrics.ObserveRequests(ctx, 1, p2p.StatusSuccess) + case shrexpb.Status_OK: + c.metrics.ObserveRequests(ctx, 1, shrex.StatusSuccess) return nil - case pb.StatusCode_NOT_FOUND: - c.metrics.ObserveRequests(ctx, 1, p2p.StatusNotFound) - return p2p.ErrNotFound - case pb.StatusCode_INVALID: + case shrexpb.Status_NOT_FOUND: + c.metrics.ObserveRequests(ctx, 1, shrex.StatusNotFound) + return shrex.ErrNotFound + case shrexpb.Status_INVALID: log.Warn("client-nd: invalid request") fallthrough - 
case pb.StatusCode_INTERNAL: + case shrexpb.Status_INTERNAL: fallthrough default: - return p2p.ErrInvalidResponse + return shrex.ErrInvalidResponse } } diff --git a/share/p2p/shrexnd/doc.go b/share/shwap/p2p/shrex/shrexnd/doc.go similarity index 100% rename from share/p2p/shrexnd/doc.go rename to share/shwap/p2p/shrex/shrexnd/doc.go diff --git a/share/p2p/shrexnd/exchange_test.go b/share/shwap/p2p/shrex/shrexnd/exchange_test.go similarity index 68% rename from share/p2p/shrexnd/exchange_test.go rename to share/shwap/p2p/shrex/shrexnd/exchange_test.go index cb8bbe9d74..81abdfba26 100644 --- a/share/p2p/shrexnd/exchange_test.go +++ b/share/shwap/p2p/shrex/shrexnd/exchange_test.go @@ -3,38 +3,39 @@ package shrexnd import ( "context" "sync" + "sync/atomic" "testing" "time" - "github.com/ipfs/go-datastore" - ds_sync "github.com/ipfs/go-datastore/sync" libhost "github.com/libp2p/go-libp2p/core/host" "github.com/libp2p/go-libp2p/core/network" mocknet "github.com/libp2p/go-libp2p/p2p/net/mock" "github.com/stretchr/testify/require" "github.com/celestiaorg/celestia-node/share" - "github.com/celestiaorg/celestia-node/share/eds" "github.com/celestiaorg/celestia-node/share/eds/edstest" - "github.com/celestiaorg/celestia-node/share/p2p" "github.com/celestiaorg/celestia-node/share/sharetest" + "github.com/celestiaorg/celestia-node/share/shwap/p2p/shrex" + "github.com/celestiaorg/celestia-node/store" ) func TestExchange_RequestND_NotFound(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) t.Cleanup(cancel) edsStore, client, server := makeExchange(t) - require.NoError(t, edsStore.Start(ctx)) require.NoError(t, server.Start(ctx)) + height := atomic.Uint64{} + height.Add(1) + t.Run("CAR_not_exist", func(t *testing.T) { ctx, cancel := context.WithTimeout(ctx, time.Second) t.Cleanup(cancel) - root := share.Root{} namespace := sharetest.RandV0Namespace() - _, err := client.RequestND(ctx, &root, namespace, server.host.ID()) - require.ErrorIs(t, err, 
p2p.ErrNotFound) + height := height.Add(1) + _, err := client.RequestND(ctx, height, namespace, server.host.ID()) + require.ErrorIs(t, err, shrex.ErrNotFound) }) t.Run("ErrNamespaceNotFound", func(t *testing.T) { @@ -42,12 +43,15 @@ func TestExchange_RequestND_NotFound(t *testing.T) { t.Cleanup(cancel) eds := edstest.RandEDS(t, 4) - dah, err := share.NewRoot(eds) + roots, err := share.NewAxisRoots(eds) + require.NoError(t, err) + + height := height.Add(1) + err = edsStore.PutODSQ4(ctx, roots, height, eds) require.NoError(t, err) - require.NoError(t, edsStore.Put(ctx, dah.Hash(), eds)) namespace := sharetest.RandV0Namespace() - emptyShares, err := client.RequestND(ctx, dah, namespace, server.host.ID()) + emptyShares, err := client.RequestND(ctx, height, namespace, server.host.ID()) require.NoError(t, err) require.Empty(t, emptyShares.Flatten()) }) @@ -83,34 +87,24 @@ func TestExchange_RequestND(t *testing.T) { t.Fatal("timeout") } } - middleware := p2p.NewMiddleware(rateLimit) + middleware := shrex.NewMiddleware(rateLimit) server.host.SetStreamHandler(server.protocolID, middleware.RateLimitHandler(mockHandler)) // take server concurrency slots with blocked requests for i := 0; i < rateLimit; i++ { go func(i int) { - client.RequestND(ctx, nil, sharetest.RandV0Namespace(), server.host.ID()) //nolint:errcheck + client.RequestND(ctx, 1, sharetest.RandV0Namespace(), server.host.ID()) //nolint:errcheck }(i) } // wait until all server slots are taken wg.Wait() - _, err = client.RequestND(ctx, nil, sharetest.RandV0Namespace(), server.host.ID()) - require.ErrorIs(t, err, p2p.ErrRateLimited) + _, err = client.RequestND(ctx, 1, sharetest.RandV0Namespace(), server.host.ID()) + require.ErrorIs(t, err, shrex.ErrRateLimited) }) } -func newStore(t *testing.T) *eds.Store { - t.Helper() - - storeCfg := eds.DefaultParameters() - ds := ds_sync.MutexWrap(datastore.NewMapDatastore()) - store, err := eds.NewStore(storeCfg, t.TempDir(), ds) - require.NoError(t, err) - return store -} - 
func createMocknet(t *testing.T, amount int) []libhost.Host { t.Helper() @@ -120,15 +114,16 @@ func createMocknet(t *testing.T, amount int) []libhost.Host { return net.Hosts() } -func makeExchange(t *testing.T) (*eds.Store, *Client, *Server) { +func makeExchange(t *testing.T) (*store.Store, *Client, *Server) { t.Helper() - store := newStore(t) + s, err := store.NewStore(store.DefaultParameters(), t.TempDir()) + require.NoError(t, err) hosts := createMocknet(t, 2) client, err := NewClient(DefaultParameters(), hosts[0]) require.NoError(t, err) - server, err := NewServer(DefaultParameters(), hosts[1], store) + server, err := NewServer(DefaultParameters(), hosts[1], s) require.NoError(t, err) - return store, client, server + return s, client, server } diff --git a/share/p2p/shrexnd/params.go b/share/shwap/p2p/shrex/shrexnd/params.go similarity index 61% rename from share/p2p/shrexnd/params.go rename to share/shwap/p2p/shrex/shrexnd/params.go index 8489627a07..5544ae6b27 100644 --- a/share/p2p/shrexnd/params.go +++ b/share/shwap/p2p/shrex/shrexnd/params.go @@ -5,22 +5,23 @@ import ( logging "github.com/ipfs/go-log/v2" - "github.com/celestiaorg/celestia-node/share/p2p" + "github.com/celestiaorg/celestia-node/share/shwap" + "github.com/celestiaorg/celestia-node/share/shwap/p2p/shrex" ) -const protocolString = "/shrex/nd/v0.0.3" +const protocolString = shrex.ProtocolString + shwap.NamespaceDataName var log = logging.Logger("shrex/nd") // Parameters is the set of parameters that must be configured for the shrex/eds protocol. 
-type Parameters = p2p.Parameters +type Parameters = shrex.Parameters func DefaultParameters() *Parameters { - return p2p.DefaultParameters() + return shrex.DefaultParameters() } func (c *Client) WithMetrics() error { - metrics, err := p2p.InitClientMetrics("nd") + metrics, err := shrex.InitClientMetrics("nd") if err != nil { return fmt.Errorf("shrex/nd: init Metrics: %w", err) } @@ -29,7 +30,7 @@ func (c *Client) WithMetrics() error { } func (srv *Server) WithMetrics() error { - metrics, err := p2p.InitServerMetrics("nd") + metrics, err := shrex.InitServerMetrics("nd") if err != nil { return fmt.Errorf("shrex/nd: init Metrics: %w", err) } diff --git a/share/shwap/p2p/shrex/shrexnd/server.go b/share/shwap/p2p/shrex/shrexnd/server.go new file mode 100644 index 0000000000..0193c91ae1 --- /dev/null +++ b/share/shwap/p2p/shrex/shrexnd/server.go @@ -0,0 +1,222 @@ +package shrexnd + +import ( + "context" + "errors" + "fmt" + "time" + + "github.com/libp2p/go-libp2p/core/host" + "github.com/libp2p/go-libp2p/core/network" + "github.com/libp2p/go-libp2p/core/protocol" + "go.uber.org/zap" + + "github.com/celestiaorg/go-libp2p-messenger/serde" + + "github.com/celestiaorg/celestia-node/libs/utils" + "github.com/celestiaorg/celestia-node/share/eds" + "github.com/celestiaorg/celestia-node/share/shwap" + "github.com/celestiaorg/celestia-node/share/shwap/p2p/shrex" + shrexpb "github.com/celestiaorg/celestia-node/share/shwap/p2p/shrex/pb" + "github.com/celestiaorg/celestia-node/store" +) + +// Server implements server side of shrex/nd protocol to serve namespaced share to remote +// peers. 
+type Server struct { + cancel context.CancelFunc + + host host.Host + protocolID protocol.ID + + handler network.StreamHandler + store *store.Store + + params *Parameters + middleware *shrex.Middleware + metrics *shrex.Metrics +} + +// NewServer creates new Server +func NewServer(params *Parameters, host host.Host, store *store.Store) (*Server, error) { + if err := params.Validate(); err != nil { + return nil, fmt.Errorf("shrex-nd: server creation failed: %w", err) + } + + srv := &Server{ + store: store, + host: host, + params: params, + protocolID: shrex.ProtocolID(params.NetworkID(), protocolString), + middleware: shrex.NewMiddleware(params.ConcurrencyLimit), + } + + ctx, cancel := context.WithCancel(context.Background()) + srv.cancel = cancel + + srv.handler = srv.middleware.RateLimitHandler(srv.streamHandler(ctx)) + return srv, nil +} + +// Start starts the server +func (srv *Server) Start(context.Context) error { + srv.host.SetStreamHandler(srv.protocolID, srv.handler) + return nil +} + +// Stop stops the server +func (srv *Server) Stop(context.Context) error { + srv.cancel() + srv.host.RemoveStreamHandler(srv.protocolID) + return nil +} + +func (srv *Server) streamHandler(ctx context.Context) network.StreamHandler { + return func(s network.Stream) { + err := srv.handleNamespaceData(ctx, s) + if err != nil { + s.Reset() //nolint:errcheck + return + } + if err = s.Close(); err != nil { + log.Debugw("server: closing stream", "err", err) + } + } +} + +// SetHandler sets server handler +func (srv *Server) SetHandler(handler network.StreamHandler) { + srv.handler = handler +} + +func (srv *Server) observeRateLimitedRequests() { + numRateLimited := srv.middleware.DrainCounter() + if numRateLimited > 0 { + srv.metrics.ObserveRequests(context.Background(), numRateLimited, shrex.StatusRateLimited) + } +} + +func (srv *Server) handleNamespaceData(ctx context.Context, stream network.Stream) error { + logger := log.With("source", "server", "peer", 
stream.Conn().RemotePeer().String()) + logger.Debug("handling nd request") + + srv.observeRateLimitedRequests() + ndid, err := srv.readRequest(logger, stream) + if err != nil { + logger.Warnw("read request", "err", err) + srv.metrics.ObserveRequests(ctx, 1, shrex.StatusBadRequest) + return err + } + + logger = logger.With( + "namespace", ndid.DataNamespace.String(), + "height", ndid.Height, + ) + logger.Debugw("new request") + + ctx, cancel := context.WithTimeout(ctx, srv.params.HandleRequestTimeout) + defer cancel() + + nd, status, err := srv.getNamespaceData(ctx, ndid) + if err != nil { + // server should respond with status regardless if there was an error getting data + sendErr := srv.respondStatus(ctx, logger, stream, status) + if sendErr != nil { + logger.Errorw("sending response", "err", sendErr) + srv.metrics.ObserveRequests(ctx, 1, shrex.StatusSendRespErr) + } + logger.Errorw("handling request", "err", err) + return errors.Join(err, sendErr) + } + + err = srv.respondStatus(ctx, logger, stream, status) + if err != nil { + logger.Errorw("sending response", "err", err) + srv.metrics.ObserveRequests(ctx, 1, shrex.StatusSendRespErr) + return err + } + + _, err = nd.WriteTo(stream) + if err != nil { + logger.Errorw("send nd data", "err", err) + srv.metrics.ObserveRequests(ctx, 1, shrex.StatusSendRespErr) + return err + } + return nil +} + +func (srv *Server) readRequest( + logger *zap.SugaredLogger, + stream network.Stream, +) (shwap.NamespaceDataID, error) { + err := stream.SetReadDeadline(time.Now().Add(srv.params.ServerReadTimeout)) + if err != nil { + logger.Debugw("setting read deadline", "err", err) + } + + ndid := shwap.NamespaceDataID{} + _, err = ndid.ReadFrom(stream) + if err != nil { + return shwap.NamespaceDataID{}, fmt.Errorf("reading request: %w", err) + } + + err = stream.CloseRead() + if err != nil { + logger.Warnw("closing read side of the stream", "err", err) + } + + return ndid, nil +} + +func (srv *Server) getNamespaceData( + ctx 
context.Context, + id shwap.NamespaceDataID, +) (shwap.NamespaceData, shrexpb.Status, error) { + file, err := srv.store.GetByHeight(ctx, id.Height) + if errors.Is(err, store.ErrNotFound) { + return nil, shrexpb.Status_NOT_FOUND, nil + } + if err != nil { + return nil, shrexpb.Status_INTERNAL, fmt.Errorf("retrieving DAH: %w", err) + } + defer utils.CloseAndLog(log, "file", file) + + nd, err := eds.NamespaceData(ctx, file, id.DataNamespace) + if err != nil { + return nil, shrexpb.Status_INVALID, fmt.Errorf("getting nd: %w", err) + } + + return nd, shrexpb.Status_OK, nil +} + +func (srv *Server) respondStatus( + ctx context.Context, + logger *zap.SugaredLogger, + stream network.Stream, + status shrexpb.Status, +) error { + srv.observeStatus(ctx, status) + + err := stream.SetWriteDeadline(time.Now().Add(srv.params.ServerWriteTimeout)) + if err != nil { + logger.Debugw("setting write deadline", "err", err) + } + + _, err = serde.Write(stream, &shrexpb.Response{Status: status}) + if err != nil { + return fmt.Errorf("writing response: %w", err) + } + + return nil +} + +func (srv *Server) observeStatus(ctx context.Context, status shrexpb.Status) { + switch { + case status == shrexpb.Status_OK: + srv.metrics.ObserveRequests(ctx, 1, shrex.StatusSuccess) + case status != shrexpb.Status_NOT_FOUND: + srv.metrics.ObserveRequests(ctx, 1, shrex.StatusNotFound) + case status == shrexpb.Status_INVALID: + srv.metrics.ObserveRequests(ctx, 1, shrex.StatusInternalErr) + } +} diff --git a/share/p2p/shrexsub/doc.go b/share/shwap/p2p/shrex/shrexsub/doc.go similarity index 100% rename from share/p2p/shrexsub/doc.go rename to share/shwap/p2p/shrex/shrexsub/doc.go diff --git a/share/p2p/shrexsub/pb/notification.pb.go b/share/shwap/p2p/shrex/shrexsub/pb/notification.pb.go similarity index 85% rename from share/p2p/shrexsub/pb/notification.pb.go rename to share/shwap/p2p/shrex/shrexsub/pb/notification.pb.go index e154dc62b7..c7cddbba5c 100644 --- a/share/p2p/shrexsub/pb/notification.pb.go +++ 
b/share/shwap/p2p/shrex/shrexsub/pb/notification.pb.go @@ -1,5 +1,5 @@ // Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: share/p2p/shrexsub/pb/notification.proto +// source: share/shwap/p2p/shrex/shrexsub/pb/notification.proto package share_p2p_shrex_sub @@ -31,7 +31,7 @@ func (m *RecentEDSNotification) Reset() { *m = RecentEDSNotification{} } func (m *RecentEDSNotification) String() string { return proto.CompactTextString(m) } func (*RecentEDSNotification) ProtoMessage() {} func (*RecentEDSNotification) Descriptor() ([]byte, []int) { - return fileDescriptor_1a6ade914b560e62, []int{0} + return fileDescriptor_c16b670e7e556100, []int{0} } func (m *RecentEDSNotification) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -79,22 +79,23 @@ func init() { } func init() { - proto.RegisterFile("share/p2p/shrexsub/pb/notification.proto", fileDescriptor_1a6ade914b560e62) + proto.RegisterFile("share/shwap/p2p/shrex/shrexsub/pb/notification.proto", fileDescriptor_c16b670e7e556100) } -var fileDescriptor_1a6ade914b560e62 = []byte{ - // 176 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xd2, 0x28, 0xce, 0x48, 0x2c, - 0x4a, 0xd5, 0x2f, 0x30, 0x2a, 0xd0, 0x2f, 0xce, 0x28, 0x4a, 0xad, 0x28, 0x2e, 0x4d, 0xd2, 0x2f, - 0x48, 0xd2, 0xcf, 0xcb, 0x2f, 0xc9, 0x4c, 0xcb, 0x4c, 0x4e, 0x2c, 0xc9, 0xcc, 0xcf, 0xd3, 0x2b, - 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x12, 0x06, 0xab, 0xd4, 0x2b, 0x30, 0x2a, 0xd0, 0x03, 0xab, 0xd4, - 0x2b, 0x2e, 0x4d, 0x52, 0xf2, 0xe1, 0x12, 0x0d, 0x4a, 0x4d, 0x4e, 0xcd, 0x2b, 0x71, 0x75, 0x09, - 0xf6, 0x43, 0xd2, 0x23, 0x24, 0xc6, 0xc5, 0x96, 0x91, 0x9a, 0x99, 0x9e, 0x51, 0x22, 0xc1, 0xa8, - 0xc0, 0xa8, 0xc1, 0x12, 0x04, 0xe5, 0x09, 0x49, 0x73, 0x71, 0xa6, 0x24, 0x96, 0x24, 0xc6, 0x67, - 0x24, 0x16, 0x67, 0x48, 0x30, 0x29, 0x30, 0x6a, 0xf0, 0x04, 0x71, 0x80, 0x04, 0x3c, 0x12, 0x8b, - 0x33, 0x9c, 0x24, 0x4e, 0x3c, 0x92, 0x63, 0xbc, 0xf0, 0x48, 0x8e, 0xf1, 0xc1, 0x23, 0x39, 0xc6, - 0x09, 0x8f, 
0xe5, 0x18, 0x2e, 0x3c, 0x96, 0x63, 0xb8, 0xf1, 0x58, 0x8e, 0x21, 0x89, 0x0d, 0xec, - 0x06, 0x63, 0x40, 0x00, 0x00, 0x00, 0xff, 0xff, 0x99, 0x16, 0xea, 0xc6, 0xaf, 0x00, 0x00, 0x00, +var fileDescriptor_c16b670e7e556100 = []byte{ + // 183 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x32, 0x29, 0xce, 0x48, 0x2c, + 0x4a, 0xd5, 0x2f, 0xce, 0x28, 0x4f, 0x2c, 0xd0, 0x2f, 0x30, 0x2a, 0xd0, 0x2f, 0xce, 0x28, 0x4a, + 0xad, 0x80, 0x90, 0xc5, 0xa5, 0x49, 0xfa, 0x05, 0x49, 0xfa, 0x79, 0xf9, 0x25, 0x99, 0x69, 0x99, + 0xc9, 0x89, 0x25, 0x99, 0xf9, 0x79, 0x7a, 0x05, 0x45, 0xf9, 0x25, 0xf9, 0x42, 0xc2, 0x60, 0x5d, + 0x7a, 0x05, 0x46, 0x05, 0x7a, 0x60, 0x95, 0x7a, 0xc5, 0xa5, 0x49, 0x4a, 0x3e, 0x5c, 0xa2, 0x41, + 0xa9, 0xc9, 0xa9, 0x79, 0x25, 0xae, 0x2e, 0xc1, 0x7e, 0x48, 0x7a, 0x84, 0xc4, 0xb8, 0xd8, 0x32, + 0x52, 0x33, 0xd3, 0x33, 0x4a, 0x24, 0x18, 0x15, 0x18, 0x35, 0x58, 0x82, 0xa0, 0x3c, 0x21, 0x69, + 0x2e, 0xce, 0x94, 0xc4, 0x92, 0xc4, 0xf8, 0x8c, 0xc4, 0xe2, 0x0c, 0x09, 0x26, 0x05, 0x46, 0x0d, + 0x9e, 0x20, 0x0e, 0x90, 0x80, 0x47, 0x62, 0x71, 0x86, 0x93, 0xc4, 0x89, 0x47, 0x72, 0x8c, 0x17, + 0x1e, 0xc9, 0x31, 0x3e, 0x78, 0x24, 0xc7, 0x38, 0xe1, 0xb1, 0x1c, 0xc3, 0x85, 0xc7, 0x72, 0x0c, + 0x37, 0x1e, 0xcb, 0x31, 0x24, 0xb1, 0x81, 0xdd, 0x60, 0x0c, 0x08, 0x00, 0x00, 0xff, 0xff, 0xc0, + 0x55, 0x8a, 0x06, 0xbb, 0x00, 0x00, 0x00, } func (m *RecentEDSNotification) Marshal() (dAtA []byte, err error) { diff --git a/share/p2p/shrexsub/pb/notification.proto b/share/shwap/p2p/shrex/shrexsub/pb/notification.proto similarity index 100% rename from share/p2p/shrexsub/pb/notification.proto rename to share/shwap/p2p/shrex/shrexsub/pb/notification.proto diff --git a/share/p2p/shrexsub/pubsub.go b/share/shwap/p2p/shrex/shrexsub/pubsub.go similarity index 93% rename from share/p2p/shrexsub/pubsub.go rename to share/shwap/p2p/shrex/shrexsub/pubsub.go index 774fb436fa..d1861cfe12 100644 --- a/share/p2p/shrexsub/pubsub.go +++ 
b/share/shwap/p2p/shrex/shrexsub/pubsub.go @@ -10,14 +10,14 @@ import ( "github.com/libp2p/go-libp2p/core/peer" "github.com/celestiaorg/celestia-node/share" - pb "github.com/celestiaorg/celestia-node/share/p2p/shrexsub/pb" + pb "github.com/celestiaorg/celestia-node/share/shwap/p2p/shrex/shrexsub/pb" ) var log = logging.Logger("shrex-sub") // pubsubTopic hardcodes the name of the EDS floodsub topic with the provided networkID. func pubsubTopicID(networkID string) string { - return fmt.Sprintf("%s/eds-sub/v0.1.0", networkID) + return fmt.Sprintf("%s/eds-sub/v0.2.0", networkID) } // ValidatorFn is an injectable func and governs EDS notification msg validity. @@ -111,7 +111,7 @@ func (v ValidatorFn) validate(ctx context.Context, p peer.ID, msg *pubsub.Messag DataHash: pbmsg.DataHash, Height: pbmsg.Height, } - if n.Height == 0 || n.DataHash.IsEmptyRoot() || n.DataHash.Validate() != nil { + if n.Height == 0 || n.DataHash.IsEmptyEDS() || n.DataHash.Validate() != nil { // hard reject malicious height (height 0 does not exist) and // empty/invalid datahashes return pubsub.ValidationReject @@ -129,7 +129,7 @@ func (s *PubSub) Subscribe() (*Subscription, error) { // Broadcast sends the EDS notification (DataHash) to every connected peer. 
func (s *PubSub) Broadcast(ctx context.Context, notification Notification) error { - if notification.DataHash.IsEmptyRoot() { + if notification.DataHash.IsEmptyEDS() { // no need to broadcast datahash of an empty block EDS return nil } diff --git a/share/p2p/shrexsub/pubsub_test.go b/share/shwap/p2p/shrex/shrexsub/pubsub_test.go similarity index 97% rename from share/p2p/shrexsub/pubsub_test.go rename to share/shwap/p2p/shrex/shrexsub/pubsub_test.go index 5938a414a2..59602da29b 100644 --- a/share/p2p/shrexsub/pubsub_test.go +++ b/share/shwap/p2p/shrex/shrexsub/pubsub_test.go @@ -11,7 +11,7 @@ import ( "github.com/stretchr/testify/require" "github.com/tendermint/tendermint/libs/rand" - pb "github.com/celestiaorg/celestia-node/share/p2p/shrexsub/pb" + pb "github.com/celestiaorg/celestia-node/share/shwap/p2p/shrex/shrexsub/pb" ) func TestPubSub(t *testing.T) { diff --git a/share/p2p/shrexsub/subscription.go b/share/shwap/p2p/shrex/shrexsub/subscription.go similarity index 94% rename from share/p2p/shrexsub/subscription.go rename to share/shwap/p2p/shrex/shrexsub/subscription.go index 32a3e65e51..5021f090c2 100644 --- a/share/p2p/shrexsub/subscription.go +++ b/share/shwap/p2p/shrex/shrexsub/subscription.go @@ -6,7 +6,7 @@ import ( pubsub "github.com/libp2p/go-libp2p-pubsub" - pb "github.com/celestiaorg/celestia-node/share/p2p/shrexsub/pb" + pb "github.com/celestiaorg/celestia-node/share/shwap/p2p/shrex/shrexsub/pb" ) // Subscription is a wrapper over pubsub.Subscription that handles diff --git a/share/shwap/pb/shwap.pb.go b/share/shwap/pb/shwap.pb.go new file mode 100644 index 0000000000..000bf78ca7 --- /dev/null +++ b/share/shwap/pb/shwap.pb.go @@ -0,0 +1,1114 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. 
+// source: share/shwap/pb/shwap.proto + +package pb + +import ( + fmt "fmt" + pb "github.com/celestiaorg/nmt/pb" + proto "github.com/gogo/protobuf/proto" + io "io" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +type AxisType int32 + +const ( + AxisType_ROW AxisType = 0 + AxisType_COL AxisType = 1 +) + +var AxisType_name = map[int32]string{ + 0: "ROW", + 1: "COL", +} + +var AxisType_value = map[string]int32{ + "ROW": 0, + "COL": 1, +} + +func (x AxisType) String() string { + return proto.EnumName(AxisType_name, int32(x)) +} + +func (AxisType) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_9431653f3c9f0bcb, []int{0} +} + +type Row_HalfSide int32 + +const ( + Row_LEFT Row_HalfSide = 0 + Row_RIGHT Row_HalfSide = 1 +) + +var Row_HalfSide_name = map[int32]string{ + 0: "LEFT", + 1: "RIGHT", +} + +var Row_HalfSide_value = map[string]int32{ + "LEFT": 0, + "RIGHT": 1, +} + +func (x Row_HalfSide) String() string { + return proto.EnumName(Row_HalfSide_name, int32(x)) +} + +func (Row_HalfSide) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_9431653f3c9f0bcb, []int{0, 0} +} + +type Row struct { + SharesHalf []*Share `protobuf:"bytes,1,rep,name=shares_half,json=sharesHalf,proto3" json:"shares_half,omitempty"` + HalfSide Row_HalfSide `protobuf:"varint,2,opt,name=half_side,json=halfSide,proto3,enum=shwap.Row_HalfSide" json:"half_side,omitempty"` +} + +func (m *Row) Reset() { *m = Row{} } +func (m *Row) String() string { return proto.CompactTextString(m) } +func (*Row) ProtoMessage() {} +func (*Row) 
Descriptor() ([]byte, []int) { + return fileDescriptor_9431653f3c9f0bcb, []int{0} +} +func (m *Row) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Row) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Row.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Row) XXX_Merge(src proto.Message) { + xxx_messageInfo_Row.Merge(m, src) +} +func (m *Row) XXX_Size() int { + return m.Size() +} +func (m *Row) XXX_DiscardUnknown() { + xxx_messageInfo_Row.DiscardUnknown(m) +} + +var xxx_messageInfo_Row proto.InternalMessageInfo + +func (m *Row) GetSharesHalf() []*Share { + if m != nil { + return m.SharesHalf + } + return nil +} + +func (m *Row) GetHalfSide() Row_HalfSide { + if m != nil { + return m.HalfSide + } + return Row_LEFT +} + +type Sample struct { + Share *Share `protobuf:"bytes,1,opt,name=share,proto3" json:"share,omitempty"` + Proof *pb.Proof `protobuf:"bytes,2,opt,name=proof,proto3" json:"proof,omitempty"` + ProofType AxisType `protobuf:"varint,3,opt,name=proof_type,json=proofType,proto3,enum=shwap.AxisType" json:"proof_type,omitempty"` +} + +func (m *Sample) Reset() { *m = Sample{} } +func (m *Sample) String() string { return proto.CompactTextString(m) } +func (*Sample) ProtoMessage() {} +func (*Sample) Descriptor() ([]byte, []int) { + return fileDescriptor_9431653f3c9f0bcb, []int{1} +} +func (m *Sample) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Sample) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Sample.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Sample) XXX_Merge(src proto.Message) { + xxx_messageInfo_Sample.Merge(m, src) +} +func (m *Sample) XXX_Size() int 
{ + return m.Size() +} +func (m *Sample) XXX_DiscardUnknown() { + xxx_messageInfo_Sample.DiscardUnknown(m) +} + +var xxx_messageInfo_Sample proto.InternalMessageInfo + +func (m *Sample) GetShare() *Share { + if m != nil { + return m.Share + } + return nil +} + +func (m *Sample) GetProof() *pb.Proof { + if m != nil { + return m.Proof + } + return nil +} + +func (m *Sample) GetProofType() AxisType { + if m != nil { + return m.ProofType + } + return AxisType_ROW +} + +type RowNamespaceData struct { + Shares []*Share `protobuf:"bytes,1,rep,name=shares,proto3" json:"shares,omitempty"` + Proof *pb.Proof `protobuf:"bytes,2,opt,name=proof,proto3" json:"proof,omitempty"` +} + +func (m *RowNamespaceData) Reset() { *m = RowNamespaceData{} } +func (m *RowNamespaceData) String() string { return proto.CompactTextString(m) } +func (*RowNamespaceData) ProtoMessage() {} +func (*RowNamespaceData) Descriptor() ([]byte, []int) { + return fileDescriptor_9431653f3c9f0bcb, []int{2} +} +func (m *RowNamespaceData) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RowNamespaceData) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_RowNamespaceData.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *RowNamespaceData) XXX_Merge(src proto.Message) { + xxx_messageInfo_RowNamespaceData.Merge(m, src) +} +func (m *RowNamespaceData) XXX_Size() int { + return m.Size() +} +func (m *RowNamespaceData) XXX_DiscardUnknown() { + xxx_messageInfo_RowNamespaceData.DiscardUnknown(m) +} + +var xxx_messageInfo_RowNamespaceData proto.InternalMessageInfo + +func (m *RowNamespaceData) GetShares() []*Share { + if m != nil { + return m.Shares + } + return nil +} + +func (m *RowNamespaceData) GetProof() *pb.Proof { + if m != nil { + return m.Proof + } + return nil +} + +type Share struct { + Data []byte 
`protobuf:"bytes,1,opt,name=data,proto3" json:"data,omitempty"` +} + +func (m *Share) Reset() { *m = Share{} } +func (m *Share) String() string { return proto.CompactTextString(m) } +func (*Share) ProtoMessage() {} +func (*Share) Descriptor() ([]byte, []int) { + return fileDescriptor_9431653f3c9f0bcb, []int{3} +} +func (m *Share) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Share) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Share.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Share) XXX_Merge(src proto.Message) { + xxx_messageInfo_Share.Merge(m, src) +} +func (m *Share) XXX_Size() int { + return m.Size() +} +func (m *Share) XXX_DiscardUnknown() { + xxx_messageInfo_Share.DiscardUnknown(m) +} + +var xxx_messageInfo_Share proto.InternalMessageInfo + +func (m *Share) GetData() []byte { + if m != nil { + return m.Data + } + return nil +} + +func init() { + proto.RegisterEnum("shwap.AxisType", AxisType_name, AxisType_value) + proto.RegisterEnum("shwap.Row_HalfSide", Row_HalfSide_name, Row_HalfSide_value) + proto.RegisterType((*Row)(nil), "shwap.Row") + proto.RegisterType((*Sample)(nil), "shwap.Sample") + proto.RegisterType((*RowNamespaceData)(nil), "shwap.RowNamespaceData") + proto.RegisterType((*Share)(nil), "shwap.Share") +} + +func init() { proto.RegisterFile("share/shwap/pb/shwap.proto", fileDescriptor_9431653f3c9f0bcb) } + +var fileDescriptor_9431653f3c9f0bcb = []byte{ + // 381 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x92, 0x4f, 0x6b, 0xe2, 0x40, + 0x18, 0xc6, 0x33, 0x1b, 0xe3, 0xc6, 0x57, 0xd1, 0x30, 0x7b, 0x09, 0xee, 0x92, 0x95, 0xb0, 0x0b, + 0xb2, 0x60, 0xb2, 0xe8, 0x27, 0xd8, 0xbf, 0xb5, 0x60, 0x6b, 0x19, 0x85, 0x42, 0x2f, 0x61, 0x62, + 0x46, 0x13, 0x88, 0x9d, 0x21, 0x49, 0x49, 0x3d, 0xf7, 
0xd0, 0x6b, 0x3f, 0x56, 0x8f, 0x1e, 0x7b, + 0x2c, 0xfa, 0x45, 0x4a, 0x26, 0xb1, 0x14, 0xda, 0x43, 0x6f, 0xbf, 0xcc, 0xf3, 0xcc, 0xbc, 0xcf, + 0x13, 0x5e, 0xe8, 0xa6, 0x21, 0x4d, 0x98, 0x9b, 0x86, 0x39, 0x15, 0xae, 0xf0, 0x4b, 0x70, 0x44, + 0xc2, 0x33, 0x8e, 0x35, 0xf9, 0xd1, 0x6d, 0x0b, 0xdf, 0x15, 0x09, 0xe7, 0xcb, 0xf2, 0xd8, 0xbe, + 0x45, 0xa0, 0x12, 0x9e, 0xe3, 0x01, 0x34, 0xe5, 0xe5, 0xd4, 0x0b, 0x69, 0xbc, 0x34, 0x51, 0x4f, + 0xed, 0x37, 0x87, 0x2d, 0xa7, 0x7c, 0x61, 0x56, 0x28, 0x04, 0x4a, 0xc3, 0x98, 0xc6, 0x4b, 0xfc, + 0x13, 0x1a, 0x85, 0xcf, 0x4b, 0xa3, 0x80, 0x99, 0x1f, 0x7a, 0xa8, 0xdf, 0x1e, 0x7e, 0xaa, 0xcc, + 0x84, 0xe7, 0x4e, 0xe1, 0x99, 0x45, 0x01, 0x23, 0x7a, 0x58, 0x91, 0xfd, 0x15, 0xf4, 0xc3, 0x29, + 0xd6, 0xa1, 0x36, 0xf9, 0xf7, 0x7f, 0x6e, 0x28, 0xb8, 0x01, 0x1a, 0x39, 0x3e, 0x1a, 0xcf, 0x0d, + 0x64, 0xdf, 0x20, 0xa8, 0xcf, 0xe8, 0x5a, 0xc4, 0x0c, 0xdb, 0xa0, 0xc9, 0x59, 0x26, 0xea, 0xa1, + 0x57, 0x31, 0x4a, 0x09, 0x7f, 0x07, 0x4d, 0xf6, 0x90, 0xd3, 0x9b, 0xc3, 0x8e, 0x53, 0xb5, 0xf2, + 0x9d, 0xb3, 0x02, 0x48, 0xa9, 0x62, 0x07, 0x40, 0x82, 0x97, 0x6d, 0x04, 0x33, 0x55, 0x99, 0xb4, + 0x53, 0xbd, 0xf7, 0xeb, 0x3a, 0x4a, 0xe7, 0x1b, 0xc1, 0x48, 0x43, 0x5a, 0x0a, 0xb4, 0x3d, 0x30, + 0x08, 0xcf, 0x4f, 0xe9, 0x9a, 0xa5, 0x82, 0x2e, 0xd8, 0x5f, 0x9a, 0x51, 0xfc, 0x0d, 0xea, 0x65, + 0xf5, 0x37, 0x7f, 0x4b, 0xa5, 0xbd, 0x33, 0x90, 0xfd, 0x19, 0x34, 0x79, 0x0f, 0x63, 0xa8, 0x05, + 0x34, 0xa3, 0xb2, 0x63, 0x8b, 0x48, 0xfe, 0xf1, 0x05, 0xf4, 0x43, 0x28, 0xfc, 0x11, 0x54, 0x32, + 0x3d, 0x37, 0x94, 0x02, 0xfe, 0x4c, 0x27, 0x06, 0xfa, 0x7d, 0x72, 0xbf, 0xb3, 0xd0, 0x76, 0x67, + 0xa1, 0xc7, 0x9d, 0x85, 0xee, 0xf6, 0x96, 0xb2, 0xdd, 0x5b, 0xca, 0xc3, 0xde, 0x52, 0x2e, 0x46, + 0xab, 0x28, 0x0b, 0xaf, 0x7c, 0x67, 0xc1, 0xd7, 0xee, 0x82, 0xc5, 0x2c, 0xcd, 0x22, 0xca, 0x93, + 0xd5, 0x33, 0x0f, 0x2e, 0x79, 0x50, 0xec, 0xc5, 0xcb, 0xed, 0xf0, 0xeb, 0x72, 0x03, 0x46, 0x4f, + 0x01, 0x00, 0x00, 0xff, 0xff, 0x67, 0xb6, 0xc0, 0x8b, 0x36, 0x02, 0x00, 0x00, +} + +func (m 
*Row) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Row) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Row) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.HalfSide != 0 { + i = encodeVarintShwap(dAtA, i, uint64(m.HalfSide)) + i-- + dAtA[i] = 0x10 + } + if len(m.SharesHalf) > 0 { + for iNdEx := len(m.SharesHalf) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.SharesHalf[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintShwap(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *Sample) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Sample) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Sample) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.ProofType != 0 { + i = encodeVarintShwap(dAtA, i, uint64(m.ProofType)) + i-- + dAtA[i] = 0x18 + } + if m.Proof != nil { + { + size, err := m.Proof.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintShwap(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.Share != nil { + { + size, err := m.Share.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintShwap(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *RowNamespaceData) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := 
m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RowNamespaceData) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RowNamespaceData) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Proof != nil { + { + size, err := m.Proof.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintShwap(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if len(m.Shares) > 0 { + for iNdEx := len(m.Shares) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Shares[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintShwap(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *Share) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Share) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Share) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Data) > 0 { + i -= len(m.Data) + copy(dAtA[i:], m.Data) + i = encodeVarintShwap(dAtA, i, uint64(len(m.Data))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func encodeVarintShwap(dAtA []byte, offset int, v uint64) int { + offset -= sovShwap(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *Row) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.SharesHalf) > 0 { + for _, e := range m.SharesHalf { + l = e.Size() + n += 1 + l + sovShwap(uint64(l)) + } + } + if m.HalfSide != 0 { + n += 1 + 
sovShwap(uint64(m.HalfSide)) + } + return n +} + +func (m *Sample) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Share != nil { + l = m.Share.Size() + n += 1 + l + sovShwap(uint64(l)) + } + if m.Proof != nil { + l = m.Proof.Size() + n += 1 + l + sovShwap(uint64(l)) + } + if m.ProofType != 0 { + n += 1 + sovShwap(uint64(m.ProofType)) + } + return n +} + +func (m *RowNamespaceData) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Shares) > 0 { + for _, e := range m.Shares { + l = e.Size() + n += 1 + l + sovShwap(uint64(l)) + } + } + if m.Proof != nil { + l = m.Proof.Size() + n += 1 + l + sovShwap(uint64(l)) + } + return n +} + +func (m *Share) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Data) + if l > 0 { + n += 1 + l + sovShwap(uint64(l)) + } + return n +} + +func sovShwap(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozShwap(x uint64) (n int) { + return sovShwap(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *Row) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowShwap + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Row: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Row: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SharesHalf", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowShwap + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + 
msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthShwap + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthShwap + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SharesHalf = append(m.SharesHalf, &Share{}) + if err := m.SharesHalf[len(m.SharesHalf)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field HalfSide", wireType) + } + m.HalfSide = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowShwap + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.HalfSide |= Row_HalfSide(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipShwap(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthShwap + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Sample) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowShwap + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Sample: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Sample: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Share", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + 
return ErrIntOverflowShwap + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthShwap + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthShwap + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Share == nil { + m.Share = &Share{} + } + if err := m.Share.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Proof", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowShwap + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthShwap + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthShwap + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Proof == nil { + m.Proof = &pb.Proof{} + } + if err := m.Proof.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ProofType", wireType) + } + m.ProofType = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowShwap + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ProofType |= AxisType(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipShwap(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthShwap + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RowNamespaceData) Unmarshal(dAtA []byte) 
error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowShwap + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RowNamespaceData: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RowNamespaceData: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Shares", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowShwap + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthShwap + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthShwap + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Shares = append(m.Shares, &Share{}) + if err := m.Shares[len(m.Shares)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Proof", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowShwap + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthShwap + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthShwap + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Proof == nil { + m.Proof = &pb.Proof{} + } + if err := 
m.Proof.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipShwap(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthShwap + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Share) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowShwap + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Share: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Share: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowShwap + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthShwap + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthShwap + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...) 
+ if m.Data == nil { + m.Data = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipShwap(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthShwap + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipShwap(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowShwap + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowShwap + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowShwap + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthShwap + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupShwap + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthShwap + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthShwap = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowShwap = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupShwap = fmt.Errorf("proto: unexpected end of group") +) diff 
--git a/share/shwap/pb/shwap.proto b/share/shwap/pb/shwap.proto new file mode 100644 index 0000000000..d7daea568a --- /dev/null +++ b/share/shwap/pb/shwap.proto @@ -0,0 +1,36 @@ +// Defined in CIP-19 https://github.com/celestiaorg/CIPs/blob/82aeb7dfc472105a11babffd548c730c899a3d24/cips/cip-19.md +syntax = "proto3"; +package shwap; +option go_package = "github.com/celestiaorg/celestia-node/share/shwap/pb"; + +import "pb/proof.proto"; // celestiaorg/nmt/pb/proof.proto + +message Row { + repeated Share shares_half = 1; + HalfSide half_side= 2; + + enum HalfSide { + LEFT = 0; + RIGHT = 1; + } +} + +message Sample { + Share share = 1; + proof.pb.Proof proof = 2; + AxisType proof_type = 3; +} + +message RowNamespaceData { + repeated Share shares = 1; + proof.pb.Proof proof = 2; +} + +message Share { + bytes data = 1; +} + +enum AxisType { + ROW = 0; + COL = 1; +} diff --git a/share/shwap/row.go b/share/shwap/row.go new file mode 100644 index 0000000000..4e362beee2 --- /dev/null +++ b/share/shwap/row.go @@ -0,0 +1,150 @@ +package shwap + +import ( + "bytes" + "fmt" + + "github.com/celestiaorg/celestia-app/v2/pkg/wrapper" + + "github.com/celestiaorg/celestia-node/share" + "github.com/celestiaorg/celestia-node/share/shwap/pb" +) + +// RowName is the name identifier for the row container. +const RowName = "row_v0" + +// RowSide enumerates the possible sides of a row within an Extended Data Square (EDS). +type RowSide int + +const ( + Left RowSide = iota // Left side of the row. + Right // Right side of the row. +) + +// Row represents a portion of a row in an EDS, either left or right half. +type Row struct { + halfShares []share.Share // halfShares holds the shares of either the left or right half of a row. + side RowSide // side indicates whether the row half is left or right. +} + +// NewRow creates a new Row with the specified shares and side. 
+func NewRow(halfShares []share.Share, side RowSide) Row { + return Row{ + halfShares: halfShares, + side: side, + } +} + +// RowFromShares constructs a new Row from an Extended Data Square based on the specified index and +// side. +func RowFromShares(shares []share.Share, side RowSide) Row { + var halfShares []share.Share + if side == Right { + halfShares = shares[len(shares)/2:] // Take the right half of the shares. + } else { + halfShares = shares[:len(shares)/2] // Take the left half of the shares. + } + + return NewRow(halfShares, side) +} + +// RowFromProto converts a protobuf Row to a Row structure. +func RowFromProto(r *pb.Row) Row { + return Row{ + halfShares: SharesFromProto(r.SharesHalf), + side: sideFromProto(r.GetHalfSide()), + } +} + +// Shares reconstructs the complete row shares from the half provided, using RSMT2D for data +// recovery if needed. +func (r Row) Shares() ([]share.Share, error) { + shares := make([]share.Share, len(r.halfShares)*2) + offset := 0 + if r.side == Right { + offset = len(r.halfShares) // Position the halfShares in the second half if it's the right side. + } + for i, share := range r.halfShares { + shares[i+offset] = share + } + return share.DefaultRSMT2DCodec().Decode(shares) +} + +// ToProto converts the Row to its protobuf representation. +func (r Row) ToProto() *pb.Row { + return &pb.Row{ + SharesHalf: SharesToProto(r.halfShares), + HalfSide: r.side.ToProto(), + } +} + +// IsEmpty reports whether the Row is empty, i.e. doesn't contain any shares. +func (r Row) IsEmpty() bool { + return r.halfShares == nil +} + +// Verify checks if the row's shares match the expected number from the root data and validates +// the side of the row. 
+func (r Row) Verify(roots *share.AxisRoots, idx int) error { + if len(r.halfShares) == 0 { + return fmt.Errorf("empty half row") + } + expectedShares := len(roots.RowRoots) / 2 + if len(r.halfShares) != expectedShares { + return fmt.Errorf("shares size doesn't match root size: %d != %d", len(r.halfShares), expectedShares) + } + if err := ValidateShares(r.halfShares); err != nil { + return fmt.Errorf("invalid shares: %w", err) + } + if r.side != Left && r.side != Right { + return fmt.Errorf("invalid RowSide: %d", r.side) + } + + if err := r.verifyInclusion(roots, idx); err != nil { + return fmt.Errorf("%w: %w", ErrFailedVerification, err) + } + return nil +} + +// verifyInclusion verifies the integrity of the row's shares against the provided root hash for the +// given row index. +func (r Row) verifyInclusion(roots *share.AxisRoots, idx int) error { + shrs, err := r.Shares() + if err != nil { + return fmt.Errorf("while extending shares: %w", err) + } + + sqrLn := uint64(len(shrs) / 2) + tree := wrapper.NewErasuredNamespacedMerkleTree(sqrLn, uint(idx)) + for _, s := range shrs { + if err := tree.Push(s); err != nil { + return fmt.Errorf("while pushing shares to NMT: %w", err) + } + } + + root, err := tree.Root() + if err != nil { + return fmt.Errorf("while computing NMT root: %w", err) + } + + if !bytes.Equal(roots.RowRoots[idx], root) { + return fmt.Errorf("invalid root hash: %X != %X", root, roots.RowRoots[idx]) + } + return nil +} + +// ToProto converts a RowSide to its protobuf representation. +func (s RowSide) ToProto() pb.Row_HalfSide { + if s == Left { + return pb.Row_LEFT + } + return pb.Row_RIGHT +} + +// sideFromProto converts a protobuf Row_HalfSide back to a RowSide. 
+func sideFromProto(side pb.Row_HalfSide) RowSide { + if side == pb.Row_LEFT { + return Left + } + return Right +} diff --git a/share/shwap/row_id.go b/share/shwap/row_id.go new file mode 100644 index 0000000000..acc0495ef4 --- /dev/null +++ b/share/shwap/row_id.go @@ -0,0 +1,125 @@ +package shwap + +import ( + "encoding/binary" + "fmt" + "io" +) + +// RowIDSize defines the size in bytes of RowID, consisting of the size of EdsID and 2 bytes for +// RowIndex. +const RowIDSize = EdsIDSize + 2 + +// RowID uniquely identifies a row in the data square of a blockchain block, combining block height +// with the row's index. +type RowID struct { + EdsID // Embedding EdsID to include the block height in RowID. + RowIndex int // RowIndex specifies the position of the row within the data square. +} + +// NewRowID creates a new RowID with the specified block height, row index, and EDS size. +// It returns an error if the validation fails, ensuring the RowID +// conforms to expected constraints. +func NewRowID(height uint64, rowIdx, edsSize int) (RowID, error) { + rid := RowID{ + EdsID: EdsID{ + Height: height, + }, + RowIndex: rowIdx, + } + if err := rid.Verify(edsSize); err != nil { + return RowID{}, fmt.Errorf("verifying RowID: %w", err) + } + + return rid, nil +} + +// RowIDFromBinary decodes a RowID from its binary representation. +// It returns an error if the input data does not conform to the expected size or content format. 
+func RowIDFromBinary(data []byte) (RowID, error) { + if len(data) != RowIDSize { + return RowID{}, fmt.Errorf("invalid RowID data length: expected %d, got %d", RowIDSize, len(data)) + } + eid, err := EdsIDFromBinary(data[:EdsIDSize]) + if err != nil { + return RowID{}, fmt.Errorf("decoding EdsID: %w", err) + } + + rid := RowID{ + EdsID: eid, + RowIndex: int(binary.BigEndian.Uint16(data[EdsIDSize:])), + } + if err := rid.Validate(); err != nil { + return RowID{}, fmt.Errorf("validating RowID: %w", err) + } + + return rid, nil +} + +// Equals checks equality of RowID. +func (rid *RowID) Equals(other RowID) bool { + return rid.EdsID.Equals(other.EdsID) && rid.RowIndex == other.RowIndex +} + +// ReadFrom reads the binary form of RowID from the provided reader. +func (rid *RowID) ReadFrom(r io.Reader) (int64, error) { + data := make([]byte, RowIDSize) + n, err := io.ReadFull(r, data) + if err != nil { + return int64(n), err + } + if n != RowIDSize { + return int64(n), fmt.Errorf("RowID: expected %d bytes, got %d", RowIDSize, n) + } + id, err := RowIDFromBinary(data) + if err != nil { + return int64(n), fmt.Errorf("RowIDFromBinary: %w", err) + } + *rid = id + return int64(n), nil +} + +// MarshalBinary encodes the RowID into a binary form for storage or network transmission. +func (rid RowID) MarshalBinary() ([]byte, error) { + data := make([]byte, 0, RowIDSize) + return rid.appendTo(data), nil +} + +// WriteTo writes the binary form of RowID to the provided writer. 
+func (rid RowID) WriteTo(w io.Writer) (int64, error) { + data, err := rid.MarshalBinary() + if err != nil { + return 0, err + } + n, err := w.Write(data) + return int64(n), err +} + +// Verify validates the RowID fields and verifies that RowIndex is within the bounds of +// the square size +func (rid RowID) Verify(edsSize int) error { + if edsSize == 0 { + return fmt.Errorf("provided EDS size is zero") + } + + if rid.RowIndex >= edsSize { + return fmt.Errorf("%w, RowIndex: %d >= %d", ErrOutOfBounds, rid.RowIndex, edsSize) + } + + return rid.Validate() +} + +// Validate performs basic field validation. +func (rid RowID) Validate() error { + if rid.RowIndex < 0 { + return fmt.Errorf("%w: RowIndex: %d < 0", ErrInvalidID, rid.RowIndex) + } + return rid.EdsID.Validate() +} + +// appendTo assists in binary encoding of RowID by appending the encoded fields to the given byte +// slice. +func (rid RowID) appendTo(data []byte) []byte { + data = rid.EdsID.appendTo(data) + return binary.BigEndian.AppendUint16(data, uint16(rid.RowIndex)) +} diff --git a/share/shwap/row_id_test.go b/share/shwap/row_id_test.go new file mode 100644 index 0000000000..fa26315665 --- /dev/null +++ b/share/shwap/row_id_test.go @@ -0,0 +1,46 @@ +package shwap + +import ( + "bytes" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestRowID(t *testing.T) { + edsSize := 4 + + id, err := NewRowID(2, 1, edsSize) + require.NoError(t, err) + + data, err := id.MarshalBinary() + require.NoError(t, err) + + idOut, err := RowIDFromBinary(data) + require.NoError(t, err) + assert.EqualValues(t, id, idOut) + + err = idOut.Verify(edsSize) + require.NoError(t, err) + require.True(t, id.Equals(idOut)) +} + +func TestRowIDReaderWriter(t *testing.T) { + edsSize := 4 + + id, err := NewRowID(2, 1, edsSize) + require.NoError(t, err) + + buf := bytes.NewBuffer(nil) + n, err := id.WriteTo(buf) + require.NoError(t, err) + require.Equal(t, int64(RowIDSize), n) + + ridOut 
:= RowID{} + n, err = ridOut.ReadFrom(buf) + require.NoError(t, err) + require.Equal(t, int64(RowIDSize), n) + + require.EqualValues(t, id, ridOut) +} diff --git a/share/shwap/row_namespace_data.go b/share/shwap/row_namespace_data.go new file mode 100644 index 0000000000..e92f264aa0 --- /dev/null +++ b/share/shwap/row_namespace_data.go @@ -0,0 +1,216 @@ +package shwap + +import ( + "errors" + "fmt" + "io" + + "github.com/celestiaorg/celestia-app/v2/pkg/appconsts" + "github.com/celestiaorg/celestia-app/v2/pkg/wrapper" + "github.com/celestiaorg/go-libp2p-messenger/serde" + "github.com/celestiaorg/nmt" + nmt_pb "github.com/celestiaorg/nmt/pb" + + "github.com/celestiaorg/celestia-node/share" + "github.com/celestiaorg/celestia-node/share/shwap/pb" +) + +// RowNamespaceDataName is the name identifier for the row namespace data container. +const RowNamespaceDataName = "rnd_v0" + +// ErrNamespaceOutsideRange is returned by RowNamespaceDataFromShares when the target namespace is +// outside of the namespace range for the given row. In this case, the implementation cannot return +// the non-inclusion proof and will return ErrNamespaceOutsideRange. +var ErrNamespaceOutsideRange = errors.New("target namespace is outside of namespace range for the given root") + +// RowNamespaceData holds shares and their corresponding proof for a single row within a namespace. +type RowNamespaceData struct { + Shares []share.Share `json:"shares"` // Shares within the namespace. + Proof *nmt.Proof `json:"proof"` // Proof of the shares' inclusion in the namespace. +} + +// RowNamespaceDataFromShares extracts and constructs a RowNamespaceData from shares within the +// specified namespace. 
+func RowNamespaceDataFromShares( + shares []share.Share, + namespace share.Namespace, + rowIndex int, +) (RowNamespaceData, error) { + tree := wrapper.NewErasuredNamespacedMerkleTree(uint64(len(shares)/2), uint(rowIndex)) + nmtTree := nmt.New( + appconsts.NewBaseHashFunc(), + nmt.NamespaceIDSize(appconsts.NamespaceSize), + nmt.IgnoreMaxNamespace(true), + ) + tree.SetTree(nmtTree) + + for _, shr := range shares { + if err := tree.Push(shr); err != nil { + return RowNamespaceData{}, fmt.Errorf("failed to build tree for row %d: %w", rowIndex, err) + } + } + + root, err := tree.Root() + if err != nil { + return RowNamespaceData{}, fmt.Errorf("failed to get root for row %d: %w", rowIndex, err) + } + if namespace.IsOutsideRange(root, root) { + return RowNamespaceData{}, ErrNamespaceOutsideRange + } + + var from, count int + for i := range len(shares) / 2 { + if namespace.Equals(share.GetNamespace(shares[i])) { + if count == 0 { + from = i + } + count++ + continue + } + if count > 0 { + break + } + } + + // if count is 0, then the namespace is not present in the shares. Return non-inclusion proof. + if count == 0 { + proof, err := nmtTree.ProveNamespace(namespace.ToNMT()) + if err != nil { + return RowNamespaceData{}, fmt.Errorf("failed to generate non-inclusion proof for row %d: %w", rowIndex, err) + } + + return RowNamespaceData{ + Proof: &proof, + }, nil + } + + namespacedShares := make([]share.Share, count) + copy(namespacedShares, shares[from:from+count]) + + proof, err := tree.ProveRange(from, from+count) + if err != nil { + return RowNamespaceData{}, fmt.Errorf("failed to generate proof for row %d: %w", rowIndex, err) + } + + return RowNamespaceData{ + Shares: namespacedShares, + Proof: &proof, + }, nil +} + +// RowNamespaceDataFromProto constructs RowNamespaceData out of its protobuf representation. 
+func RowNamespaceDataFromProto(row *pb.RowNamespaceData) RowNamespaceData { + var proof nmt.Proof + if row.GetProof().GetLeafHash() != nil { + proof = nmt.NewAbsenceProof( + int(row.GetProof().GetStart()), + int(row.GetProof().GetEnd()), + row.GetProof().GetNodes(), + row.GetProof().GetLeafHash(), + row.GetProof().GetIsMaxNamespaceIgnored(), + ) + } else { + proof = nmt.NewInclusionProof( + int(row.GetProof().GetStart()), + int(row.GetProof().GetEnd()), + row.GetProof().GetNodes(), + row.GetProof().GetIsMaxNamespaceIgnored(), + ) + } + + return RowNamespaceData{ + Shares: SharesFromProto(row.GetShares()), + Proof: &proof, + } +} + +// ToProto converts RowNamespaceData to its protobuf representation for serialization. +func (rnd RowNamespaceData) ToProto() *pb.RowNamespaceData { + return &pb.RowNamespaceData{ + Shares: SharesToProto(rnd.Shares), + Proof: &nmt_pb.Proof{ + Start: int64(rnd.Proof.Start()), + End: int64(rnd.Proof.End()), + Nodes: rnd.Proof.Nodes(), + LeafHash: rnd.Proof.LeafHash(), + IsMaxNamespaceIgnored: rnd.Proof.IsMaxNamespaceIDIgnored(), + }, + } +} + +// IsEmpty reports whether the RowNamespaceData is empty, i.e. doesn't contain a proof. +func (rnd RowNamespaceData) IsEmpty() bool { + return rnd.Proof == nil +} + +// Verify checks validity of the RowNamespaceData against the AxisRoots, Namespace and Row index. 
+func (rnd RowNamespaceData) Verify(roots *share.AxisRoots, namespace share.Namespace, rowIdx int) error { + if rnd.Proof == nil || rnd.Proof.IsEmptyProof() { + return fmt.Errorf("nil proof") + } + if len(rnd.Shares) == 0 && !rnd.Proof.IsOfAbsence() { + return fmt.Errorf("empty shares with non-absence proof for row %d", rowIdx) + } + + if len(rnd.Shares) > 0 && rnd.Proof.IsOfAbsence() { + return fmt.Errorf("non-empty shares with absence proof for row %d", rowIdx) + } + + if err := ValidateShares(rnd.Shares); err != nil { + return fmt.Errorf("invalid shares: %w", err) + } + + rowRoot := roots.RowRoots[rowIdx] + if namespace.IsOutsideRange(rowRoot, rowRoot) { + return fmt.Errorf("namespace out of range for row %d", rowIdx) + } + + if !rnd.verifyInclusion(rowRoot, namespace) { + return fmt.Errorf("%w for row: %d", ErrFailedVerification, rowIdx) + } + return nil +} + +// verifyInclusion checks the inclusion of the row's shares in the provided root using NMT. +func (rnd RowNamespaceData) verifyInclusion(rowRoot []byte, namespace share.Namespace) bool { + leaves := make([][]byte, 0, len(rnd.Shares)) + for _, sh := range rnd.Shares { + namespaceBytes := share.GetNamespace(sh) + leave := make([]byte, len(sh)+len(namespaceBytes)) + copy(leave, namespaceBytes) + copy(leave[len(namespaceBytes):], sh) + leaves = append(leaves, leave) + } + + return rnd.Proof.VerifyNamespace( + share.NewSHA256Hasher(), + namespace.ToNMT(), + leaves, + rowRoot, + ) +} + +// ReadFrom reads length-delimited protobuf representation of RowNamespaceData +// implementing io.ReaderFrom. +func (rnd *RowNamespaceData) ReadFrom(reader io.Reader) (int64, error) { + var pbrnd pb.RowNamespaceData + n, err := serde.Read(reader, &pbrnd) + if err != nil { + return int64(n), fmt.Errorf("reading RowNamespaceData: %w", err) + } + + *rnd = RowNamespaceDataFromProto(&pbrnd) + return int64(n), nil +} + +// WriteTo writes length-delimited protobuf representation of RowNamespaceData. +// implementing io.WriterTo. 
+func (rnd RowNamespaceData) WriteTo(writer io.Writer) (int64, error) { + pbrnd := rnd.ToProto() + n, err := serde.Write(writer, pbrnd) + if err != nil { + return int64(n), fmt.Errorf("writing RowNamespaceData: %w", err) + } + + return int64(n), nil +} diff --git a/share/shwap/row_namespace_data_id.go b/share/shwap/row_namespace_data_id.go new file mode 100644 index 0000000000..9ae87e0ee4 --- /dev/null +++ b/share/shwap/row_namespace_data_id.go @@ -0,0 +1,136 @@ +package shwap + +import ( + "fmt" + "io" + + "github.com/celestiaorg/celestia-node/share" +) + +// RowNamespaceDataIDSize defines the total size of a RowNamespaceDataID in bytes, combining the +// size of a RowID and the size of a Namespace. +const RowNamespaceDataIDSize = RowIDSize + share.NamespaceSize + +// RowNamespaceDataID uniquely identifies a piece of namespaced data within a row of an Extended +// Data Square (EDS). +type RowNamespaceDataID struct { + RowID // Embedded RowID representing the specific row in the EDS. + DataNamespace share.Namespace // DataNamespace is a string representation of the namespace to facilitate comparisons. +} + +// NewRowNamespaceDataID creates a new RowNamespaceDataID with the specified parameters. It +// validates the RowNamespaceDataID against the provided EDS size. +func NewRowNamespaceDataID( + height uint64, + rowIdx int, + namespace share.Namespace, + edsSize int, +) (RowNamespaceDataID, error) { + did := RowNamespaceDataID{ + RowID: RowID{ + EdsID: EdsID{ + Height: height, + }, + RowIndex: rowIdx, + }, + DataNamespace: namespace, + } + + if err := did.Verify(edsSize); err != nil { + return RowNamespaceDataID{}, fmt.Errorf("verifying RowNamespaceDataID: %w", err) + } + return did, nil +} + +// RowNamespaceDataIDFromBinary deserializes a RowNamespaceDataID from its binary form. It returns +// an error if the binary data's length does not match the expected size. 
+func RowNamespaceDataIDFromBinary(data []byte) (RowNamespaceDataID, error) { + if len(data) != RowNamespaceDataIDSize { + return RowNamespaceDataID{}, + fmt.Errorf("invalid RowNamespaceDataID length: expected %d, got %d", RowNamespaceDataIDSize, len(data)) + } + + rid, err := RowIDFromBinary(data[:RowIDSize]) + if err != nil { + return RowNamespaceDataID{}, fmt.Errorf("unmarshaling RowID: %w", err) + } + + rndid := RowNamespaceDataID{ + RowID: rid, + DataNamespace: data[RowIDSize:], + } + if err := rndid.Validate(); err != nil { + return RowNamespaceDataID{}, fmt.Errorf("validating RowNamespaceDataID: %w", err) + } + + return rndid, nil +} + +// Equals checks equality of RowNamespaceDataID. +func (rndid *RowNamespaceDataID) Equals(other RowNamespaceDataID) bool { + return rndid.RowID.Equals(other.RowID) && rndid.DataNamespace.Equals(other.DataNamespace) +} + +// ReadFrom reads the binary form of RowNamespaceDataID from the provided reader. +func (rndid *RowNamespaceDataID) ReadFrom(r io.Reader) (int64, error) { + data := make([]byte, RowNamespaceDataIDSize) + n, err := io.ReadFull(r, data) + if err != nil { + return int64(n), err + } + if n != RowNamespaceDataIDSize { + return int64(n), fmt.Errorf("RowNamespaceDataID: expected %d bytes, got %d", RowNamespaceDataIDSize, n) + } + id, err := RowNamespaceDataIDFromBinary(data) + if err != nil { + return int64(n), fmt.Errorf("RowNamespaceDataIDFromBinary: %w", err) + } + *rndid = id + return int64(n), nil +} + +// MarshalBinary encodes RowNamespaceDataID into binary form. +// NOTE: Proto is avoided because +// * Its size is not deterministic which is required for IPLD. +// * No support for uint16 +func (rndid RowNamespaceDataID) MarshalBinary() ([]byte, error) { + data := make([]byte, 0, RowNamespaceDataIDSize) + return rndid.appendTo(data), nil +} + +// WriteTo writes the binary form of RowNamespaceDataID to the provided writer. 
+func (rndid RowNamespaceDataID) WriteTo(w io.Writer) (int64, error) { + data, err := rndid.MarshalBinary() + if err != nil { + return 0, err + } + n, err := w.Write(data) + return int64(n), err +} + +// Verify validates the RowNamespaceDataID and verifies the embedded RowID. +func (rndid RowNamespaceDataID) Verify(edsSize int) error { + if err := rndid.RowID.Verify(edsSize); err != nil { + return fmt.Errorf("error verifying RowID: %w", err) + } + + return rndid.Validate() +} + +// Validate performs basic field validation. +func (rndid RowNamespaceDataID) Validate() error { + if err := rndid.RowID.Validate(); err != nil { + return fmt.Errorf("validating RowID: %w", err) + } + if err := rndid.DataNamespace.ValidateForData(); err != nil { + return fmt.Errorf("%w: validating DataNamespace: %w", ErrInvalidID, err) + } + + return nil +} + +// appendTo helps in appending the binary form of DataNamespace to the serialized RowID data. +func (rndid RowNamespaceDataID) appendTo(data []byte) []byte { + data = rndid.RowID.appendTo(data) + return append(data, rndid.DataNamespace...) 
+} diff --git a/share/shwap/row_namespace_data_id_test.go b/share/shwap/row_namespace_data_id_test.go new file mode 100644 index 0000000000..dd3aa6b689 --- /dev/null +++ b/share/shwap/row_namespace_data_id_test.go @@ -0,0 +1,50 @@ +package shwap + +import ( + "bytes" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/celestiaorg/celestia-node/share/sharetest" +) + +func TestRowNamespaceDataID(t *testing.T) { + edsSize := 4 + ns := sharetest.RandV0Namespace() + + id, err := NewRowNamespaceDataID(1, 1, ns, edsSize) + require.NoError(t, err) + + data, err := id.MarshalBinary() + require.NoError(t, err) + + sidOut, err := RowNamespaceDataIDFromBinary(data) + require.NoError(t, err) + assert.EqualValues(t, id, sidOut) + + err = sidOut.Verify(edsSize) + require.NoError(t, err) + require.True(t, id.Equals(sidOut)) +} + +func TestRowNamespaceDataIDReaderWriter(t *testing.T) { + edsSize := 4 + ns := sharetest.RandV0Namespace() + + id, err := NewRowNamespaceDataID(1, 1, ns, edsSize) + require.NoError(t, err) + + buf := bytes.NewBuffer(nil) + n, err := id.WriteTo(buf) + require.NoError(t, err) + require.Equal(t, int64(RowNamespaceDataIDSize), n) + + rndidOut := RowNamespaceDataID{} + n, err = rndidOut.ReadFrom(buf) + require.NoError(t, err) + require.Equal(t, int64(RowNamespaceDataIDSize), n) + + require.EqualValues(t, id, rndidOut) +} diff --git a/share/shwap/row_namespace_data_test.go b/share/shwap/row_namespace_data_test.go new file mode 100644 index 0000000000..985f508c7b --- /dev/null +++ b/share/shwap/row_namespace_data_test.go @@ -0,0 +1,99 @@ +package shwap_test + +import ( + "bytes" + "context" + "slices" + "testing" + "time" + + "github.com/stretchr/testify/require" + + "github.com/celestiaorg/celestia-node/share" + "github.com/celestiaorg/celestia-node/share/eds" + "github.com/celestiaorg/celestia-node/share/eds/edstest" + "github.com/celestiaorg/celestia-node/share/sharetest" + 
"github.com/celestiaorg/celestia-node/share/shwap" +) + +func TestNamespacedRowFromShares(t *testing.T) { + const odsSize = 8 + + minNamespace, err := share.NewBlobNamespaceV0(slices.Concat(bytes.Repeat([]byte{0}, 8), []byte{1, 0})) + require.NoError(t, err) + err = minNamespace.ValidateForData() + require.NoError(t, err) + + for namespacedAmount := 1; namespacedAmount < odsSize; namespacedAmount++ { + shares := sharetest.RandSharesWithNamespace(t, minNamespace, namespacedAmount, odsSize) + parity, err := share.DefaultRSMT2DCodec().Encode(shares) + require.NoError(t, err) + extended := slices.Concat(shares, parity) + + nr, err := shwap.RowNamespaceDataFromShares(extended, minNamespace, 0) + require.NoError(t, err) + require.Equal(t, namespacedAmount, len(nr.Shares)) + } +} + +func TestNamespacedRowFromSharesNonIncluded(t *testing.T) { + // TODO: this will fail until absence proof support is added + t.Skip() + + const odsSize = 8 + // Test absent namespace + shares := sharetest.RandShares(t, odsSize) + absentNs, err := share.GetNamespace(shares[0]).AddInt(1) + require.NoError(t, err) + + parity, err := share.DefaultRSMT2DCodec().Encode(shares) + require.NoError(t, err) + extended := slices.Concat(shares, parity) + + nr, err := shwap.RowNamespaceDataFromShares(extended, absentNs, 0) + require.NoError(t, err) + require.Len(t, nr.Shares, 0) + require.True(t, nr.Proof.IsOfAbsence()) +} + +func TestValidateNamespacedRow(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + t.Cleanup(cancel) + + const odsSize = 8 + sharesAmount := odsSize * odsSize + namespace := sharetest.RandV0Namespace() + for amount := 1; amount < sharesAmount; amount++ { + randEDS, root := edstest.RandEDSWithNamespace(t, namespace, amount, odsSize) + rsmt2d := &eds.Rsmt2D{ExtendedDataSquare: randEDS} + nd, err := eds.NamespaceData(ctx, rsmt2d, namespace) + require.NoError(t, err) + require.True(t, len(nd) > 0) + + rowIdxs := share.RowsWithNamespace(root, 
namespace) + require.Len(t, nd, len(rowIdxs)) + + for i, rowIdx := range rowIdxs { + err = nd[i].Verify(root, namespace, rowIdx) + require.NoError(t, err) + } + } +} + +func TestNamespacedRowProtoEncoding(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + t.Cleanup(cancel) + + const odsSize = 8 + namespace := sharetest.RandV0Namespace() + randEDS, _ := edstest.RandEDSWithNamespace(t, namespace, odsSize, odsSize) + rsmt2d := &eds.Rsmt2D{ExtendedDataSquare: randEDS} + nd, err := eds.NamespaceData(ctx, rsmt2d, namespace) + require.NoError(t, err) + require.True(t, len(nd) > 0) + + expected := nd[0] + pb := expected.ToProto() + ndOut := shwap.RowNamespaceDataFromProto(pb) + require.Equal(t, expected, ndOut) +} diff --git a/share/shwap/row_test.go b/share/shwap/row_test.go new file mode 100644 index 0000000000..353007b3a6 --- /dev/null +++ b/share/shwap/row_test.go @@ -0,0 +1,117 @@ +package shwap + +import ( + "testing" + + "github.com/stretchr/testify/require" + + "github.com/celestiaorg/celestia-node/share" + "github.com/celestiaorg/celestia-node/share/eds/edstest" +) + +func TestRowFromShares(t *testing.T) { + const odsSize = 8 + eds := edstest.RandEDS(t, odsSize) + + for rowIdx := 0; rowIdx < odsSize*2; rowIdx++ { + for _, side := range []RowSide{Left, Right} { + shares := eds.Row(uint(rowIdx)) + row := RowFromShares(shares, side) + extended, err := row.Shares() + require.NoError(t, err) + require.Equal(t, shares, extended) + + var half []share.Share + if side == Right { + half = shares[odsSize:] + } else { + half = shares[:odsSize] + } + require.Equal(t, half, row.halfShares) + require.Equal(t, side, row.side) + } + } +} + +func TestRowValidate(t *testing.T) { + const odsSize = 8 + eds := edstest.RandEDS(t, odsSize) + root, err := share.NewAxisRoots(eds) + require.NoError(t, err) + + for rowIdx := 0; rowIdx < odsSize*2; rowIdx++ { + for _, side := range []RowSide{Left, Right} { + shares := eds.Row(uint(rowIdx)) + row := 
RowFromShares(shares, side) + + err := row.Verify(root, rowIdx) + require.NoError(t, err) + err = row.Verify(root, rowIdx) + require.NoError(t, err) + } + } +} + +func TestRowValidateNegativeCases(t *testing.T) { + eds := edstest.RandEDS(t, 8) // Generate a random Extended Data Square of size 8 + root, err := share.NewAxisRoots(eds) + require.NoError(t, err) + shares := eds.Row(0) + row := RowFromShares(shares, Left) + + // Test with incorrect side specification + invalidSideRow := Row{halfShares: row.halfShares, side: RowSide(999)} + err = invalidSideRow.Verify(root, 0) + require.Error(t, err, "should error on invalid row side") + + // Test with invalid shares (more shares than expected) + incorrectShares := make([]share.Share, (eds.Width()/2)+1) // Adding an extra share + for i := range incorrectShares { + incorrectShares[i] = eds.GetCell(uint(i), 0) + } + invalidRow := Row{halfShares: incorrectShares, side: Left} + err = invalidRow.Verify(root, 0) + require.Error(t, err, "should error on incorrect number of shares") + + // Test with empty shares + emptyRow := Row{halfShares: []share.Share{}, side: Left} + err = emptyRow.Verify(root, 0) + require.Error(t, err, "should error on empty halfShares") + + // Doesn't match root. Corrupt root hash + root.RowRoots[0][len(root.RowRoots[0])-1] ^= 0xFF + err = row.Verify(root, 0) + require.Error(t, err, "should error on invalid root hash") +} + +func TestRowProtoEncoding(t *testing.T) { + const odsSize = 8 + eds := edstest.RandEDS(t, odsSize) + + for rowIdx := 0; rowIdx < odsSize*2; rowIdx++ { + for _, side := range []RowSide{Left, Right} { + shares := eds.Row(uint(rowIdx)) + row := RowFromShares(shares, side) + + pb := row.ToProto() + rowOut := RowFromProto(pb) + require.Equal(t, row, rowOut) + } + } +} + +// BenchmarkRowValidate benchmarks the performance of row validation. 
+// BenchmarkRowValidate-10 9591 121802 ns/op +func BenchmarkRowValidate(b *testing.B) { + const odsSize = 32 + eds := edstest.RandEDS(b, odsSize) + root, err := share.NewAxisRoots(eds) + require.NoError(b, err) + shares := eds.Row(0) + row := RowFromShares(shares, Left) + + b.ResetTimer() + for i := 0; i < b.N; i++ { + _ = row.Verify(root, 0) + } +} diff --git a/share/shwap/sample.go b/share/shwap/sample.go new file mode 100644 index 0000000000..48ae22088a --- /dev/null +++ b/share/shwap/sample.go @@ -0,0 +1,130 @@ +package shwap + +import ( + "errors" + "fmt" + + "github.com/celestiaorg/celestia-app/v2/pkg/wrapper" + "github.com/celestiaorg/nmt" + nmt_pb "github.com/celestiaorg/nmt/pb" + "github.com/celestiaorg/rsmt2d" + + "github.com/celestiaorg/celestia-node/share" + "github.com/celestiaorg/celestia-node/share/shwap/pb" +) + +// SampleName is the name identifier for the sample container. +const SampleName = "sample_v0" + +// ErrFailedVerification is returned when inclusion proof verification fails. It is returned +// when the data and the proof do not match trusted data root. +var ErrFailedVerification = errors.New("failed to verify inclusion") + +// Sample represents a data share along with its Merkle proof, used to validate the share's +// inclusion in a data square. +type Sample struct { + share.Share // Embeds the Share which includes the data with namespace. + Proof *nmt.Proof // Proof is the Merkle Proof validating the share's inclusion. + ProofType rsmt2d.Axis // ProofType indicates whether the proof is against a row or a column. +} + +// SampleFromShares creates a Sample from a list of shares, using the specified proof type and +// the share index to be included in the sample. 
+func SampleFromShares(shares []share.Share, proofType rsmt2d.Axis, axisIdx, shrIdx int) (Sample, error) { + tree := wrapper.NewErasuredNamespacedMerkleTree(uint64(len(shares)/2), uint(axisIdx)) + for _, shr := range shares { + err := tree.Push(shr) + if err != nil { + return Sample{}, err + } + } + + proof, err := tree.ProveRange(shrIdx, shrIdx+1) + if err != nil { + return Sample{}, err + } + + return Sample{ + Share: shares[shrIdx], + Proof: &proof, + ProofType: proofType, + }, nil +} + +// SampleFromProto converts a protobuf Sample back into its domain model equivalent. +func SampleFromProto(s *pb.Sample) Sample { + proof := nmt.NewInclusionProof( + int(s.GetProof().GetStart()), + int(s.GetProof().GetEnd()), + s.GetProof().GetNodes(), + s.GetProof().GetIsMaxNamespaceIgnored(), + ) + return Sample{ + Share: ShareFromProto(s.GetShare()), + Proof: &proof, + ProofType: rsmt2d.Axis(s.GetProofType()), + } +} + +// ToProto converts a Sample into its protobuf representation for serialization purposes. +func (s Sample) ToProto() *pb.Sample { + return &pb.Sample{ + Share: &pb.Share{Data: s.Share}, + Proof: &nmt_pb.Proof{ + Start: int64(s.Proof.Start()), + End: int64(s.Proof.End()), + Nodes: s.Proof.Nodes(), + LeafHash: s.Proof.LeafHash(), + IsMaxNamespaceIgnored: s.Proof.IsMaxNamespaceIDIgnored(), + }, + ProofType: pb.AxisType(s.ProofType), + } +} + +// IsEmpty reports whether the Sample is empty, i.e. doesn't contain a proof. +func (s Sample) IsEmpty() bool { + return s.Proof == nil +} + +// Verify checks the inclusion of the share using its Merkle proof under the specified AxisRoots. +// Returns an error if the proof is invalid or does not correspond to the indicated proof type. 
+func (s Sample) Verify(roots *share.AxisRoots, rowIdx, colIdx int) error { + if s.Proof == nil || s.Proof.IsEmptyProof() { + return errors.New("nil proof") + } + if err := share.ValidateShare(s.Share); err != nil { + return err + } + if s.ProofType != rsmt2d.Row && s.ProofType != rsmt2d.Col { + return fmt.Errorf("invalid SampleProofType: %d", s.ProofType) + } + if !s.verifyInclusion(roots, rowIdx, colIdx) { + return ErrFailedVerification + } + return nil +} + +// verifyInclusion checks if the share is included in the given root hash at the specified indices. +func (s Sample) verifyInclusion(roots *share.AxisRoots, rowIdx, colIdx int) bool { + size := len(roots.RowRoots) + namespace := inclusionNamespace(s.Share, rowIdx, colIdx, size) + rootHash := share.RootHashForCoordinates(roots, s.ProofType, uint(rowIdx), uint(colIdx)) + return s.Proof.VerifyInclusion( + share.NewSHA256Hasher(), + namespace.ToNMT(), + [][]byte{s.Share}, + rootHash, + ) +} + +// inclusionNamespace returns the namespace for the share based on its position in the square. +// Shares from extended part of the square are considered parity shares. It means that +// parity shares are located outside of first quadrant of the square. According to the nmt +// specification, the parity shares are prefixed with the namespace of the parity shares. +func inclusionNamespace(sh share.Share, rowIdx, colIdx, squareSize int) share.Namespace { + isParity := colIdx >= squareSize/2 || rowIdx >= squareSize/2 + if isParity { + return share.ParitySharesNamespace + } + return share.GetNamespace(sh) +} diff --git a/share/shwap/sample_id.go b/share/shwap/sample_id.go new file mode 100644 index 0000000000..b03bbacfda --- /dev/null +++ b/share/shwap/sample_id.go @@ -0,0 +1,128 @@ +package shwap + +import ( + "encoding/binary" + "fmt" + "io" +) + +// SampleIDSize defines the size of the SampleID in bytes, combining RowID size and 2 additional +// bytes for the ShareIndex. 
+const SampleIDSize = RowIDSize + 2 + +// SampleID uniquely identifies a specific sample within a row of an Extended Data Square (EDS). +type SampleID struct { + RowID // Embeds RowID to incorporate block height and row index. + ShareIndex int // ShareIndex specifies the index of the sample within the row. +} + +// NewSampleID constructs a new SampleID using the provided block height, sample index, and EDS +// size. It calculates the row and share index based on the sample index and EDS size. +func NewSampleID(height uint64, rowIdx, colIdx, edsSize int) (SampleID, error) { + sid := SampleID{ + RowID: RowID{ + EdsID: EdsID{ + Height: height, + }, + RowIndex: rowIdx, + }, + ShareIndex: colIdx, + } + + if err := sid.Verify(edsSize); err != nil { + return SampleID{}, fmt.Errorf("verifying SampleID: %w", err) + } + return sid, nil +} + +// SampleIDFromBinary deserializes a SampleID from binary data, ensuring the data length matches +// the expected size. +func SampleIDFromBinary(data []byte) (SampleID, error) { + if len(data) != SampleIDSize { + return SampleID{}, fmt.Errorf("invalid SampleID data length: expected %d, got %d", SampleIDSize, len(data)) + } + + rid, err := RowIDFromBinary(data[:RowIDSize]) + if err != nil { + return SampleID{}, fmt.Errorf("decoding RowID: %w", err) + } + + sid := SampleID{ + RowID: rid, + ShareIndex: int(binary.BigEndian.Uint16(data[RowIDSize:])), + } + if err := sid.Validate(); err != nil { + return SampleID{}, fmt.Errorf("validating SampleID: %w", err) + } + + return sid, nil +} + +// Equals checks equality of SampleID. +func (sid *SampleID) Equals(other SampleID) bool { + return sid.RowID.Equals(other.RowID) && sid.ShareIndex == other.ShareIndex +} + +// ReadFrom reads the binary form of SampleID from the provided reader. 
+func (sid *SampleID) ReadFrom(r io.Reader) (int64, error) { + data := make([]byte, SampleIDSize) + n, err := io.ReadFull(r, data) + if err != nil { + return int64(n), err + } + if n != SampleIDSize { + return int64(n), fmt.Errorf("SampleID: expected %d bytes, got %d", SampleIDSize, n) + } + id, err := SampleIDFromBinary(data) + if err != nil { + return int64(n), fmt.Errorf("SampleIDFromBinary: %w", err) + } + *sid = id + return int64(n), nil +} + +// MarshalBinary encodes SampleID into binary form. +// NOTE: Proto is avoided because +// * Its size is not deterministic which is required for IPLD. +// * No support for uint16 +func (sid SampleID) MarshalBinary() ([]byte, error) { + data := make([]byte, 0, SampleIDSize) + return sid.appendTo(data), nil +} + +// WriteTo writes the binary form of SampleID to the provided writer. +func (sid SampleID) WriteTo(w io.Writer) (int64, error) { + data, err := sid.MarshalBinary() + if err != nil { + return 0, err + } + n, err := w.Write(data) + return int64(n), err +} + +// Verify validates the SampleID and verifies that the ShareIndex is within the bounds of +// the square size. +func (sid SampleID) Verify(edsSize int) error { + if err := sid.RowID.Verify(edsSize); err != nil { + return fmt.Errorf("verifying RowID: %w", err) + } + if sid.ShareIndex >= edsSize { + return fmt.Errorf("%w: ShareIndex: %d >= %d", ErrOutOfBounds, sid.ShareIndex, edsSize) + } + return sid.Validate() +} + +// Validate performs basic field validation. +func (sid SampleID) Validate() error { + if sid.ShareIndex < 0 { + return fmt.Errorf("%w: ShareIndex: %d < 0", ErrInvalidID, sid.ShareIndex) + } + return sid.RowID.Validate() +} + +// appendTo helps in constructing the binary representation by appending the encoded ShareIndex to +// the serialized RowID. 
+func (sid SampleID) appendTo(data []byte) []byte { + data = sid.RowID.appendTo(data) + return binary.BigEndian.AppendUint16(data, uint16(sid.ShareIndex)) +} diff --git a/share/shwap/sample_id_test.go b/share/shwap/sample_id_test.go new file mode 100644 index 0000000000..125d536854 --- /dev/null +++ b/share/shwap/sample_id_test.go @@ -0,0 +1,46 @@ +package shwap + +import ( + "bytes" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestSampleID(t *testing.T) { + edsSize := 4 + + id, err := NewSampleID(1, 1, 1, edsSize) + require.NoError(t, err) + + data, err := id.MarshalBinary() + require.NoError(t, err) + + idOut, err := SampleIDFromBinary(data) + require.NoError(t, err) + assert.EqualValues(t, id, idOut) + + err = idOut.Verify(edsSize) + require.NoError(t, err) + require.True(t, id.Equals(idOut)) +} + +func TestSampleIDReaderWriter(t *testing.T) { + edsSize := 4 + + id, err := NewSampleID(1, 1, 1, edsSize) + require.NoError(t, err) + + buf := bytes.NewBuffer(nil) + n, err := id.WriteTo(buf) + require.NoError(t, err) + require.Equal(t, int64(SampleIDSize), n) + + sidOut := SampleID{} + n, err = sidOut.ReadFrom(buf) + require.NoError(t, err) + require.Equal(t, int64(SampleIDSize), n) + + require.EqualValues(t, id, sidOut) +} diff --git a/share/shwap/sample_test.go b/share/shwap/sample_test.go new file mode 100644 index 0000000000..ea57f68e94 --- /dev/null +++ b/share/shwap/sample_test.go @@ -0,0 +1,108 @@ +package shwap_test + +import ( + "context" + "testing" + + "github.com/stretchr/testify/require" + + "github.com/celestiaorg/rsmt2d" + + "github.com/celestiaorg/celestia-node/share" + "github.com/celestiaorg/celestia-node/share/eds" + "github.com/celestiaorg/celestia-node/share/eds/edstest" + "github.com/celestiaorg/celestia-node/share/shwap" +) + +func TestSampleValidate(t *testing.T) { + const odsSize = 8 + randEDS := edstest.RandEDS(t, odsSize) + root, err := share.NewAxisRoots(randEDS) + 
require.NoError(t, err) + inMem := eds.Rsmt2D{ExtendedDataSquare: randEDS} + + for _, proofType := range []rsmt2d.Axis{rsmt2d.Row, rsmt2d.Col} { + for rowIdx := 0; rowIdx < odsSize*2; rowIdx++ { + for colIdx := 0; colIdx < odsSize*2; colIdx++ { + sample, err := inMem.SampleForProofAxis(rowIdx, colIdx, proofType) + require.NoError(t, err) + + require.NoError(t, sample.Verify(root, rowIdx, colIdx)) + } + } + } +} + +// TestSampleNegativeVerifyInclusion checks +func TestSampleNegativeVerifyInclusion(t *testing.T) { + const odsSize = 8 + randEDS := edstest.RandEDS(t, odsSize) + root, err := share.NewAxisRoots(randEDS) + require.NoError(t, err) + inMem := eds.Rsmt2D{ExtendedDataSquare: randEDS} + + sample, err := inMem.Sample(context.Background(), 0, 0) + require.NoError(t, err) + err = sample.Verify(root, 0, 0) + require.NoError(t, err) + + // incorrect row index + err = sample.Verify(root, 1, 0) + require.ErrorIs(t, err, shwap.ErrFailedVerification) + + // Corrupt the share + sample.Share[0] ^= 0xFF + err = sample.Verify(root, 0, 0) + require.ErrorIs(t, err, shwap.ErrFailedVerification) + + // incorrect proofType + sample, err = inMem.Sample(context.Background(), 0, 0) + require.NoError(t, err) + sample.ProofType = rsmt2d.Col + err = sample.Verify(root, 0, 0) + require.ErrorIs(t, err, shwap.ErrFailedVerification) + + // Corrupt the last root hash byte + sample, err = inMem.Sample(context.Background(), 0, 0) + require.NoError(t, err) + root.RowRoots[0][len(root.RowRoots[0])-1] ^= 0xFF + err = sample.Verify(root, 0, 0) + require.ErrorIs(t, err, shwap.ErrFailedVerification) +} + +func TestSampleProtoEncoding(t *testing.T) { + const odsSize = 8 + randEDS := edstest.RandEDS(t, odsSize) + inMem := eds.Rsmt2D{ExtendedDataSquare: randEDS} + + for _, proofType := range []rsmt2d.Axis{rsmt2d.Row, rsmt2d.Col} { + for rowIdx := 0; rowIdx < odsSize*2; rowIdx++ { + for colIdx := 0; colIdx < odsSize*2; colIdx++ { + sample, err := inMem.SampleForProofAxis(rowIdx, colIdx, proofType) + 
require.NoError(t, err) + + pb := sample.ToProto() + sampleOut := shwap.SampleFromProto(pb) + require.NoError(t, err) + require.Equal(t, sample, sampleOut) + } + } + } +} + +// BenchmarkSampleValidate benchmarks the performance of sample validation. +// BenchmarkSampleValidate-10 284829 3935 ns/op +func BenchmarkSampleValidate(b *testing.B) { + const odsSize = 32 + randEDS := edstest.RandEDS(b, odsSize) + root, err := share.NewAxisRoots(randEDS) + require.NoError(b, err) + inMem := eds.Rsmt2D{ExtendedDataSquare: randEDS} + sample, err := inMem.SampleForProofAxis(0, 0, rsmt2d.Row) + require.NoError(b, err) + + b.ResetTimer() + for i := 0; i < b.N; i++ { + _ = sample.Verify(root, 0, 0) + } +} diff --git a/share/shwap/share.go b/share/shwap/share.go new file mode 100644 index 0000000000..a7f7ef67b7 --- /dev/null +++ b/share/shwap/share.go @@ -0,0 +1,49 @@ +package shwap + +import ( + "fmt" + + "github.com/celestiaorg/celestia-node/share" + "github.com/celestiaorg/celestia-node/share/shwap/pb" +) + +// ShareFromProto converts a protobuf Share object to the application's internal share +// representation. It returns nil if the input protobuf Share is nil, ensuring safe handling of nil +// values. +func ShareFromProto(s *pb.Share) share.Share { + if s == nil { + return nil + } + return s.Data +} + +// SharesToProto converts a slice of Shares from the application's internal representation to a +// slice of protobuf Share objects. This function allocates memory for the protobuf objects and +// copies data from the input slice. +func SharesToProto(shrs []share.Share) []*pb.Share { + protoShares := make([]*pb.Share, len(shrs)) + for i, shr := range shrs { + protoShares[i] = &pb.Share{Data: shr} + } + return protoShares +} + +// SharesFromProto converts a slice of protobuf Share objects to the application's internal slice +// of Shares. It ensures that each Share is correctly transformed using the ShareFromProto function. 
+func SharesFromProto(shrs []*pb.Share) []share.Share { + shares := make([]share.Share, len(shrs)) + for i, shr := range shrs { + shares[i] = ShareFromProto(shr) + } + return shares +} + +// ValidateShares takes the slice of shares and checks their conformance to share format. +func ValidateShares(shares []share.Share) error { + for i, shr := range shares { + if err := share.ValidateShare(shr); err != nil { + return fmt.Errorf("while validating share at index %d: %w", i, err) + } + } + return nil +} diff --git a/store/cache/accessor_cache.go b/store/cache/accessor_cache.go new file mode 100644 index 0000000000..893c73b7a9 --- /dev/null +++ b/store/cache/accessor_cache.go @@ -0,0 +1,264 @@ +package cache + +import ( + "context" + "fmt" + "runtime" + "sync" + "sync/atomic" + "time" + + lru "github.com/hashicorp/golang-lru/v2" + + "github.com/celestiaorg/celestia-node/share/eds" +) + +const defaultCloseTimeout = time.Minute + +var _ Cache = (*AccessorCache)(nil) + +// AccessorCache implements the Cache interface using an LRU cache backend. +type AccessorCache struct { + // The name is a prefix that will be used for cache metrics if they are enabled. + name string + // stripedLocks prevents simultaneous RW access to the accessor cache. Instead + // of using only one lock or one lock per uint64, we stripe the uint64s across 256 locks. 256 is + // chosen because it 0-255 is the range of values we get looking at the last byte of the uint64. + stripedLocks [256]*sync.RWMutex + // Caches the accessor for a given uint64 for accessor read affinity, i.e., further reads will + // likely be from the same accessor. Maps (Datahash -> accessor). + cache *lru.Cache[uint64, *accessor] + + metrics *metrics +} + +// accessor is the value stored in Cache. It implements the eds.AccessorStreamer interface. It has a +// reference counted so that it can be removed from the cache only when all references are released. 
+type accessor struct { + eds.AccessorStreamer + + lock sync.Mutex + done chan struct{} + refs atomic.Int32 + isClosed bool +} + +func NewAccessorCache(name string, cacheSize int) (*AccessorCache, error) { + bc := &AccessorCache{ + name: name, + stripedLocks: [256]*sync.RWMutex{}, + } + + for i := range bc.stripedLocks { + bc.stripedLocks[i] = &sync.RWMutex{} + } + // Instantiate the Accessor Cache. + bslru, err := lru.NewWithEvict[uint64, *accessor](cacheSize, bc.evictFn()) + if err != nil { + return nil, fmt.Errorf("creating accessor cache %s: %w", name, err) + } + bc.cache = bslru + return bc, nil +} + +// evictFn will be invoked when an item is evicted from the cache. +func (bc *AccessorCache) evictFn() func(uint64, *accessor) { + return func(_ uint64, ac *accessor) { + // we don't want to block cache on close and can release accessor from cache early, while it is + // being closed in parallel routine + go func() { + err := ac.close() + if err != nil { + bc.metrics.observeEvicted(true) + log.Errorf("couldn't close accessor after cache eviction: %s", err) + return + } + bc.metrics.observeEvicted(false) + }() + } +} + +// Has checks if accessor for the height is present on the AccessorCache. +func (bc *AccessorCache) Has(height uint64) bool { + lk := bc.getLock(height) + lk.RLock() + defer lk.RUnlock() + + return bc.cache.Contains(height) +} + +// Get retrieves the accessor for a given uint64 from the Cache. If the Accessor is not in +// the Cache, it returns an ErrCacheMiss. +func (bc *AccessorCache) Get(height uint64) (eds.AccessorStreamer, error) { + lk := bc.getLock(height) + lk.RLock() + defer lk.RUnlock() + + ac, ok := bc.cache.Get(height) + if !ok { + bc.metrics.observeGet(false) + return nil, ErrCacheMiss + } + + bc.metrics.observeGet(true) + return newRefCloser(ac) +} + +// GetOrLoad attempts to get an item from the cache, and if not found, invokes +// the provided loader function to load it. 
+func (bc *AccessorCache) GetOrLoad( + ctx context.Context, + height uint64, + loader OpenAccessorFn, +) (eds.AccessorStreamer, error) { + lk := bc.getLock(height) + lk.Lock() + defer lk.Unlock() + + ac, ok := bc.cache.Get(height) + if ok { + // return accessor, only if it is not closed yet + accessorWithRef, err := newRefCloser(ac) + if err == nil { + bc.metrics.observeGet(true) + return accessorWithRef, nil + } + } + + // accessor not found in cache or closed, so load new one using loader + f, err := loader(ctx) + if err != nil { + return nil, fmt.Errorf("unable to load accessor: %w", err) + } + + ac = &accessor{AccessorStreamer: f} + // Create a new accessor first to increment the reference count in it, so it cannot get evicted + // from the inner lru cache before it is used. + rc, err := newRefCloser(ac) + if err != nil { + return nil, err + } + bc.cache.Add(height, ac) + return rc, nil +} + +// Remove removes the Accessor for a given uint64 from the cache. +func (bc *AccessorCache) Remove(height uint64) error { + lk := bc.getLock(height) + lk.RLock() + ac, ok := bc.cache.Get(height) + lk.RUnlock() + if !ok { + // item is not in cache + return nil + } + if err := ac.close(); err != nil { + return err + } + // The cache will call evictFn on removal, where accessor close will be called. + bc.cache.Remove(height) + return nil +} + +// EnableMetrics enables metrics for the cache. 
+func (bc *AccessorCache) EnableMetrics() (unreg func() error, err error) { + if bc.metrics == nil { + bc.metrics, err = newMetrics(bc) + } + return bc.metrics.reg.Unregister, err +} + +func (s *accessor) addRef() error { + s.lock.Lock() + defer s.lock.Unlock() + if s.isClosed { + // item is already closed and soon will be removed after all refs are released + return ErrCacheMiss + } + if s.refs.Add(1) == 1 { + // there were no refs previously and done channel was closed, reopen it by recreating + s.done = make(chan struct{}) + } + return nil +} + +func (s *accessor) removeRef() { + s.lock.Lock() + defer s.lock.Unlock() + if s.refs.Add(-1) <= 0 { + close(s.done) + } +} + +// close closes the accessor and removes it from the cache if it is not closed yet. It will block +// until all references are released or timeout is reached. +func (s *accessor) close() error { + s.lock.Lock() + if s.isClosed { + s.lock.Unlock() + // accessor will be closed by another goroutine + return nil + } + s.isClosed = true + done := s.done + s.lock.Unlock() + + // wait until all references are released or timeout is reached. If timeout is reached, log an + // error and close the accessor forcefully. + select { + case <-done: + case <-time.After(defaultCloseTimeout): + log.Errorf("closing accessor, some readers didn't close the accessor within timeout,"+ + " amount left: %v", s.refs.Load()) + } + if err := s.AccessorStreamer.Close(); err != nil { + return fmt.Errorf("closing accessor: %w", err) + } + return nil +} + +// refCloser exists for reference counting protection on accessor. It ensures that a caller can't +// decrement it more than once. 
+type refCloser struct { + *accessor + closed atomic.Bool + removeRef func() +} + +// newRefCloser creates new refCloser +func newRefCloser(abs *accessor) (*refCloser, error) { + if err := abs.addRef(); err != nil { + return nil, err + } + + rf := &refCloser{ + accessor: abs, + removeRef: abs.removeRef, + } + // Set finalizer to ensure that accessor is closed when refCloser is garbage collected. + // We expect that refCloser will be closed explicitly by the caller. If it is not closed, + // we log an error. + runtime.SetFinalizer(rf, func(rf *refCloser) { + if rf.close() { + log.Errorf("refCloser for accessor was garbage collected before Close was called") + } + }) + return rf, nil +} + +func (c *refCloser) close() bool { + if c.closed.CompareAndSwap(false, true) { + c.removeRef() + return true + } + return false +} + +func (c *refCloser) Close() error { + c.close() + return nil +} + +func (bc *AccessorCache) getLock(k uint64) *sync.RWMutex { + return bc.stripedLocks[byte(k%256)] +} diff --git a/share/eds/cache/accessor_cache_test.go b/store/cache/accessor_cache_test.go similarity index 60% rename from share/eds/cache/accessor_cache_test.go rename to store/cache/accessor_cache_test.go index 347b251a88..876a4fdc5a 100644 --- a/share/eds/cache/accessor_cache_test.go +++ b/store/cache/accessor_cache_test.go @@ -9,71 +9,74 @@ import ( "testing" "time" - "github.com/filecoin-project/dagstore" - "github.com/filecoin-project/dagstore/shard" - blocks "github.com/ipfs/go-block-format" - "github.com/ipfs/go-cid" "github.com/stretchr/testify/require" + + "github.com/celestiaorg/rsmt2d" + + "github.com/celestiaorg/celestia-node/share" + "github.com/celestiaorg/celestia-node/share/eds" + "github.com/celestiaorg/celestia-node/share/shwap" ) func TestAccessorCache(t *testing.T) { - t.Run("add / get item from cache", func(t *testing.T) { + t.Run("add / has / get item from cache", func(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), time.Second) defer 
cancel() cache, err := NewAccessorCache("test", 1) require.NoError(t, err) // add accessor to the cache - key := shard.KeyFromString("key") + height := uint64(1) mock := &mockAccessor{ data: []byte("test_data"), } - loaded, err := cache.GetOrLoad(ctx, key, func(ctx context.Context, key shard.Key) (Accessor, error) { + loaded, err := cache.GetOrLoad(ctx, height, func(ctx context.Context) (eds.AccessorStreamer, error) { return mock, nil }) require.NoError(t, err) + reader, err := loaded.Reader() + require.NoError(t, err) + data, err := io.ReadAll(reader) + require.NoError(t, err) + require.Equal(t, mock.data, data) + err = loaded.Close() + require.NoError(t, err) // check if item exists - got, err := cache.Get(key) + has := cache.Has(height) + require.True(t, has) + got, err := cache.Get(height) require.NoError(t, err) - - l, err := io.ReadAll(loaded.Reader()) + reader, err = got.Reader() + require.NoError(t, err) + data, err = io.ReadAll(reader) require.NoError(t, err) - require.Equal(t, mock.data, l) - g, err := io.ReadAll(got.Reader()) + require.Equal(t, mock.data, data) + err = got.Close() require.NoError(t, err) - require.Equal(t, mock.data, g) }) - t.Run("get blockstore from accessor", func(t *testing.T) { + t.Run("get reader from accessor", func(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), time.Second) defer cancel() cache, err := NewAccessorCache("test", 1) require.NoError(t, err) // add accessor to the cache - key := shard.KeyFromString("key") + height := uint64(1) mock := &mockAccessor{} - accessor, err := cache.GetOrLoad(ctx, key, func(ctx context.Context, key shard.Key) (Accessor, error) { + accessor, err := cache.GetOrLoad(ctx, height, func(ctx context.Context) (eds.AccessorStreamer, error) { return mock, nil }) require.NoError(t, err) // check if item exists - _, err = cache.Get(key) + _, err = cache.Get(height) require.NoError(t, err) - // blockstore should be created only after first request - require.Equal(t, 0, 
mock.returnedBs) - - // try to get blockstore - _, err = accessor.Blockstore() - require.NoError(t, err) - - // second call to blockstore should return same blockstore - _, err = accessor.Blockstore() + // try to get reader + _, err = accessor.Reader() require.NoError(t, err) - require.Equal(t, 1, mock.returnedBs) }) t.Run("remove an item", func(t *testing.T) { @@ -83,24 +86,26 @@ func TestAccessorCache(t *testing.T) { require.NoError(t, err) // add accessor to the cache - key := shard.KeyFromString("key") + height := uint64(1) mock := &mockAccessor{} - ac, err := cache.GetOrLoad(ctx, key, func(ctx context.Context, key shard.Key) (Accessor, error) { + ac, err := cache.GetOrLoad(ctx, height, func(ctx context.Context) (eds.AccessorStreamer, error) { return mock, nil }) require.NoError(t, err) err = ac.Close() require.NoError(t, err) - err = cache.Remove(key) + err = cache.Remove(height) require.NoError(t, err) // accessor should be closed on removal mock.checkClosed(t, true) // check if item exists - _, err = cache.Get(key) - require.ErrorIs(t, err, errCacheMiss) + has := cache.Has(height) + require.False(t, has) + _, err = cache.Get(height) + require.ErrorIs(t, err, ErrCacheMiss) }) t.Run("successive reads should read the same data", func(t *testing.T) { @@ -110,23 +115,27 @@ func TestAccessorCache(t *testing.T) { require.NoError(t, err) // add accessor to the cache - key := shard.KeyFromString("key") + height := uint64(1) mock := &mockAccessor{data: []byte("test")} - accessor, err := cache.GetOrLoad(ctx, key, func(ctx context.Context, key shard.Key) (Accessor, error) { + accessor, err := cache.GetOrLoad(ctx, height, func(ctx context.Context) (eds.AccessorStreamer, error) { return mock, nil }) require.NoError(t, err) - loaded, err := io.ReadAll(accessor.Reader()) + reader, err := accessor.Reader() require.NoError(t, err) - require.Equal(t, mock.data, loaded) + data, err := io.ReadAll(reader) + require.NoError(t, err) + require.Equal(t, mock.data, data) for i := 0; i 
< 2; i++ { - accessor, err = cache.Get(key) + accessor, err = cache.Get(height) + require.NoError(t, err) + reader, err := accessor.Reader() require.NoError(t, err) - got, err := io.ReadAll(accessor.Reader()) + data, err := io.ReadAll(reader) require.NoError(t, err) - require.Equal(t, mock.data, got) + require.Equal(t, mock.data, data) } }) @@ -137,9 +146,9 @@ func TestAccessorCache(t *testing.T) { require.NoError(t, err) // add accessor to the cache - key := shard.KeyFromString("key") + height := uint64(1) mock := &mockAccessor{} - ac1, err := cache.GetOrLoad(ctx, key, func(ctx context.Context, key shard.Key) (Accessor, error) { + ac1, err := cache.GetOrLoad(ctx, height, func(ctx context.Context) (eds.AccessorStreamer, error) { return mock, nil }) require.NoError(t, err) @@ -147,8 +156,8 @@ func TestAccessorCache(t *testing.T) { require.NoError(t, err) // add second item - key2 := shard.KeyFromString("key2") - ac2, err := cache.GetOrLoad(ctx, key2, func(ctx context.Context, key shard.Key) (Accessor, error) { + height2 := uint64(2) + ac2, err := cache.GetOrLoad(ctx, height2, func(ctx context.Context) (eds.AccessorStreamer, error) { return mock, nil }) require.NoError(t, err) @@ -158,9 +167,11 @@ func TestAccessorCache(t *testing.T) { // accessor should be closed on removal by eviction mock.checkClosed(t, true) - // check if item evicted - _, err = cache.Get(key) - require.ErrorIs(t, err, errCacheMiss) + // first item should be evicted from cache + has := cache.Has(height) + require.False(t, has) + _, err = cache.Get(height) + require.ErrorIs(t, err, ErrCacheMiss) }) t.Run("close on accessor is not closing underlying accessor", func(t *testing.T) { @@ -170,15 +181,15 @@ func TestAccessorCache(t *testing.T) { require.NoError(t, err) // add accessor to the cache - key := shard.KeyFromString("key") + height := uint64(1) mock := &mockAccessor{} - _, err = cache.GetOrLoad(ctx, key, func(ctx context.Context, key shard.Key) (Accessor, error) { + _, err = 
cache.GetOrLoad(ctx, height, func(ctx context.Context) (eds.AccessorStreamer, error) { return mock, nil }) require.NoError(t, err) // check if item exists - accessor, err := cache.Get(key) + accessor, err := cache.Get(height) require.NoError(t, err) require.NotNil(t, accessor) @@ -197,21 +208,21 @@ func TestAccessorCache(t *testing.T) { require.NoError(t, err) // add accessor to the cache - key := shard.KeyFromString("key") + height := uint64(1) mock := &mockAccessor{} - accessor1, err := cache.GetOrLoad(ctx, key, func(ctx context.Context, key shard.Key) (Accessor, error) { + accessor1, err := cache.GetOrLoad(ctx, height, func(ctx context.Context) (eds.AccessorStreamer, error) { return mock, nil }) require.NoError(t, err) // create second readers - accessor2, err := cache.Get(key) + accessor2, err := cache.Get(height) require.NoError(t, err) // initialize close done := make(chan struct{}) go func() { - err := cache.Remove(key) + err := cache.Remove(height) require.NoError(t, err) close(done) }() @@ -226,9 +237,9 @@ func TestAccessorCache(t *testing.T) { require.NoError(t, err) mock.checkClosed(t, false) - // reads for item that is being evicted should result in errCacheMiss - _, err = cache.Get(key) - require.ErrorIs(t, err, errCacheMiss) + // reads for item that is being evicted should result in ErrCacheMiss + _, err = cache.Get(height) + require.ErrorIs(t, err, ErrCacheMiss) // close second reader and wait for accessor to be closed err = accessor2.Close() @@ -251,24 +262,24 @@ func TestAccessorCache(t *testing.T) { require.NoError(t, err) // add accessor to the cache - key1 := shard.KeyFromString("key1") + height1 := uint64(1) mock1 := &mockAccessor{} - accessor1, err := cache.GetOrLoad(ctx, key1, func(ctx context.Context, key shard.Key) (Accessor, error) { + accessor1, err := cache.GetOrLoad(ctx, height1, func(ctx context.Context) (eds.AccessorStreamer, error) { return mock1, nil }) require.NoError(t, err) // add second accessor, to trigger eviction of the first 
one - key2 := shard.KeyFromString("key2") + height2 := uint64(2) mock2 := &mockAccessor{} - accessor2, err := cache.GetOrLoad(ctx, key2, func(ctx context.Context, key shard.Key) (Accessor, error) { + accessor2, err := cache.GetOrLoad(ctx, height2, func(ctx context.Context) (eds.AccessorStreamer, error) { return mock2, nil }) require.NoError(t, err) // first accessor should be evicted from cache - _, err = cache.Get(key1) - require.ErrorIs(t, err, errCacheMiss) + _, err = cache.Get(height1) + require.ErrorIs(t, err, ErrCacheMiss) // first accessor should not be closed before all refs are released by Close() is calls. mock1.checkClosed(t, false) @@ -286,26 +297,43 @@ func TestAccessorCache(t *testing.T) { } type mockAccessor struct { - m sync.Mutex - data []byte - isClosed bool - returnedBs int + m sync.Mutex + data []byte + isClosed bool } -func (m *mockAccessor) Reader() io.Reader { - m.m.Lock() - defer m.m.Unlock() - return bytes.NewBuffer(m.data) +func (m *mockAccessor) Size(context.Context) int { + panic("implement me") +} + +func (m *mockAccessor) DataHash(context.Context) (share.DataHash, error) { + panic("implement me") +} + +func (m *mockAccessor) AxisRoots(context.Context) (*share.AxisRoots, error) { + panic("implement me") +} + +func (m *mockAccessor) Sample(context.Context, int, int) (shwap.Sample, error) { + panic("implement me") +} + +func (m *mockAccessor) AxisHalf(context.Context, rsmt2d.Axis, int) (eds.AxisHalf, error) { + panic("implement me") } -func (m *mockAccessor) Blockstore() (dagstore.ReadBlockstore, error) { +func (m *mockAccessor) RowNamespaceData(context.Context, share.Namespace, int) (shwap.RowNamespaceData, error) { + panic("implement me") +} + +func (m *mockAccessor) Shares(context.Context) ([]share.Share, error) { + panic("implement me") +} + +func (m *mockAccessor) Reader() (io.Reader, error) { m.m.Lock() defer m.m.Unlock() - if m.returnedBs > 0 { - return nil, errors.New("blockstore already returned") - } - m.returnedBs++ - return 
rbsMock{}, nil + return bytes.NewBuffer(m.data), nil } func (m *mockAccessor) Close() error { @@ -319,32 +347,9 @@ func (m *mockAccessor) Close() error { } func (m *mockAccessor) checkClosed(t *testing.T, expected bool) { - // item will be removed in background, so give it some time to settle + // item will be removed async in background, give it some time to settle time.Sleep(time.Millisecond * 100) m.m.Lock() defer m.m.Unlock() require.Equal(t, expected, m.isClosed) } - -// rbsMock is a dagstore.ReadBlockstore mock -type rbsMock struct{} - -func (r rbsMock) Has(context.Context, cid.Cid) (bool, error) { - panic("implement me") -} - -func (r rbsMock) Get(_ context.Context, _ cid.Cid) (blocks.Block, error) { - panic("implement me") -} - -func (r rbsMock) GetSize(context.Context, cid.Cid) (int, error) { - panic("implement me") -} - -func (r rbsMock) AllKeysChan(context.Context) (<-chan cid.Cid, error) { - panic("implement me") -} - -func (r rbsMock) HashOnRead(bool) { - panic("implement me") -} diff --git a/store/cache/cache.go b/store/cache/cache.go new file mode 100644 index 0000000000..10e7dffcb3 --- /dev/null +++ b/store/cache/cache.go @@ -0,0 +1,39 @@ +package cache + +import ( + "context" + "errors" + + logging "github.com/ipfs/go-log/v2" + "go.opentelemetry.io/otel" + + "github.com/celestiaorg/celestia-node/share/eds" +) + +var ( + log = logging.Logger("store/cache") + meter = otel.Meter("store_cache") +) + +var ErrCacheMiss = errors.New("accessor not found in cache") + +type OpenAccessorFn func(context.Context) (eds.AccessorStreamer, error) + +// Cache is an interface that defines the basic Cache operations. +type Cache interface { + // Has checks if the Cache contains the eds.AccessorStreamer for the given height. + Has(height uint64) bool + + // Get returns the eds.AccessorStreamer for the given height. 
+ Get(height uint64) (eds.AccessorStreamer, error) + + // GetOrLoad attempts to get an item from the Cache and, if not found, invokes + // the provided loader function to load it into the Cache. + GetOrLoad(ctx context.Context, height uint64, open OpenAccessorFn) (eds.AccessorStreamer, error) + + // Remove removes an item from Cache. + Remove(height uint64) error + + // EnableMetrics enables metrics in Cache + EnableMetrics() (unreg func() error, err error) +} diff --git a/store/cache/doublecache.go b/store/cache/doublecache.go new file mode 100644 index 0000000000..d0fb2c47b9 --- /dev/null +++ b/store/cache/doublecache.go @@ -0,0 +1,83 @@ +package cache + +import ( + "context" + "errors" + "fmt" + + "github.com/celestiaorg/celestia-node/share/eds" +) + +// DoubleCache represents a Cache that looks into multiple caches one by one. +type DoubleCache struct { + first, second Cache +} + +// NewDoubleCache creates a new DoubleCache with the provided caches. +func NewDoubleCache(first, second Cache) *DoubleCache { + return &DoubleCache{ + first: first, + second: second, + } +} + +// Has checks if accessor for the height is present on the AccessorCache. +func (mc *DoubleCache) Has(height uint64) bool { + return mc.first.Has(height) || mc.second.Has(height) +} + +// Get looks for an item in all the caches one by one and returns the Cache found item. +func (mc *DoubleCache) Get(height uint64) (eds.AccessorStreamer, error) { + accessor, err := mc.first.Get(height) + if err == nil { + return accessor, nil + } + return mc.second.Get(height) +} + +// GetOrLoad attempts to get an item from the both caches and, if not found, invokes +// the provided loader function to load it into the first Cache. 
+func (mc *DoubleCache) GetOrLoad( + ctx context.Context, + height uint64, + loader OpenAccessorFn, +) (eds.AccessorStreamer, error) { + // look-up in second cache first + accessor, err := mc.second.Get(height) + if err == nil { + return accessor, nil + } + // not found in second, get or load from first one + return mc.first.GetOrLoad(ctx, height, loader) +} + +// Remove removes an item from all underlying caches +func (mc *DoubleCache) Remove(height uint64) error { + err1 := mc.first.Remove(height) + err2 := mc.second.Remove(height) + return errors.Join(err1, err2) +} + +func (mc *DoubleCache) First() Cache { + return mc.first +} + +func (mc *DoubleCache) Second() Cache { + return mc.second +} + +func (mc *DoubleCache) EnableMetrics() (unreg func() error, err error) { + unreg1, err := mc.first.EnableMetrics() + if err != nil { + return nil, fmt.Errorf("while enabling metrics for first cache: %w", err) + } + unreg2, err := mc.second.EnableMetrics() + if err != nil { + return unreg1, fmt.Errorf("while enabling metrics for second cache: %w", err) + } + + unregFn := func() error { + return errors.Join(unreg1(), unreg2()) + } + return unregFn, nil +} diff --git a/share/eds/cache/metrics.go b/store/cache/metrics.go similarity index 70% rename from share/eds/cache/metrics.go rename to store/cache/metrics.go index 701b7e3a71..289ae80a96 100644 --- a/share/eds/cache/metrics.go +++ b/store/cache/metrics.go @@ -15,27 +15,26 @@ const ( type metrics struct { getCounter metric.Int64Counter evictedCounter metric.Int64Counter - - clientReg metric.Registration + reg metric.Registration } func newMetrics(bc *AccessorCache) (*metrics, error) { - metricsPrefix := "eds_blockstore_cache_" + bc.name + metricsPrefix := "eds_cache" + bc.name evictedCounter, err := meter.Int64Counter(metricsPrefix+"_evicted_counter", - metric.WithDescription("eds blockstore cache evicted event counter")) + metric.WithDescription("eds cache evicted event counter")) if err != nil { return nil, err } 
getCounter, err := meter.Int64Counter(metricsPrefix+"_get_counter", - metric.WithDescription("eds blockstore cache evicted event counter")) + metric.WithDescription("eds cache get event counter")) if err != nil { return nil, err } cacheSize, err := meter.Int64ObservableGauge(metricsPrefix+"_size", - metric.WithDescription("total amount of items in blockstore cache"), + metric.WithDescription("total amount of items in cache"), ) if err != nil { return nil, err @@ -45,23 +44,13 @@ func newMetrics(bc *AccessorCache) (*metrics, error) { observer.ObserveInt64(cacheSize, int64(bc.cache.Len())) return nil } - clientReg, err := meter.RegisterCallback(callback, cacheSize) - if err != nil { - return nil, err - } + reg, err := meter.RegisterCallback(callback, cacheSize) return &metrics{ getCounter: getCounter, evictedCounter: evictedCounter, - clientReg: clientReg, - }, nil -} - -func (m *metrics) close() error { - if m == nil { - return nil - } - return m.clientReg.Unregister() + reg: reg, + }, err } func (m *metrics) observeEvicted(failed bool) { diff --git a/store/cache/noop.go b/store/cache/noop.go new file mode 100644 index 0000000000..1a8aeb16c2 --- /dev/null +++ b/store/cache/noop.go @@ -0,0 +1,85 @@ +package cache + +import ( + "context" + "io" + + "github.com/celestiaorg/rsmt2d" + + "github.com/celestiaorg/celestia-node/share" + "github.com/celestiaorg/celestia-node/share/eds" + "github.com/celestiaorg/celestia-node/share/shwap" +) + +var _ Cache = (*NoopCache)(nil) + +// NoopCache implements noop version of Cache interface +type NoopCache struct{} + +func (n NoopCache) Has(uint64) bool { + return false +} + +func (n NoopCache) Get(uint64) (eds.AccessorStreamer, error) { + return nil, ErrCacheMiss +} + +func (n NoopCache) GetOrLoad(ctx context.Context, _ uint64, loader OpenAccessorFn) (eds.AccessorStreamer, error) { + return loader(ctx) +} + +func (n NoopCache) Remove(uint64) error { + return nil +} + +func (n NoopCache) EnableMetrics() (unreg func() error, err 
error) { + noop := func() error { return nil } + return noop, nil +} + +var _ eds.AccessorStreamer = NoopFile{} + +// NoopFile implements noop version of eds.AccessorStreamer interface +type NoopFile struct{} + +func (n NoopFile) Reader() (io.Reader, error) { + return noopReader{}, nil +} + +func (n NoopFile) Size(context.Context) int { + return 0 +} + +func (n NoopFile) DataHash(context.Context) (share.DataHash, error) { + return share.DataHash{}, nil +} + +func (n NoopFile) AxisRoots(context.Context) (*share.AxisRoots, error) { + return &share.AxisRoots{}, nil +} + +func (n NoopFile) Sample(context.Context, int, int) (shwap.Sample, error) { + return shwap.Sample{}, nil +} + +func (n NoopFile) AxisHalf(context.Context, rsmt2d.Axis, int) (eds.AxisHalf, error) { + return eds.AxisHalf{}, nil +} + +func (n NoopFile) RowNamespaceData(context.Context, share.Namespace, int) (shwap.RowNamespaceData, error) { + return shwap.RowNamespaceData{}, nil +} + +func (n NoopFile) Shares(context.Context) ([]share.Share, error) { + return []share.Share{}, nil +} + +func (n NoopFile) Close() error { + return nil +} + +type noopReader struct{} + +func (n noopReader) Read([]byte) (int, error) { + return 0, nil +} diff --git a/store/file/codec.go b/store/file/codec.go new file mode 100644 index 0000000000..a27280be11 --- /dev/null +++ b/store/file/codec.go @@ -0,0 +1,38 @@ +package file + +import ( + "sync" + + "github.com/klauspost/reedsolomon" +) + +var codec Codec + +func init() { + codec = NewCodec() +} + +type Codec interface { + Encoder(len int) (reedsolomon.Encoder, error) +} + +type codecCache struct { + cache sync.Map +} + +func NewCodec() Codec { + return &codecCache{} +} + +func (l *codecCache) Encoder(len int) (reedsolomon.Encoder, error) { + enc, ok := l.cache.Load(len) + if !ok { + var err error + enc, err = reedsolomon.New(len/2, len/2, reedsolomon.WithLeopardGF(true)) + if err != nil { + return nil, err + } + l.cache.Store(len, enc) + } + return enc.(reedsolomon.Encoder), 
nil +} diff --git a/store/file/codec_test.go b/store/file/codec_test.go new file mode 100644 index 0000000000..857c16aff0 --- /dev/null +++ b/store/file/codec_test.go @@ -0,0 +1,83 @@ +package file + +import ( + "fmt" + "testing" + + "github.com/klauspost/reedsolomon" + "github.com/stretchr/testify/require" + + "github.com/celestiaorg/celestia-node/share/sharetest" +) + +func BenchmarkCodec(b *testing.B) { + minSize, maxSize := 32, 128 + + for size := minSize; size <= maxSize; size *= 2 { + // BenchmarkCodec/Leopard/size:32-10 409194 2793 ns/op + // BenchmarkCodec/Leopard/size:64-10 190969 6170 ns/op + // BenchmarkCodec/Leopard/size:128-10 82821 14287 ns/op + b.Run(fmt.Sprintf("Leopard/size:%v", size), func(b *testing.B) { + enc, err := reedsolomon.New(size/2, size/2, reedsolomon.WithLeopardGF(true)) + require.NoError(b, err) + + shards := newShards(b, size, true) + + b.ResetTimer() + for i := 0; i < b.N; i++ { + err = enc.Encode(shards) + require.NoError(b, err) + } + }) + + // BenchmarkCodec/default/size:32-10 222153 5364 ns/op + // BenchmarkCodec/default/size:64-10 58831 20349 ns/op + // BenchmarkCodec/default/size:128-10 14940 80471 ns/op + b.Run(fmt.Sprintf("default/size:%v", size), func(b *testing.B) { + enc, err := reedsolomon.New(size/2, size/2, reedsolomon.WithLeopardGF(false)) + require.NoError(b, err) + + shards := newShards(b, size, true) + + b.ResetTimer() + for i := 0; i < b.N; i++ { + err = enc.Encode(shards) + require.NoError(b, err) + } + }) + + // BenchmarkCodec/default-reconstructSome/size:32-10 1263585 954.4 ns/op + // BenchmarkCodec/default-reconstructSome/size:64-10 762273 1554 ns/op + // BenchmarkCodec/default-reconstructSome/size:128-10 429268 2974 ns/op + b.Run(fmt.Sprintf("default-reconstructSome/size:%v", size), func(b *testing.B) { + enc, err := reedsolomon.New(size/2, size/2, reedsolomon.WithLeopardGF(false)) + require.NoError(b, err) + + shards := newShards(b, size, false) + targets := make([]bool, size) + target := size - 2 + 
targets[target] = true + + b.ResetTimer() + for i := 0; i < b.N; i++ { + err = enc.ReconstructSome(shards, targets) + require.NoError(b, err) + shards[target] = nil + } + }) + } +} + +func newShards(b testing.TB, size int, fillParity bool) [][]byte { + shards := make([][]byte, size) + original := sharetest.RandShares(b, size/2) + copy(shards, original) + + if fillParity { + // fill with parity empty Shares + for j := len(original); j < len(shards); j++ { + shards[j] = make([]byte, len(original[0])) + } + } + return shards +} diff --git a/store/file/file.go b/store/file/file.go new file mode 100644 index 0000000000..50bd7f86cd --- /dev/null +++ b/store/file/file.go @@ -0,0 +1,12 @@ +package file + +import logging "github.com/ipfs/go-log/v2" + +var log = logging.Logger("store/file") + +const ( + // writeBufferSize defines buffer size for optimized batched writes into the file system. + // TODO(@Wondertan): Consider making it configurable + writeBufferSize = 64 << 10 + filePermissions = 0o600 +) diff --git a/store/file/header.go b/store/file/header.go new file mode 100644 index 0000000000..7c2d8c7924 --- /dev/null +++ b/store/file/header.go @@ -0,0 +1,107 @@ +package file + +import ( + "encoding/binary" + "fmt" + "io" + + "github.com/celestiaorg/celestia-node/share" +) + +// headerVOSize is the size of the headerV0 in bytes. It has more space than the headerV0 struct +// to allow for future extensions of the header without breaking compatibility. 
+const headerVOSize int = 64 + +type headerVersion uint8 + +const headerVersionV0 headerVersion = iota + 1 + +type headerV0 struct { + fileVersion fileVersion + + // Taken directly from EDS + shareSize uint16 + squareSize uint16 + + datahash share.DataHash +} + +type fileVersion uint8 + +const ( + fileV0 fileVersion = iota + 1 +) + +func readHeader(r io.Reader) (*headerV0, error) { + // read first byte to determine the fileVersion + var version headerVersion + err := binary.Read(r, binary.LittleEndian, &version) + if err != nil { + return nil, fmt.Errorf("readHeader: %w", err) + } + + switch version { + case headerVersionV0: + h := &headerV0{} + _, err := h.ReadFrom(r) + return h, err + default: + return nil, fmt.Errorf("unsupported header fileVersion: %d", version) + } +} + +func writeHeader(w io.Writer, h *headerV0) error { + err := binary.Write(w, binary.LittleEndian, headerVersionV0) + if err != nil { + return fmt.Errorf("writeHeader: %w", err) + } + _, err = h.WriteTo(w) + return err +} + +func (h *headerV0) SquareSize() int { + return int(h.squareSize) +} + +func (h *headerV0) ShareSize() int { + return int(h.shareSize) +} + +func (h *headerV0) Size() int { + // header size + 1 byte for header fileVersion + return headerVOSize + 1 +} + +func (h *headerV0) RootsSize() int { + // axis roots are stored in two parts: row roots and column roots, each part has size equal to + // the square size. Thus, the total amount of roots is equal to the square size * 2. 
+ return share.AxisRootSize * h.SquareSize() * 2 +} + +func (h *headerV0) OffsetWithRoots() int { + return h.RootsSize() + h.Size() +} + +func (h *headerV0) WriteTo(w io.Writer) (int64, error) { + buf := make([]byte, headerVOSize) + buf[0] = byte(h.fileVersion) + binary.LittleEndian.PutUint16(buf[28:30], h.shareSize) + binary.LittleEndian.PutUint16(buf[30:32], h.squareSize) + copy(buf[32:64], h.datahash) + n, err := w.Write(buf) + return int64(n), err +} + +func (h *headerV0) ReadFrom(r io.Reader) (int64, error) { + bytesHeader := make([]byte, headerVOSize) + n, err := io.ReadFull(r, bytesHeader) + if n != headerVOSize { + return 0, fmt.Errorf("headerV0 ReadFrom: read %d bytes, expected %d", len(bytesHeader), headerVOSize) + } + + h.fileVersion = fileVersion(bytesHeader[0]) + h.shareSize = binary.LittleEndian.Uint16(bytesHeader[28:30]) + h.squareSize = binary.LittleEndian.Uint16(bytesHeader[30:32]) + h.datahash = bytesHeader[32:64] + return int64(headerVOSize), err +} diff --git a/store/file/header_test.go b/store/file/header_test.go new file mode 100644 index 0000000000..25987ac13e --- /dev/null +++ b/store/file/header_test.go @@ -0,0 +1,50 @@ +package file + +import ( + "bytes" + "testing" + + "github.com/stretchr/testify/require" +) + +// Due to bug on macOS we need to specify additional linker flags: +// go test -v -run=^$ -fuzz=Fuzz_writeReadheader -ldflags=-extldflags=-Wl,-ld_classic . + +func Fuzz_writeReadheader(f *testing.F) { + f.Add(uint8(0), uint8(0), uint16(0), uint16(0), []byte{31: 0}) + f.Add(uint8(1), uint8(1), uint16(1), uint16(1), []byte{31: 0}) + f.Add(uint8(1), uint8(1), uint16(1), uint16(1), []byte{100: 1}) + + f.Fuzz(func(t *testing.T, ver, typ uint8, shs, sqs uint16, b []byte) { + // we expect hash to be 32 bytes, crop or extend to 32 bytes. + diff := len(b) - 32 + if diff > 0 { + b = b[:32] + } else { + pad := bytes.Repeat([]byte{0}, -diff) + b = append(b, pad...) 
+ } + + testHdr := headerV0{ + fileVersion: fileVersion(ver), + shareSize: shs, + squareSize: sqs, + datahash: b, + } + + w := bytes.NewBuffer(nil) + err := writeHeader(w, &testHdr) + if err != nil { + return + } + + hdr, err := readHeader(w) + if err != nil { + return + } + + require.Equal(t, hdr.shareSize, testHdr.shareSize) + require.Equal(t, hdr.squareSize, testHdr.squareSize) + require.Equal(t, hdr.datahash, testHdr.datahash) + }) +} diff --git a/store/file/ods.go b/store/file/ods.go new file mode 100644 index 0000000000..8ade66bf53 --- /dev/null +++ b/store/file/ods.go @@ -0,0 +1,415 @@ +package file + +import ( + "bufio" + "context" + "errors" + "fmt" + "io" + "os" + "sync" + + "github.com/celestiaorg/rsmt2d" + + "github.com/celestiaorg/celestia-node/share" + "github.com/celestiaorg/celestia-node/share/eds" + "github.com/celestiaorg/celestia-node/share/shwap" +) + +var _ eds.AccessorStreamer = (*ODS)(nil) + +// ODS implements eds.Accessor as an FS file. +// It stores the original data square(ODS), which is the first quadrant of EDS, +// and it's metadata in file's header. +type ODS struct { + hdr *headerV0 + fl *os.File + + lock sync.RWMutex + // ods stores an in-memory cache of the original data square to enhance read performance. This + // cache is particularly beneficial for operations that require reading the entire square, such as: + // - Serving samples from the fourth quadrant of the square, which necessitates reconstructing data + // from all rows. - Streaming the entire ODS by Reader(), ensuring efficient data delivery without + // repeated file reads. - Serving full ODS data by Shares(). + // Storing the square in memory allows for efficient single-read operations, avoiding the need for + // piecemeal reads by rows or columns, and facilitates quick access to data for these operations. 
+ ods square + // disableCache is a flag that, when set to true, disables the in-memory cache of the original data + // Used for testing and benchmarking purposes, this flag allows for the evaluation of the + // performance. + disableCache bool +} + +// CreateODS creates a new file under given FS path and +// writes the ODS into it out of given EDS. +// It may leave partially written file if any of the writes fail. +func CreateODS( + path string, + roots *share.AxisRoots, + eds *rsmt2d.ExtendedDataSquare, +) error { + mod := os.O_RDWR | os.O_CREATE | os.O_EXCL // ensure we fail if already exist + f, err := os.OpenFile(path, mod, filePermissions) + if err != nil { + return fmt.Errorf("creating ODS file: %w", err) + } + + hdr := &headerV0{ + fileVersion: fileV0, + shareSize: share.Size, + squareSize: uint16(eds.Width()), + datahash: roots.Hash(), + } + + err = writeODSFile(f, roots, eds, hdr) + if errClose := f.Close(); errClose != nil { + err = errors.Join(err, fmt.Errorf("closing created ODS file: %w", errClose)) + } + + return err +} + +// writeQ4File full ODS content into OS File. +func writeODSFile(f *os.File, axisRoots *share.AxisRoots, eds *rsmt2d.ExtendedDataSquare, hdr *headerV0) error { + // buffering gives us ~4x speed up + buf := bufio.NewWriterSize(f, writeBufferSize) + + if err := writeHeader(f, hdr); err != nil { + return fmt.Errorf("writing header: %w", err) + } + + if err := writeAxisRoots(buf, axisRoots); err != nil { + return fmt.Errorf("writing axis roots: %w", err) + } + + if err := writeODS(buf, eds); err != nil { + return fmt.Errorf("writing ODS: %w", err) + } + + if err := buf.Flush(); err != nil { + return fmt.Errorf("flushing ODS file: %w", err) + } + + return nil +} + +// writeODS writes the first quadrant(ODS) of the square to the writer. It writes the quadrant in +// row-major order. Write finishes once all the shares are written or on the first instance of tail +// padding share. Tail padding share are constant and aren't stored. 
+func writeODS(w io.Writer, eds *rsmt2d.ExtendedDataSquare) error { + for i := range eds.Width() / 2 { + for j := range eds.Width() / 2 { + shr := eds.GetCell(i, j) // TODO: Avoid copying inside GetCell + if share.GetNamespace(shr).Equals(share.TailPaddingNamespace) { + return nil + } + + _, err := w.Write(shr) + if err != nil { + return fmt.Errorf("writing share: %w", err) + } + } + } + return nil +} + +// writeAxisRoots writes RowRoots followed by ColumnRoots. +func writeAxisRoots(w io.Writer, roots *share.AxisRoots) error { + for _, root := range roots.RowRoots { + if _, err := w.Write(root); err != nil { + return fmt.Errorf("writing row roots: %w", err) + } + } + + for _, root := range roots.ColumnRoots { + if _, err := w.Write(root); err != nil { + return fmt.Errorf("writing columm roots: %w", err) + } + } + + return nil +} + +// OpenODS opens an existing ODS file under given FS path. +// It only reads the header with metadata. The other content +// of the File is read lazily. +// If file is empty, the ErrEmptyFile is returned. +// File must be closed after usage. +func OpenODS(path string) (*ODS, error) { + f, err := os.Open(path) + if err != nil { + return nil, err + } + + h, err := readHeader(f) + if err != nil { + return nil, err + } + + return &ODS{ + hdr: h, + fl: f, + }, nil +} + +// Size returns EDS size stored in file's header. +func (o *ODS) Size(context.Context) int { + return o.size() +} + +func (o *ODS) size() int { + return int(o.hdr.squareSize) +} + +// DataHash returns root hash of Accessor's underlying EDS. +func (o *ODS) DataHash(context.Context) (share.DataHash, error) { + return o.hdr.datahash, nil +} + +// AxisRoots reads AxisRoots stored in the file. AxisRoots are stored after the header and before +// the ODS data. 
+func (o *ODS) AxisRoots(context.Context) (*share.AxisRoots, error) { + roots := make([]byte, o.hdr.RootsSize()) + n, err := o.fl.ReadAt(roots, int64(o.hdr.Size())) + if err != nil { + return nil, fmt.Errorf("reading axis roots: %w", err) + } + if n != len(roots) { + return nil, fmt.Errorf("reading axis roots: expected %d bytes, got %d", len(roots), n) + } + rowRoots := make([][]byte, o.size()) + colRoots := make([][]byte, o.size()) + for i := 0; i < o.size(); i++ { + rowRoots[i] = roots[i*share.AxisRootSize : (i+1)*share.AxisRootSize] + colRoots[i] = roots[(o.size()+i)*share.AxisRootSize : (o.size()+i+1)*share.AxisRootSize] + } + axisRoots := &share.AxisRoots{ + RowRoots: rowRoots, + ColumnRoots: colRoots, + } + return axisRoots, nil +} + +// Close closes the file. +func (o *ODS) Close() error { + return o.fl.Close() +} + +// Sample returns share and corresponding proof for row and column indices. Implementation can +// choose which axis to use for proof. Chosen axis for proof should be indicated in the returned +// Sample. +func (o *ODS) Sample(ctx context.Context, rowIdx, colIdx int) (shwap.Sample, error) { + // Sample proof axis is selected to optimize read performance. 
+ // - For the first and second quadrants, we read the row axis because it is more efficient to read + // single row than reading full ODS to calculate single column + // - For the third quadrant, we read the column axis because it is more efficient to read single + // column than reading full ODS to calculate single row + // - For the fourth quadrant, it does not matter which axis we read because we need to read full ODS + // to calculate the sample + axisType, axisIdx, shrIdx := rsmt2d.Row, rowIdx, colIdx + if colIdx < o.size()/2 && rowIdx >= o.size()/2 { + axisType, axisIdx, shrIdx = rsmt2d.Col, colIdx, rowIdx + } + + axis, err := o.axis(ctx, axisType, axisIdx) + if err != nil { + return shwap.Sample{}, fmt.Errorf("reading axis: %w", err) + } + + return shwap.SampleFromShares(axis, axisType, axisIdx, shrIdx) +} + +// AxisHalf returns half of shares axis of the given type and index. Side is determined by +// implementation. Implementations should indicate the side in the returned AxisHalf. +func (o *ODS) AxisHalf(_ context.Context, axisType rsmt2d.Axis, axisIdx int) (eds.AxisHalf, error) { + // Read the axis from the file if the axis is a row and from the top half of the square, or if the + // axis is a column and from the left half of the square. + if axisIdx < o.size()/2 { + half, err := o.readAxisHalf(axisType, axisIdx) + if err != nil { + return eds.AxisHalf{}, fmt.Errorf("reading axis half: %w", err) + } + return half, nil + } + + // if axis is from the second half of the square, read full ODS and compute the axis half + ods, err := o.readODS() + if err != nil { + return eds.AxisHalf{}, err + } + + half, err := ods.computeAxisHalf(axisType, axisIdx) + if err != nil { + return eds.AxisHalf{}, fmt.Errorf("computing axis half: %w", err) + } + return half, nil +} + +// RowNamespaceData returns data for the given namespace and row index. 
+func (o *ODS) RowNamespaceData( + ctx context.Context, + namespace share.Namespace, + rowIdx int, +) (shwap.RowNamespaceData, error) { + shares, err := o.axis(ctx, rsmt2d.Row, rowIdx) + if err != nil { + return shwap.RowNamespaceData{}, err + } + return shwap.RowNamespaceDataFromShares(shares, namespace, rowIdx) +} + +// Shares returns data shares extracted from the Accessor. +func (o *ODS) Shares(context.Context) ([]share.Share, error) { + ods, err := o.readODS() + if err != nil { + return nil, err + } + return ods.shares() +} + +// Reader returns binary reader for the file. It reads the shares from the ODS part of the square +// row by row. +func (o *ODS) Reader() (io.Reader, error) { + o.lock.RLock() + ods := o.ods + o.lock.RUnlock() + if ods != nil { + return ods.reader() + } + + offset := o.hdr.OffsetWithRoots() + total := int64(o.hdr.shareSize) * int64(o.size()*o.size()/4) + reader := io.NewSectionReader(o.fl, int64(offset), total) + return reader, nil +} + +func (o *ODS) axis(ctx context.Context, axisType rsmt2d.Axis, axisIdx int) ([]share.Share, error) { + half, err := o.AxisHalf(ctx, axisType, axisIdx) + if err != nil { + return nil, err + } + + axis, err := half.Extended() + if err != nil { + return nil, fmt.Errorf("extending axis half: %w", err) + } + + return axis, nil +} + +func (o *ODS) readAxisHalf(axisType rsmt2d.Axis, axisIdx int) (eds.AxisHalf, error) { + o.lock.RLock() + ods := o.ods + o.lock.RUnlock() + if ods != nil { + return o.ods.axisHalf(axisType, axisIdx) + } + + axisHalf, err := readAxisHalf(o.fl, axisType, axisIdx, o.hdr, o.hdr.OffsetWithRoots()) + if err != nil { + return eds.AxisHalf{}, fmt.Errorf("reading axis half: %w", err) + } + + return eds.AxisHalf{ + Shares: axisHalf, + IsParity: false, + }, nil +} + +func (o *ODS) readODS() (square, error) { + if !o.disableCache { + o.lock.RLock() + ods := o.ods + o.lock.RUnlock() + if ods != nil { + return ods, nil + } + + // not cached, read and cache + o.lock.Lock() + defer o.lock.Unlock() 
+ } + + offset := o.hdr.OffsetWithRoots() + shareSize := o.hdr.ShareSize() + odsBytes := o.hdr.SquareSize() / 2 + odsSizeInBytes := shareSize * odsBytes * odsBytes + reader := io.NewSectionReader(o.fl, int64(offset), int64(odsSizeInBytes)) + ods, err := readSquare(reader, shareSize, o.size()) + if err != nil { + return nil, fmt.Errorf("reading ODS: %w", err) + } + + if !o.disableCache { + o.ods = ods + } + return ods, nil +} + +func readAxisHalf(r io.ReaderAt, axisTp rsmt2d.Axis, axisIdx int, hdr *headerV0, offset int) ([]share.Share, error) { + switch axisTp { + case rsmt2d.Row: + return readRowHalf(r, axisIdx, hdr, offset) + case rsmt2d.Col: + return readColHalf(r, axisIdx, hdr, offset) + default: + return nil, fmt.Errorf("unknown axis") + } +} + +// readRowHalf reads specific Row half from the file in a single IO operation. +// If some or all shares are missing, tail padding shares are returned instead. +func readRowHalf(r io.ReaderAt, rowIdx int, hdr *headerV0, offset int) ([]share.Share, error) { + odsLn := hdr.SquareSize() / 2 + rowOffset := rowIdx * odsLn * hdr.ShareSize() + offset += rowOffset + + shares := make([]share.Share, odsLn) + axsData := make([]byte, odsLn*hdr.ShareSize()) + n, err := r.ReadAt(axsData, int64(offset)) + if err != nil && !errors.Is(err, io.EOF) { + // unknown error + return nil, err + } + + shrsRead := n / hdr.ShareSize() + for i := range shares { + if i > shrsRead-1 { + // partial or empty row was read + // fill the rest with tail padding it + shares[i] = share.TailPadding() + continue + } + shares[i] = axsData[i*hdr.ShareSize() : (i+1)*hdr.ShareSize()] + } + return shares, nil +} + +// readColHalf reads specific Col half from the file in a single IO operation. +// If some or all shares are missing, tail padding shares are returned instead. 
+func readColHalf(r io.ReaderAt, colIdx int, hdr *headerV0, offset int) ([]share.Share, error) { + odsLn := hdr.SquareSize() / 2 + shares := make([]share.Share, odsLn) + for i := range shares { + pos := colIdx + i*odsLn + offset := offset + pos*hdr.ShareSize() + + shr := make(share.Share, hdr.ShareSize()) + n, err := r.ReadAt(shr, int64(offset)) + if err != nil && !errors.Is(err, io.EOF) { + // unknown error + return nil, err + } + if n == 0 { + // no shares left + // fill the rest with tail padding + for ; i < len(shares); i++ { + shares[i] = share.TailPadding() + } + return shares, nil + } + // we got a share + shares[i] = shr + } + return shares, nil +} diff --git a/store/file/ods_q4.go b/store/file/ods_q4.go new file mode 100644 index 0000000000..70c48b5749 --- /dev/null +++ b/store/file/ods_q4.go @@ -0,0 +1,178 @@ +package file + +import ( + "context" + "errors" + "fmt" + "io" + "os" + "sync" + "sync/atomic" + + "github.com/celestiaorg/rsmt2d" + + "github.com/celestiaorg/celestia-node/share" + "github.com/celestiaorg/celestia-node/share/eds" + "github.com/celestiaorg/celestia-node/share/shwap" +) + +var _ eds.AccessorStreamer = (*ODSQ4)(nil) + +// ODSQ4 is an Accessor that combines ODS and Q4 files. +// It extends the ODS with the ability to read Q4 of the EDS. +// Reading from the fourth quadrant allows to efficiently read samples from Q2 and Q4 quadrants of +// the square, as well as reading columns from Q3 and Q4 quadrants. Reading from Q4 in those cases +// is more efficient than reading from Q1, because it would require reading the whole Q1 quadrant +// and reconstructing the data from it. It opens Q4 file lazily on the first read attempt. +type ODSQ4 struct { + ods *ODS + + pathQ4 string + q4Mu sync.Mutex + q4OpenAttempted atomic.Bool + q4 *q4 +} + +// CreateODSQ4 creates ODS and Q4 files under the given FS paths. 
+func CreateODSQ4( + pathODS, pathQ4 string, + roots *share.AxisRoots, + eds *rsmt2d.ExtendedDataSquare, +) error { + errCh := make(chan error) + go func() { + // doing this async shaves off ~27% of time for 128 ODS + // for bigger ODSes the discrepancy is even bigger + err := createQ4(pathQ4, eds) + if err != nil { + err = fmt.Errorf("creating Q4 file: %w", err) + } + + errCh <- err + }() + + if err := CreateODS(pathODS, roots, eds); err != nil { + return fmt.Errorf("creating ODS file: %w", err) + } + + err := <-errCh + if err != nil { + return err + } + + return nil +} + +// ODSWithQ4 returns ODSQ4 instance over ODS. It opens Q4 file lazily under the given path. +func ODSWithQ4(ods *ODS, pathQ4 string) *ODSQ4 { + return &ODSQ4{ + ods: ods, + pathQ4: pathQ4, + } +} + +func (odsq4 *ODSQ4) tryLoadQ4() *q4 { + // If Q4 was attempted to be opened before, return. + if odsq4.q4OpenAttempted.Load() { + return odsq4.q4 + } + + odsq4.q4Mu.Lock() + defer odsq4.q4Mu.Unlock() + if odsq4.q4OpenAttempted.Load() { + return odsq4.q4 + } + + q4, err := openQ4(odsq4.pathQ4, odsq4.ods.hdr) + // store the q4 pointer before setting the attempted flag, so later read attempts can use it + odsq4.q4 = q4 + // even if an error occurred, set the attempted flag to avoid trying to open the file again + odsq4.q4OpenAttempted.Store(true) + if errors.Is(err, os.ErrNotExist) { + return nil + } + if err != nil { + log.Errorf("opening Q4 file %s: %s", odsq4.pathQ4, err) + return nil + } + return q4 +} + +func (odsq4 *ODSQ4) Size(ctx context.Context) int { + return odsq4.ods.Size(ctx) +} + +func (odsq4 *ODSQ4) DataHash(ctx context.Context) (share.DataHash, error) { + return odsq4.ods.DataHash(ctx) +} + +func (odsq4 *ODSQ4) AxisRoots(ctx context.Context) (*share.AxisRoots, error) { + return odsq4.ods.AxisRoots(ctx) +} + +func (odsq4 *ODSQ4) Sample(ctx context.Context, rowIdx, colIdx int) (shwap.Sample, error) { + // use native AxisHalf implementation, to read axis from q4 quadrant when possible + half, err :=
odsq4.AxisHalf(ctx, rsmt2d.Row, rowIdx) + if err != nil { + return shwap.Sample{}, fmt.Errorf("reading axis: %w", err) + } + shares, err := half.Extended() + if err != nil { + return shwap.Sample{}, fmt.Errorf("extending shares: %w", err) + } + return shwap.SampleFromShares(shares, rsmt2d.Row, rowIdx, colIdx) +} + +func (odsq4 *ODSQ4) AxisHalf(ctx context.Context, axisType rsmt2d.Axis, axisIdx int) (eds.AxisHalf, error) { + size := odsq4.Size(ctx) // TODO(@Wondertan): Should return error. + + if axisIdx >= size/2 { + // lazy load Q4 file and read axis from it if loaded + if q4 := odsq4.tryLoadQ4(); q4 != nil { + return q4.axisHalf(axisType, axisIdx) + } + } + + return odsq4.ods.AxisHalf(ctx, axisType, axisIdx) +} + +func (odsq4 *ODSQ4) RowNamespaceData(ctx context.Context, + namespace share.Namespace, + rowIdx int, +) (shwap.RowNamespaceData, error) { + half, err := odsq4.AxisHalf(ctx, rsmt2d.Row, rowIdx) + if err != nil { + return shwap.RowNamespaceData{}, fmt.Errorf("reading axis: %w", err) + } + shares, err := half.Extended() + if err != nil { + return shwap.RowNamespaceData{}, fmt.Errorf("extending shares: %w", err) + } + return shwap.RowNamespaceDataFromShares(shares, namespace, rowIdx) +} + +func (odsq4 *ODSQ4) Shares(ctx context.Context) ([]share.Share, error) { + return odsq4.ods.Shares(ctx) +} + +func (odsq4 *ODSQ4) Reader() (io.Reader, error) { + return odsq4.ods.Reader() +} + +func (odsq4 *ODSQ4) Close() error { + err := odsq4.ods.Close() + if err != nil { + err = fmt.Errorf("closing ODS file: %w", err) + } + + odsq4.q4Mu.Lock() // wait in case file is being opened + defer odsq4.q4Mu.Unlock() + if odsq4.q4 != nil { + errQ4 := odsq4.q4.close() + if errQ4 != nil { + errQ4 = fmt.Errorf("closing Q4 file: %w", errQ4) + err = errors.Join(err, errQ4) + } + } + return err +} diff --git a/store/file/ods_q4_test.go b/store/file/ods_q4_test.go new file mode 100644 index 0000000000..4d9d852e05 --- /dev/null +++ b/store/file/ods_q4_test.go @@ -0,0 +1,100 @@ +package 
file + +import ( + "context" + "strconv" + "testing" + "time" + + "github.com/stretchr/testify/require" + "github.com/tendermint/tendermint/libs/rand" + + "github.com/celestiaorg/rsmt2d" + + "github.com/celestiaorg/celestia-node/share" + "github.com/celestiaorg/celestia-node/share/eds" + "github.com/celestiaorg/celestia-node/share/eds/edstest" +) + +func TestCreateODSQ4File(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + t.Cleanup(cancel) + + edsIn := edstest.RandEDS(t, 8) + odsq4 := createODSQ4File(t, edsIn) + + shares, err := odsq4.Shares(ctx) + require.NoError(t, err) + expected := edsIn.FlattenedODS() + require.Equal(t, expected, shares) + require.NoError(t, odsq4.Close()) +} + +func TestODSQ4File(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), 15*time.Second) + t.Cleanup(cancel) + + ODSSize := 16 + eds.TestSuiteAccessor(ctx, t, createODSQ4Accessor, ODSSize) + eds.TestStreamer(ctx, t, createODSQ4AccessorStreamer, ODSSize) +} + +// BenchmarkAxisFromODSQ4File/Size:32/ProofType:row/squareHalf:0-16 354836 3345 ns/op +// BenchmarkAxisFromODSQ4File/Size:32/ProofType:row/squareHalf:1-16 339547 3187 ns/op +// BenchmarkAxisFromODSQ4File/Size:32/ProofType:col/squareHalf:0-16 69364 16440 ns/op +// BenchmarkAxisFromODSQ4File/Size:32/ProofType:col/squareHalf:1-16 66928 15964 ns/op +// BenchmarkAxisFromODSQ4File/Size:64/ProofType:row/squareHalf:0-16 223290 5184 ns/op +// BenchmarkAxisFromODSQ4File/Size:64/ProofType:row/squareHalf:1-16 194018 5240 ns/op +// BenchmarkAxisFromODSQ4File/Size:64/ProofType:col/squareHalf:0-16 39949 29549 ns/op +// BenchmarkAxisFromODSQ4File/Size:64/ProofType:col/squareHalf:1-16 39356 29912 ns/op +// BenchmarkAxisFromODSQ4File/Size:128/ProofType:row/squareHalf:0-16 134220 8903 ns/op +// BenchmarkAxisFromODSQ4File/Size:128/ProofType:row/squareHalf:1-16 125110 8789 ns/op +// BenchmarkAxisFromODSQ4File/Size:128/ProofType:col/squareHalf:0-16 15075 74996 ns/op +// 
BenchmarkAxisFromODSQ4File/Size:128/ProofType:col/squareHalf:1-16 15530 74855 ns/op +func BenchmarkAxisFromODSQ4File(b *testing.B) { + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + b.Cleanup(cancel) + + minSize, maxSize := 32, 128 + eds.BenchGetHalfAxisFromAccessor(ctx, b, createODSQ4Accessor, minSize, maxSize) +} + +// BenchmarkSampleFromODSQ4File/Size:32/quadrant:1-16 14260 82827 ns/op +// BenchmarkSampleFromODSQ4File/Size:32/quadrant:2-16 14281 85465 ns/op +// BenchmarkSampleFromODSQ4File/Size:32/quadrant:3-16 12938 91213 ns/op +// BenchmarkSampleFromODSQ4File/Size:32/quadrant:4-16 12934 94077 ns/op +// BenchmarkSampleFromODSQ4File/Size:64/quadrant:1-16 7497 172978 ns/op +// BenchmarkSampleFromODSQ4File/Size:64/quadrant:2-16 6332 191139 ns/op +// BenchmarkSampleFromODSQ4File/Size:64/quadrant:3-16 5852 214140 ns/op +// BenchmarkSampleFromODSQ4File/Size:64/quadrant:4-16 5899 215875 ns/op +// BenchmarkSampleFromODSQ4File/Size:128/quadrant:1-16 3520 399728 ns/op +// BenchmarkSampleFromODSQ4File/Size:128/quadrant:2-16 3242 410557 ns/op +// BenchmarkSampleFromODSQ4File/Size:128/quadrant:3-16 2590 424491 ns/op +// BenchmarkSampleFromODSQ4File/Size:128/quadrant:4-16 2812 444697 ns/op +func BenchmarkSampleFromODSQ4File(b *testing.B) { + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + b.Cleanup(cancel) + + minSize, maxSize := 32, 128 + eds.BenchGetSampleFromAccessor(ctx, b, createODSQ4Accessor, minSize, maxSize) +} + +func createODSQ4AccessorStreamer(t testing.TB, eds *rsmt2d.ExtendedDataSquare) eds.AccessorStreamer { + return createODSQ4File(t, eds) +} + +func createODSQ4Accessor(t testing.TB, eds *rsmt2d.ExtendedDataSquare) eds.Accessor { + return createODSQ4File(t, eds) +} + +func createODSQ4File(t testing.TB, eds *rsmt2d.ExtendedDataSquare) *ODSQ4 { + path := t.TempDir() + "/" + strconv.Itoa(rand.Intn(1000)) + roots, err := share.NewAxisRoots(eds) + require.NoError(t, err) + pathODS, pathQ4 := path+".ods", 
path+".q4" + err = CreateODSQ4(pathODS, pathQ4, roots, eds) + require.NoError(t, err) + ods, err := OpenODS(pathODS) + require.NoError(t, err) + return ODSWithQ4(ods, pathQ4) +} diff --git a/store/file/ods_test.go b/store/file/ods_test.go new file mode 100644 index 0000000000..2866072019 --- /dev/null +++ b/store/file/ods_test.go @@ -0,0 +1,175 @@ +package file + +import ( + "context" + "strconv" + "testing" + "time" + + "github.com/stretchr/testify/require" + "github.com/tendermint/tendermint/libs/rand" + + "github.com/celestiaorg/rsmt2d" + + "github.com/celestiaorg/celestia-node/share" + "github.com/celestiaorg/celestia-node/share/eds" + "github.com/celestiaorg/celestia-node/share/eds/edstest" +) + +func TestCreateODSFile(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + t.Cleanup(cancel) + + edsIn := edstest.RandEDS(t, 8) + f := createODSFile(t, edsIn) + readRoots, err := share.NewAxisRoots(edsIn) + require.NoError(t, err) + + shares, err := f.Shares(ctx) + require.NoError(t, err) + + expected := edsIn.FlattenedODS() + require.Equal(t, expected, shares) + + roots, err := f.AxisRoots(ctx) + require.NoError(t, err) + require.Equal(t, share.DataHash(roots.Hash()), f.hdr.datahash) + require.True(t, roots.Equals(readRoots)) + require.NoError(t, f.Close()) +} + +func TestReadODSFromFile(t *testing.T) { + eds := edstest.RandEDS(t, 8) + f := createODSFile(t, eds) + + ods, err := f.readODS() + require.NoError(t, err) + for i, row := range ods { + original := eds.Row(uint(i))[:eds.Width()/2] + require.True(t, len(original) == len(row)) + require.Equal(t, original, row) + } +} + +func TestODSFile(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), 15*time.Second) + t.Cleanup(cancel) + + ODSSize := 16 + eds.TestSuiteAccessor(ctx, t, createODSAccessor, ODSSize) + eds.TestStreamer(ctx, t, createCachedStreamer, ODSSize) + eds.TestStreamer(ctx, t, createODSAccessorStreamer, ODSSize) +} + +// 
BenchmarkAxisFromODSFile/Size:32/ProofType:row/squareHalf:0-16 382011 3104 ns/op +// BenchmarkAxisFromODSFile/Size:32/ProofType:row/squareHalf:1-16 9320 122408 ns/op +// BenchmarkAxisFromODSFile/Size:32/ProofType:col/squareHalf:0-16 4408911 266.5 ns/op +// BenchmarkAxisFromODSFile/Size:32/ProofType:col/squareHalf:1-16 9488 119472 ns/op +// BenchmarkAxisFromODSFile/Size:64/ProofType:row/squareHalf:0-16 240913 5239 ns/op +// BenchmarkAxisFromODSFile/Size:64/ProofType:row/squareHalf:1-16 1018 1249622 ns/op +// BenchmarkAxisFromODSFile/Size:64/ProofType:col/squareHalf:0-16 2614063 451.8 ns/op +// BenchmarkAxisFromODSFile/Size:64/ProofType:col/squareHalf:1-16 1917 661510 ns/op +// BenchmarkAxisFromODSFile/Size:128/ProofType:row/squareHalf:0-16 119324 10425 ns/op +// BenchmarkAxisFromODSFile/Size:128/ProofType:row/squareHalf:1-16 163 9926752 ns/op +// BenchmarkAxisFromODSFile/Size:128/ProofType:col/squareHalf:0-16 1634124 726.2 ns/op +// BenchmarkAxisFromODSFile/Size:128/ProofType:col/squareHalf:1-16 205 5508394 ns/op +func BenchmarkAxisFromODSFile(b *testing.B) { + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + b.Cleanup(cancel) + + minSize, maxSize := 32, 128 + eds.BenchGetHalfAxisFromAccessor(ctx, b, createODSAccessor, minSize, maxSize) +} + +// BenchmarkAxisFromODSFileDisabledCache/Size:32/ProofType:row/squareHalf:0-16 378975 3141 ns/op +// BenchmarkAxisFromODSFileDisabledCache/Size:32/ProofType:row/squareHalf:1-16 1026 1175651 ns/op +// BenchmarkAxisFromODSFileDisabledCache/Size:32/ProofType:col/squareHalf:0-16 80200 14721 ns/op +// BenchmarkAxisFromODSFileDisabledCache/Size:32/ProofType:col/squareHalf:1-16 1014 1180527 ns/op +// BenchmarkAxisFromODSFileDisabledCache/Size:64/ProofType:row/squareHalf:0-16 212041 5417 ns/op +// BenchmarkAxisFromODSFileDisabledCache/Size:64/ProofType:row/squareHalf:1-16 253 4205953 ns/op +// BenchmarkAxisFromODSFileDisabledCache/Size:64/ProofType:col/squareHalf:0-16 35289 34033 ns/op +// 
BenchmarkAxisFromODSFileDisabledCache/Size:64/ProofType:col/squareHalf:1-16 325 3229517 ns/op +// BenchmarkAxisFromODSFileDisabledCache/Size:128/ProofType:row/squareHalf:0-16 132261 8535 ns/op +// BenchmarkAxisFromODSFileDisabledCache/Size:128/ProofType:row/squareHalf:1-16 48 22963229 ns/op +// BenchmarkAxisFromODSFileDisabledCache/Size:128/ProofType:col/squareHalf:0-16 19053 62858 ns/op +// BenchmarkAxisFromODSFileDisabledCache/Size:128/ProofType:col/squareHalf:1-16 48 21185201 ns/op +func BenchmarkAxisFromODSFileDisabledCache(b *testing.B) { + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + b.Cleanup(cancel) + + minSize, maxSize := 32, 128 + eds.BenchGetHalfAxisFromAccessor(ctx, b, createODSFileDisabledCache, minSize, maxSize) +} + +// BenchmarkSampleFromODSFile/Size:32/quadrant:1-16 13684 87558 ns/op +// BenchmarkSampleFromODSFile/Size:32/quadrant:2-16 13358 85677 ns/op +// BenchmarkSampleFromODSFile/Size:32/quadrant:3-16 10000 102631 ns/op +// BenchmarkSampleFromODSFile/Size:32/quadrant:4-16 5175 222615 ns/op +// BenchmarkSampleFromODSFile/Size:64/quadrant:1-16 7142 173784 ns/op +// BenchmarkSampleFromODSFile/Size:64/quadrant:2-16 6820 171602 ns/op +// BenchmarkSampleFromODSFile/Size:64/quadrant:3-16 5232 201875 ns/op +// BenchmarkSampleFromODSFile/Size:64/quadrant:4-16 1448 1035275 ns/op +// BenchmarkSampleFromODSFile/Size:128/quadrant:1-16 3829 359528 ns/op +// BenchmarkSampleFromODSFile/Size:128/quadrant:2-16 3303 358142 ns/op +// BenchmarkSampleFromODSFile/Size:128/quadrant:3-16 2666 431895 ns/op +// BenchmarkSampleFromODSFile/Size:128/quadrant:4-16 183 7347936 ns/op +func BenchmarkSampleFromODSFile(b *testing.B) { + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + b.Cleanup(cancel) + + minSize, maxSize := 32, 128 + eds.BenchGetSampleFromAccessor(ctx, b, createODSAccessor, minSize, maxSize) +} + +// BenchmarkSampleFromODSFileDisabledCache/Size:32/quadrant:1 +// 
BenchmarkSampleFromODSFileDisabledCache/Size:32/quadrant:1-16 13152 85301 ns/op +// BenchmarkSampleFromODSFileDisabledCache/Size:32/quadrant:2-16 14140 84876 ns/op +// BenchmarkSampleFromODSFileDisabledCache/Size:32/quadrant:3-16 11756 102360 ns/op +// BenchmarkSampleFromODSFileDisabledCache/Size:32/quadrant:4-16 985 1292232 ns/op +// BenchmarkSampleFromODSFileDisabledCache/Size:64/quadrant:1-16 7678 172306 ns/op +// BenchmarkSampleFromODSFileDisabledCache/Size:64/quadrant:2-16 5744 176533 ns/op +// BenchmarkSampleFromODSFileDisabledCache/Size:64/quadrant:3-16 6022 207884 ns/op +// BenchmarkSampleFromODSFileDisabledCache/Size:64/quadrant:4-16 304 3881858 ns/op +// BenchmarkSampleFromODSFileDisabledCache/Size:128/quadrant:1-16 3697 355835 ns/op +// BenchmarkSampleFromODSFileDisabledCache/Size:128/quadrant:2-16 3558 360162 ns/op +// BenchmarkSampleFromODSFileDisabledCache/Size:128/quadrant:3-16 3027 410976 ns/op +// BenchmarkSampleFromODSFileDisabledCache/Size:128/quadrant:4-16 54 21796460 ns/op +func BenchmarkSampleFromODSFileDisabledCache(b *testing.B) { + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + b.Cleanup(cancel) + + minSize, maxSize := 32, 128 + eds.BenchGetSampleFromAccessor(ctx, b, createODSFileDisabledCache, minSize, maxSize) +} + +func createODSAccessorStreamer(t testing.TB, eds *rsmt2d.ExtendedDataSquare) eds.AccessorStreamer { + return createODSFile(t, eds) +} + +func createODSAccessor(t testing.TB, eds *rsmt2d.ExtendedDataSquare) eds.Accessor { + return createODSFile(t, eds) +} + +func createCachedStreamer(t testing.TB, eds *rsmt2d.ExtendedDataSquare) eds.AccessorStreamer { + f := createODSFile(t, eds) + _, err := f.readODS() + require.NoError(t, err) + return f +} + +func createODSFile(t testing.TB, eds *rsmt2d.ExtendedDataSquare) *ODS { + path := t.TempDir() + "/" + strconv.Itoa(rand.Intn(1000)) + roots, err := share.NewAxisRoots(eds) + require.NoError(t, err) + err = CreateODS(path, roots, eds) + require.NoError(t, err) 
+ ods, err := OpenODS(path) + require.NoError(t, err) + return ods +} + +func createODSFileDisabledCache(t testing.TB, eds *rsmt2d.ExtendedDataSquare) eds.Accessor { + ods := createODSFile(t, eds) + ods.disableCache = true + return ods +} diff --git a/store/file/q4.go b/store/file/q4.go new file mode 100644 index 0000000000..503ecfb397 --- /dev/null +++ b/store/file/q4.go @@ -0,0 +1,107 @@ +package file + +import ( + "bufio" + "errors" + "fmt" + "io" + "os" + + "github.com/celestiaorg/rsmt2d" + + "github.com/celestiaorg/celestia-node/share/eds" +) + +// q4 stores the fourth quadrant of the square. +type q4 struct { + hdr *headerV0 + file *os.File +} + +// createQ4 creates a new file under given FS path and +// writes the Q4 into it out of given EDS. +// It may leave partially written file if any of the writes fail. +func createQ4( + path string, + eds *rsmt2d.ExtendedDataSquare, +) error { + mod := os.O_RDWR | os.O_CREATE | os.O_EXCL // ensure we fail if already exist + f, err := os.OpenFile(path, mod, filePermissions) + if err != nil { + return fmt.Errorf("creating Q4 file: %w", err) + } + + err = writeQ4File(f, eds) + if errClose := f.Close(); errClose != nil { + err = errors.Join(err, fmt.Errorf("closing created Q4 file: %w", errClose)) + } + + return err +} + +// writeQ4File full Q4 content into OS File. +func writeQ4File(f *os.File, eds *rsmt2d.ExtendedDataSquare) error { + // buffering gives us ~4x speed up + buf := bufio.NewWriterSize(f, writeBufferSize) + + if err := writeQ4(buf, eds); err != nil { + return fmt.Errorf("writing Q4: %w", err) + } + + if err := buf.Flush(); err != nil { + return fmt.Errorf("flushing Q4: %w", err) + } + + return nil +} + +// writeQ4 writes the forth quadrant of the square to the writer. It writes the quadrant in +// row-major order. 
+func writeQ4(w io.Writer, eds *rsmt2d.ExtendedDataSquare) error { + half := eds.Width() / 2 + for i := range half { + for j := range half { + shr := eds.GetCell(i+half, j+half) // TODO: Avoid copying inside GetCell + _, err := w.Write(shr) + if err != nil { + return fmt.Errorf("writing share: %w", err) + } + } + } + return nil +} + +// openQ4 opens an existing Q4 file under given FS path. +func openQ4(path string, hdr *headerV0) (*q4, error) { + f, err := os.Open(path) + if err != nil { + return nil, err + } + + return &q4{ + hdr: hdr, + file: f, + }, nil +} + +func (q4 *q4) close() error { + return q4.file.Close() +} + +func (q4 *q4) axisHalf(axisType rsmt2d.Axis, axisIdx int) (eds.AxisHalf, error) { + size := q4.hdr.SquareSize() + q4AxisIdx := axisIdx - size/2 + if q4AxisIdx < 0 { + return eds.AxisHalf{}, fmt.Errorf("invalid axis index for Q4: %d", axisIdx) + } + + axisHalf, err := readAxisHalf(q4.file, axisType, q4AxisIdx, q4.hdr, 0) + if err != nil { + return eds.AxisHalf{}, fmt.Errorf("reading axis half from Q4: %w", err) + } + + return eds.AxisHalf{ + Shares: axisHalf, + IsParity: true, + }, nil +} diff --git a/store/file/square.go b/store/file/square.go new file mode 100644 index 0000000000..c1a432e3a8 --- /dev/null +++ b/store/file/square.go @@ -0,0 +1,139 @@ +package file + +import ( + "fmt" + "io" + + "golang.org/x/sync/errgroup" + + "github.com/celestiaorg/rsmt2d" + + "github.com/celestiaorg/celestia-node/share" + "github.com/celestiaorg/celestia-node/share/eds" +) + +type square [][]share.Share + +// readSquare reads Shares from the reader and returns a square. It assumes that the reader is +// positioned at the beginning of the Shares. It knows the size of the Shares and the size of the +// square, so reads from reader are limited to exactly the amount of data required. 
+func readSquare(r io.Reader, shareSize, edsSize int) (square, error) { + odsLn := edsSize / 2 + + shares, err := eds.ReadShares(r, shareSize, odsLn) + if err != nil { + return nil, fmt.Errorf("reading shares: %w", err) + } + square := make(square, odsLn) + for i := range square { + square[i] = shares[i*odsLn : (i+1)*odsLn] + } + return square, nil +} + +func (s square) reader() (io.Reader, error) { + if s == nil { + return nil, fmt.Errorf("ods file not cached") + } + getShare := func(rowIdx, colIdx int) ([]byte, error) { + return s[rowIdx][colIdx], nil + } + reader := eds.NewShareReader(s.size(), getShare) + return reader, nil +} + +func (s square) size() int { + return len(s) +} + +func (s square) shares() ([]share.Share, error) { + shares := make([]share.Share, 0, s.size()*s.size()) + for _, row := range s { + shares = append(shares, row...) + } + return shares, nil +} + +func (s square) axisHalf(axisType rsmt2d.Axis, axisIdx int) (eds.AxisHalf, error) { + if s == nil { + return eds.AxisHalf{}, fmt.Errorf("square is nil") + } + + if axisIdx >= s.size() { + return eds.AxisHalf{}, fmt.Errorf("index is out of square bounds") + } + + // square stores rows directly in high level slice, so we can return by accessing row by index + if axisType == rsmt2d.Row { + row := s[axisIdx] + return eds.AxisHalf{ + Shares: row, + IsParity: false, + }, nil + } + + // construct half column from row ordered square + col := make([]share.Share, s.size()) + for i := 0; i < s.size(); i++ { + col[i] = s[i][axisIdx] + } + return eds.AxisHalf{ + Shares: col, + IsParity: false, + }, nil +} + +func (s square) computeAxisHalf( + axisType rsmt2d.Axis, + axisIdx int, +) (eds.AxisHalf, error) { + shares := make([]share.Share, s.size()) + + // extend opposite half of the square while collecting Shares for the first half of required axis + g := errgroup.Group{} + opposite := oppositeAxis(axisType) + for i := 0; i < s.size(); i++ { + g.Go(func() error { + half, err := s.axisHalf(opposite, i) + if 
err != nil { + return err + } + + enc, err := codec.Encoder(s.size() * 2) + if err != nil { + return fmt.Errorf("getting encoder: %w", err) + } + + shards := make([][]byte, s.size()*2) + if half.IsParity { + copy(shards[s.size():], half.Shares) + } else { + copy(shards, half.Shares) + } + + target := make([]bool, s.size()*2) + target[axisIdx] = true + + err = enc.ReconstructSome(shards, target) + if err != nil { + return fmt.Errorf("reconstruct some: %w", err) + } + + shares[i] = shards[axisIdx] + return nil + }) + } + + err := g.Wait() + return eds.AxisHalf{ + Shares: shares, + IsParity: false, + }, err +} + +func oppositeAxis(axis rsmt2d.Axis) rsmt2d.Axis { + if axis == rsmt2d.Col { + return rsmt2d.Row + } + return rsmt2d.Col +} diff --git a/store/getter.go b/store/getter.go new file mode 100644 index 0000000000..7a78aa8fc5 --- /dev/null +++ b/store/getter.go @@ -0,0 +1,94 @@ +package store + +import ( + "context" + "errors" + "fmt" + + "github.com/celestiaorg/rsmt2d" + + "github.com/celestiaorg/celestia-node/header" + "github.com/celestiaorg/celestia-node/libs/utils" + "github.com/celestiaorg/celestia-node/share" + "github.com/celestiaorg/celestia-node/share/eds" + "github.com/celestiaorg/celestia-node/share/shwap" +) + +var _ shwap.Getter = (*Getter)(nil) + +type Getter struct { + store *Store +} + +func NewGetter(store *Store) *Getter { + return &Getter{store: store} +} + +func (g *Getter) GetShare(ctx context.Context, h *header.ExtendedHeader, row, col int) (share.Share, error) { + acc, err := g.store.GetByHeight(ctx, h.Height()) + if err != nil { + if errors.Is(err, ErrNotFound) { + return nil, shwap.ErrNotFound + } + return nil, fmt.Errorf("get accessor from store:%w", err) + } + logger := log.With( + "height", h.Height(), + "row", row, + "col", col, + ) + defer utils.CloseAndLog(logger, "getter/sample", acc) + + sample, err := acc.Sample(ctx, row, col) + if err != nil { + return nil, fmt.Errorf("get sample from accessor:%w", err) + } + return sample.Share, 
nil +} + +func (g *Getter) GetEDS(ctx context.Context, h *header.ExtendedHeader) (*rsmt2d.ExtendedDataSquare, error) { + acc, err := g.store.GetByHeight(ctx, h.Height()) + if err != nil { + if errors.Is(err, ErrNotFound) { + return nil, shwap.ErrNotFound + } + return nil, fmt.Errorf("get accessor from store:%w", err) + } + logger := log.With("height", h.Height()) + defer utils.CloseAndLog(logger, "getter/eds", acc) + + shares, err := acc.Shares(ctx) + if err != nil { + return nil, fmt.Errorf("get shares from accessor:%w", err) + } + rsmt2d, err := eds.Rsmt2DFromShares(shares, len(h.DAH.RowRoots)/2) + if err != nil { + return nil, fmt.Errorf("build eds from shares:%w", err) + } + return rsmt2d.ExtendedDataSquare, nil +} + +func (g *Getter) GetSharesByNamespace( + ctx context.Context, + h *header.ExtendedHeader, + ns share.Namespace, +) (shwap.NamespaceData, error) { + acc, err := g.store.GetByHeight(ctx, h.Height()) + if err != nil { + if errors.Is(err, ErrNotFound) { + return nil, shwap.ErrNotFound + } + return nil, fmt.Errorf("get accessor from store:%w", err) + } + logger := log.With( + "height", h.Height(), + "namespace", ns.String(), + ) + defer utils.CloseAndLog(logger, "getter/nd", acc) + + nd, err := eds.NamespaceData(ctx, acc, ns) + if err != nil { + return nil, fmt.Errorf("get nd from accessor:%w", err) + } + return nd, nil +} diff --git a/store/getter_test.go b/store/getter_test.go new file mode 100644 index 0000000000..9e2b06feda --- /dev/null +++ b/store/getter_test.go @@ -0,0 +1,92 @@ +package store + +import ( + "context" + "sync/atomic" + "testing" + + "github.com/stretchr/testify/require" + + "github.com/celestiaorg/celestia-node/header/headertest" + "github.com/celestiaorg/celestia-node/share/eds/edstest" + "github.com/celestiaorg/celestia-node/share/sharetest" + "github.com/celestiaorg/celestia-node/share/shwap" +) + +func TestStoreGetter(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + edsStore, 
err := NewStore(DefaultParameters(), t.TempDir()) + require.NoError(t, err) + + sg := NewGetter(edsStore) + height := atomic.Uint64{} + + t.Run("GetShare", func(t *testing.T) { + eds, roots := randomEDS(t) + eh := headertest.RandExtendedHeaderWithRoot(t, roots) + height := height.Add(1) + eh.RawHeader.Height = int64(height) + + err := edsStore.PutODSQ4(ctx, eh.DAH, height, eds) + require.NoError(t, err) + + squareSize := int(eds.Width()) + for i := 0; i < squareSize; i++ { + for j := 0; j < squareSize; j++ { + share, err := sg.GetShare(ctx, eh, i, j) + require.NoError(t, err) + require.Equal(t, eds.GetCell(uint(i), uint(j)), share) + } + } + + // doesn't panic on indexes too high + _, err = sg.GetShare(ctx, eh, squareSize, squareSize) + require.ErrorIs(t, err, shwap.ErrOutOfBounds) + }) + + t.Run("GetEDS", func(t *testing.T) { + eds, roots := randomEDS(t) + eh := headertest.RandExtendedHeaderWithRoot(t, roots) + height := height.Add(1) + eh.RawHeader.Height = int64(height) + + err := edsStore.PutODSQ4(ctx, eh.DAH, height, eds) + require.NoError(t, err) + + retrievedEDS, err := sg.GetEDS(ctx, eh) + require.NoError(t, err) + require.True(t, eds.Equals(retrievedEDS)) + + // root not found + eh.RawHeader.Height = 666 + _, err = sg.GetEDS(ctx, eh) + require.ErrorIs(t, err, shwap.ErrNotFound) + }) + + t.Run("GetSharesByNamespace", func(t *testing.T) { + ns := sharetest.RandV0Namespace() + eds, roots := edstest.RandEDSWithNamespace(t, ns, 8, 16) + eh := headertest.RandExtendedHeaderWithRoot(t, roots) + height := height.Add(1) + eh.RawHeader.Height = int64(height) + err := edsStore.PutODSQ4(ctx, eh.DAH, height, eds) + require.NoError(t, err) + + shares, err := sg.GetSharesByNamespace(ctx, eh, ns) + require.NoError(t, err) + require.NoError(t, shares.Verify(eh.DAH, ns)) + + // namespace not found + randNamespace := sharetest.RandV0Namespace() + emptyShares, err := sg.GetSharesByNamespace(ctx, eh, randNamespace) + require.NoError(t, err) + require.Empty(t, 
emptyShares.Flatten()) + + // root not found + eh.RawHeader.Height = 666 + _, err = sg.GetSharesByNamespace(ctx, eh, ns) + require.ErrorIs(t, err, shwap.ErrNotFound) + }) +} diff --git a/store/metrics.go b/store/metrics.go new file mode 100644 index 0000000000..53720493e6 --- /dev/null +++ b/store/metrics.go @@ -0,0 +1,180 @@ +package store + +import ( + "context" + "fmt" + "time" + + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/metric" + + "github.com/celestiaorg/celestia-node/store/cache" +) + +const ( + failedKey = "failed" + withQ4Key = "with_q4" + sizeKey = "eds_size" +) + +var meter = otel.Meter("store") + +type metrics struct { + put metric.Float64Histogram + putExists metric.Int64Counter + get metric.Float64Histogram + has metric.Float64Histogram + removeODSQ4 metric.Float64Histogram + removeQ4 metric.Float64Histogram + unreg func() error +} + +func (s *Store) WithMetrics() error { + put, err := meter.Float64Histogram("eds_store_put_time_histogram", + metric.WithDescription("eds store put time histogram(s)")) + if err != nil { + return err + } + + putExists, err := meter.Int64Counter("eds_store_put_exists_counter", + metric.WithDescription("eds store put file exists")) + if err != nil { + return err + } + + get, err := meter.Float64Histogram("eds_store_get_time_histogram", + metric.WithDescription("eds store get time histogram(s)")) + if err != nil { + return err + } + + has, err := meter.Float64Histogram("eds_store_has_time_histogram", + metric.WithDescription("eds store has time histogram(s)")) + if err != nil { + return err + } + + removeQ4, err := meter.Float64Histogram("eds_store_remove_q4_time_histogram", + metric.WithDescription("eds store remove q4 data time histogram(s)")) + if err != nil { + return err + } + + removeODSQ4, err := meter.Float64Histogram("eds_store_remove_odsq4_time_histogram", + metric.WithDescription("eds store remove odsq4 file data time histogram(s)")) + if err != nil { + return 
err + } + + s.metrics = &metrics{ + put: put, + putExists: putExists, + get: get, + has: has, + removeODSQ4: removeODSQ4, + removeQ4: removeQ4, + } + return s.metrics.addCacheMetrics(s.cache) +} + +// addCacheMetrics adds cache metrics to store metrics +func (m *metrics) addCacheMetrics(c cache.Cache) error { + if m == nil { + return nil + } + unreg, err := c.EnableMetrics() + if err != nil { + return fmt.Errorf("while enabling metrics for cache: %w", err) + } + m.unreg = unreg + return nil +} + +func (m *metrics) observePut( + ctx context.Context, + dur time.Duration, + size uint, + withQ4 bool, + failed bool, +) { + if m == nil { + return + } + if ctx.Err() != nil { + ctx = context.Background() + } + + m.put.Record(ctx, dur.Seconds(), metric.WithAttributes( + attribute.Bool(failedKey, failed), + attribute.Bool(withQ4Key, withQ4), + attribute.Int(sizeKey, int(size)), + ), + ) +} + +func (m *metrics) observePutExist(ctx context.Context) { + if m == nil { + return + } + if ctx.Err() != nil { + ctx = context.Background() + } + + m.putExists.Add(ctx, 1) +} + +func (m *metrics) observeGet(ctx context.Context, dur time.Duration, failed bool) { + if m == nil { + return + } + if ctx.Err() != nil { + ctx = context.Background() + } + + m.get.Record(ctx, dur.Seconds(), metric.WithAttributes( + attribute.Bool(failedKey, failed))) +} + +func (m *metrics) observeHas(ctx context.Context, dur time.Duration, failed bool) { + if m == nil { + return + } + if ctx.Err() != nil { + ctx = context.Background() + } + + m.has.Record(ctx, dur.Seconds(), metric.WithAttributes( + attribute.Bool(failedKey, failed))) +} + +func (m *metrics) observeRemoveODSQ4(ctx context.Context, dur time.Duration, failed bool) { + if m == nil { + return + } + if ctx.Err() != nil { + ctx = context.Background() + } + + m.removeODSQ4.Record(ctx, dur.Seconds(), metric.WithAttributes( + attribute.Bool(failedKey, failed))) +} + +func (m *metrics) observeRemoveQ4(ctx context.Context, dur time.Duration, failed bool) { 
+ if m == nil { + return + } + if ctx.Err() != nil { + ctx = context.Background() + } + + m.removeQ4.Record(ctx, dur.Seconds(), metric.WithAttributes( + attribute.Bool(failedKey, failed))) +} + +func (m *metrics) close() error { + if m == nil { + return nil + } + return m.unreg() +} diff --git a/store/store.go b/store/store.go new file mode 100644 index 0000000000..ac8951e50b --- /dev/null +++ b/store/store.go @@ -0,0 +1,507 @@ +package store + +import ( + "context" + "errors" + "fmt" + "os" + "path/filepath" + "strconv" + "time" + + logging "github.com/ipfs/go-log/v2" + + "github.com/celestiaorg/rsmt2d" + + "github.com/celestiaorg/celestia-node/libs/utils" + "github.com/celestiaorg/celestia-node/share" + "github.com/celestiaorg/celestia-node/share/eds" + "github.com/celestiaorg/celestia-node/store/cache" + "github.com/celestiaorg/celestia-node/store/file" +) + +var log = logging.Logger("edsstore") + +const ( + blocksPath = "blocks" + heightsPath = blocksPath + "/heights" + odsFileExt = ".ods" + q4FileExt = ".q4" + defaultDirPerm = 0o755 +) + +var ErrNotFound = errors.New("eds not found in store") + +// Store is a storage for EDS files. It persists EDS files on disk in form of Q1Q4 files or ODS +// files. It provides methods to put, get and remove EDS files. It has two caches: recent eds cache +// and availability cache. Recent eds cache is used to cache recent blocks. Availability cache is +// used to cache blocks that are accessed by sample requests. Store is thread-safe. +type Store struct { + // basepath is the root directory of the store + basepath string + // cache is used to cache recent blocks and blocks that are accessed frequently + cache cache.Cache + // stripedLocks is used to synchronize parallel operations + stripLock *striplock + metrics *metrics +} + +// NewStore creates a new EDS Store under the given basepath and datastore. 
+func NewStore(params *Parameters, basePath string) (*Store, error) { + err := params.Validate() + if err != nil { + return nil, err + } + + // ensure the blocks dir exists + blocksDir := filepath.Join(basePath, blocksPath) + if err := mkdir(blocksDir); err != nil { + return nil, fmt.Errorf("ensuring blocks directory: %w", err) + } + + // ensure the heights dir exists + heightsDir := filepath.Join(basePath, heightsPath) + if err := mkdir(heightsDir); err != nil { + return nil, fmt.Errorf("ensuring heights directory: %w", err) + } + + var recentCache cache.Cache = cache.NoopCache{} + if params.RecentBlocksCacheSize > 0 { + recentCache, err = cache.NewAccessorCache("recent", params.RecentBlocksCacheSize) + if err != nil { + return nil, fmt.Errorf("failed to create recent eds cache: %w", err) + } + } + + store := &Store{ + basepath: basePath, + cache: recentCache, + stripLock: newStripLock(1024), + } + + if err := store.populateEmptyFile(); err != nil { + return nil, fmt.Errorf("ensuring empty EDS: %w", err) + } + + return store, nil +} + +func (s *Store) Stop(context.Context) error { + return s.metrics.close() +} + +func (s *Store) PutODSQ4( + ctx context.Context, + roots *share.AxisRoots, + height uint64, + square *rsmt2d.ExtendedDataSquare, +) error { + return s.put(ctx, roots, height, square, true) +} + +func (s *Store) PutODS( + ctx context.Context, + roots *share.AxisRoots, + height uint64, + square *rsmt2d.ExtendedDataSquare, +) error { + return s.put(ctx, roots, height, square, false) +} + +func (s *Store) put( + ctx context.Context, + roots *share.AxisRoots, + height uint64, + square *rsmt2d.ExtendedDataSquare, + writeQ4 bool, +) error { + datahash := share.DataHash(roots.Hash()) + // we don't need to store empty EDS, just link the height to the empty file + if datahash.IsEmptyEDS() { + lock := s.stripLock.byHeight(height) + lock.Lock() + err := s.linkHeight(datahash, height) + lock.Unlock() + return err + } + + // put to cache before writing to make it 
accessible while write is happening + accessor := &eds.Rsmt2D{ExtendedDataSquare: square} + acc, err := s.cache.GetOrLoad(ctx, height, accessorLoader(accessor)) + if err != nil { + log.Warnf("failed to put Accessor in the recent cache: %s", err) + } else { + // release the ref link to the accessor + utils.CloseAndLog(log, "recent accessor", acc) + } + + tNow := time.Now() + lock := s.stripLock.byHashAndHeight(datahash, height) + lock.lock() + defer lock.unlock() + + var exists bool + if writeQ4 { + exists, err = s.createODSQ4File(square, roots, height) + } else { + exists, err = s.createODSFile(square, roots, height) + } + + if exists { + s.metrics.observePutExist(ctx) + return nil + } + if err != nil { + s.metrics.observePut(ctx, time.Since(tNow), square.Width(), writeQ4, true) + return fmt.Errorf("creating file: %w", err) + } + + s.metrics.observePut(ctx, time.Since(tNow), square.Width(), writeQ4, false) + return nil +} + +func (s *Store) createODSQ4File( + square *rsmt2d.ExtendedDataSquare, + roots *share.AxisRoots, + height uint64, +) (bool, error) { + pathODS := s.hashToPath(roots.Hash(), odsFileExt) + pathQ4 := s.hashToPath(roots.Hash(), q4FileExt) + + err := file.CreateODSQ4(pathODS, pathQ4, roots, square) + if errors.Is(err, os.ErrExist) { + // TODO(@Wondertan): Should we verify that the exist file is correct? 
+ return true, nil + } + if err != nil { + // ensure we don't have partial writes if any operation fails + removeErr := s.removeODSQ4(height, roots.Hash()) + return false, errors.Join( + fmt.Errorf("creating ODSQ4 file: %w", err), + removeErr, + ) + } + + // create hard link with height as name + err = s.linkHeight(roots.Hash(), height) + if err != nil { + // ensure we don't have partial writes if any operation fails + removeErr := s.removeODSQ4(height, roots.Hash()) + return false, errors.Join( + fmt.Errorf("hardlinking height: %w", err), + removeErr, + ) + } + return false, nil +} + +func (s *Store) createODSFile( + square *rsmt2d.ExtendedDataSquare, + roots *share.AxisRoots, + height uint64, +) (bool, error) { + pathODS := s.hashToPath(roots.Hash(), odsFileExt) + err := file.CreateODS(pathODS, roots, square) + if errors.Is(err, os.ErrExist) { + // TODO(@Wondertan): Should we verify that the exist file is correct? + return true, nil + } + if err != nil { + // ensure we don't have partial writes if any operation fails + removeErr := s.removeODS(height, roots.Hash()) + return false, errors.Join( + fmt.Errorf("creating ODS file: %w", err), + removeErr, + ) + } + + // create hard link with height as name + err = s.linkHeight(roots.Hash(), height) + if err != nil { + // ensure we don't have partial writes if any operation fails + removeErr := s.removeODS(height, roots.Hash()) + return false, errors.Join( + fmt.Errorf("hardlinking height: %w", err), + removeErr, + ) + } + return false, nil +} + +func (s *Store) linkHeight(datahash share.DataHash, height uint64) error { + // create hard link with height as name + pathOds := s.hashToPath(datahash, odsFileExt) + linktoOds := s.heightToPath(height, odsFileExt) + if datahash.IsEmptyEDS() { + // empty EDS is always symlinked, because there is limited number of hardlinks + // for the same file in some filesystems (ext4) + return symlink(pathOds, linktoOds) + } + return hardLink(pathOds, linktoOds) +} + +// populateEmptyFile 
writes fresh empty EDS file on disk. +// It overrides existing empty file to ensure disk format is always consistent with the canonical +// in-mem representation. +func (s *Store) populateEmptyFile() error { + pathOds := s.hashToPath(share.EmptyEDSDataHash(), odsFileExt) + pathQ4 := s.hashToPath(share.EmptyEDSDataHash(), q4FileExt) + + err := errors.Join(remove(pathOds), remove(pathQ4)) + if err != nil { + return fmt.Errorf("cleaning old empty EDS file: %w", err) + } + + err = file.CreateODSQ4(pathOds, pathQ4, share.EmptyEDSRoots(), eds.EmptyAccessor.ExtendedDataSquare) + if err != nil { + return fmt.Errorf("creating fresh empty EDS file: %w", err) + } + + return nil +} + +func (s *Store) GetByHash(ctx context.Context, datahash share.DataHash) (eds.AccessorStreamer, error) { + if datahash.IsEmptyEDS() { + return eds.EmptyAccessor, nil + } + lock := s.stripLock.byHash(datahash) + lock.RLock() + defer lock.RUnlock() + + tNow := time.Now() + f, err := s.getByHash(ctx, datahash) + s.metrics.observeGet(ctx, time.Since(tNow), err != nil) + return f, err +} + +func (s *Store) getByHash(ctx context.Context, datahash share.DataHash) (eds.AccessorStreamer, error) { + path := s.hashToPath(datahash, odsFileExt) + return s.openAccessor(ctx, path) +} + +func (s *Store) GetByHeight(ctx context.Context, height uint64) (eds.AccessorStreamer, error) { + lock := s.stripLock.byHeight(height) + lock.RLock() + defer lock.RUnlock() + + tNow := time.Now() + f, err := s.getByHeight(ctx, height) + s.metrics.observeGet(ctx, time.Since(tNow), err != nil) + return f, err +} + +func (s *Store) getByHeight(ctx context.Context, height uint64) (eds.AccessorStreamer, error) { + f, err := s.cache.Get(height) + if err == nil { + return f, nil + } + + path := s.heightToPath(height, odsFileExt) + return s.openAccessor(ctx, path) +} + +// openAccessor opens ODSQ4 Accessor. 
+// It opens ODS file first, reads up its DataHash and constructs the path for Q4 +// This is done because Q4 is not indexed (hard-linked) and there is no other way to open Q4 by height only. +func (s *Store) openAccessor(ctx context.Context, path string) (eds.AccessorStreamer, error) { + ods, err := file.OpenODS(path) + if errors.Is(err, os.ErrNotExist) { + return nil, ErrNotFound + } + if err != nil { + return nil, fmt.Errorf("failed to open ODS: %w", err) + } + + // read datahash from ODS and construct Q4 path + datahash, err := ods.DataHash(ctx) + if err != nil { + utils.CloseAndLog(log, "open ods", ods) + return nil, fmt.Errorf("reading datahash: %w", err) + } + pathQ4 := s.hashToPath(datahash, q4FileExt) + odsQ4 := file.ODSWithQ4(ods, pathQ4) + return wrapAccessor(odsQ4), nil +} + +func (s *Store) HasByHash(ctx context.Context, datahash share.DataHash) (bool, error) { + if datahash.IsEmptyEDS() { + return true, nil + } + + lock := s.stripLock.byHash(datahash) + lock.RLock() + defer lock.RUnlock() + + tNow := time.Now() + exist, err := s.hasByHash(datahash) + s.metrics.observeHas(ctx, time.Since(tNow), err != nil) + return exist, err +} + +func (s *Store) hasByHash(datahash share.DataHash) (bool, error) { + // For now, we assume that if ODS exists, the Q4 exists as well. + path := s.hashToPath(datahash, odsFileExt) + return exists(path) +} + +func (s *Store) HasByHeight(ctx context.Context, height uint64) (bool, error) { + lock := s.stripLock.byHeight(height) + lock.RLock() + defer lock.RUnlock() + + tNow := time.Now() + exist, err := s.hasByHeight(height) + s.metrics.observeHas(ctx, time.Since(tNow), err != nil) + return exist, err +} + +func (s *Store) hasByHeight(height uint64) (bool, error) { + acc, err := s.cache.Get(height) + if err == nil { + utils.CloseAndLog(log, "accessor", acc) + return true, nil + } + + // For now, we assume that if ODS exists, the Q4 exists as well. 
+ pathODS := s.heightToPath(height, odsFileExt) + return exists(pathODS) +} + +func (s *Store) RemoveODSQ4(ctx context.Context, height uint64, datahash share.DataHash) error { + lock := s.stripLock.byHashAndHeight(datahash, height) + lock.lock() + defer lock.unlock() + + tNow := time.Now() + err := s.removeODSQ4(height, datahash) + s.metrics.observeRemoveODSQ4(ctx, time.Since(tNow), err != nil) + return err +} + +func (s *Store) removeODSQ4(height uint64, datahash share.DataHash) error { + if err := s.removeODS(height, datahash); err != nil { + return fmt.Errorf("removing ODS: %w", err) + } + if err := s.removeQ4(height, datahash); err != nil { + return fmt.Errorf("removing Q4: %w", err) + } + return nil +} + +func (s *Store) removeODS(height uint64, datahash share.DataHash) error { + if err := s.cache.Remove(height); err != nil { + return fmt.Errorf("removing from cache: %w", err) + } + + pathLink := s.heightToPath(height, odsFileExt) + if err := remove(pathLink); err != nil { + return fmt.Errorf("removing hardlink: %w", err) + } + + // if datahash is empty, we don't need to remove the ODS file, only the hardlink + if datahash.IsEmptyEDS() { + return nil + } + + pathODS := s.hashToPath(datahash, odsFileExt) + if err := remove(pathODS); err != nil { + return fmt.Errorf("removing ODS file: %w", err) + } + return nil +} + +func (s *Store) RemoveQ4(ctx context.Context, height uint64, datahash share.DataHash) error { + lock := s.stripLock.byHashAndHeight(datahash, height) + lock.lock() + defer lock.unlock() + + tNow := time.Now() + err := s.removeQ4(height, datahash) + s.metrics.observeRemoveQ4(ctx, time.Since(tNow), err != nil) + return err +} + +func (s *Store) removeQ4(height uint64, datahash share.DataHash) error { + // if datahash is empty, we don't need to remove the Q4 file + if datahash.IsEmptyEDS() { + return nil + } + + if err := s.cache.Remove(height); err != nil { + return fmt.Errorf("removing from cache: %w", err) + } + + // remove Q4 file + pathQ4File := 
s.hashToPath(datahash, q4FileExt) + if err := remove(pathQ4File); err != nil { + return fmt.Errorf("removing Q4 file: %w", err) + } + return nil +} + +func (s *Store) hashToPath(datahash share.DataHash, ext string) string { + return filepath.Join(s.basepath, blocksPath, datahash.String()) + ext +} + +func (s *Store) heightToPath(height uint64, ext string) string { + return filepath.Join(s.basepath, heightsPath, strconv.Itoa(int(height))) + ext +} + +func accessorLoader(accessor eds.AccessorStreamer) cache.OpenAccessorFn { + return func(context.Context) (eds.AccessorStreamer, error) { + return wrapAccessor(accessor), nil + } +} + +func wrapAccessor(accessor eds.AccessorStreamer) eds.AccessorStreamer { + withCache := eds.WithProofsCache(accessor) + closedOnce := eds.WithClosedOnce(withCache) + sanityChecked := eds.WithValidation(closedOnce) + accessorStreamer := eds.AccessorAndStreamer(sanityChecked, closedOnce) + return accessorStreamer +} + +func mkdir(path string) error { + err := os.Mkdir(path, defaultDirPerm) + if err != nil && !errors.Is(err, os.ErrExist) { + return fmt.Errorf("making directory '%s': %w", path, err) + } + + return nil +} + +func hardLink(filepath, linkpath string) error { + err := os.Link(filepath, linkpath) + if err != nil && !errors.Is(err, os.ErrExist) { + return fmt.Errorf("creating hardlink (%s -> %s): %w", filepath, linkpath, err) + } + return nil +} + +func symlink(filepath, linkpath string) error { + err := os.Symlink(filepath, linkpath) + if err != nil && !errors.Is(err, os.ErrExist) { + return fmt.Errorf("creating symlink (%s -> %s): %w", filepath, linkpath, err) + } + return nil +} + +func exists(path string) (bool, error) { + _, err := os.Stat(path) + switch { + case err == nil: + return true, nil + case errors.Is(err, os.ErrNotExist): + return false, nil + default: + return false, fmt.Errorf("checking file existence '%s': %w", path, err) + } +} + +func remove(path string) error { + err := os.Remove(path) + if err != nil && 
!errors.Is(err, os.ErrNotExist) { + return fmt.Errorf("removing file '%s': %w", path, err) + } + return nil +} diff --git a/store/store_cache.go b/store/store_cache.go new file mode 100644 index 0000000000..7d9de15cb1 --- /dev/null +++ b/store/store_cache.go @@ -0,0 +1,69 @@ +package store + +import ( + "context" + "fmt" + + "github.com/celestiaorg/celestia-node/share/eds" + "github.com/celestiaorg/celestia-node/store/cache" +) + +// CachedStore is a store with an additional cache layer. New cache layer is created on top of the +// original store cache. Parent store cache will be able to read from the new cache layer, but will +// not be able to write to it. This makes the parent store cache and the CachedStore cache independent for writes. +type CachedStore struct { + store *Store + combinedCache *cache.DoubleCache +} + +// WithCache wraps store with extra layer of cache. Created caching layer will have read access to original +// store cache and will duplicate its content. It updates parent store cache, to allow it to +// read from additionally created cache layer. +func (s *Store) WithCache(name string, size int) (*CachedStore, error) { + if size <= 0 { + return nil, fmt.Errorf("cache size must be positive, got %d", size) + } + newCache, err := cache.NewAccessorCache(name, size) + if err != nil { + return nil, fmt.Errorf("failed to create %s cache: %w", name, err) + } + + wrappedCache := cache.NewDoubleCache(s.cache, newCache) + err = s.metrics.addCacheMetrics(wrappedCache) + if err != nil { + return nil, fmt.Errorf("failed to add cache metrics: %w", err) + } + // update parent store cache to allow it to read from both caches + s.cache = wrappedCache + return &CachedStore{ + store: s, + combinedCache: wrappedCache, + }, nil +} + +// HasByHeight checks if accessor for the height is present. 
+func (cs *CachedStore) HasByHeight(ctx context.Context, height uint64) (bool, error) { + if cs.combinedCache.Has(height) { + return true, nil + } + + return cs.store.HasByHeight(ctx, height) +} + +// GetByHeight returns accessor for given height and puts it into cache. +func (cs *CachedStore) GetByHeight(ctx context.Context, height uint64) (eds.AccessorStreamer, error) { + acc, err := cs.combinedCache.First().Get(height) + if err == nil { + return acc, nil + } + return cs.combinedCache.Second().GetOrLoad(ctx, height, cs.openFile(height)) +} + +func (cs *CachedStore) openFile(height uint64) cache.OpenAccessorFn { + return func(ctx context.Context) (eds.AccessorStreamer, error) { + // open file directly without calling GetByHeight of inner getter to + // avoid hitting store cache second time + path := cs.store.heightToPath(height, odsFileExt) + return cs.store.openAccessor(ctx, path) + } +} diff --git a/store/store_cache_test.go b/store/store_cache_test.go new file mode 100644 index 0000000000..fdfe01a550 --- /dev/null +++ b/store/store_cache_test.go @@ -0,0 +1,108 @@ +package store + +import ( + "context" + "sync/atomic" + "testing" + "time" + + "github.com/stretchr/testify/require" + + "github.com/celestiaorg/celestia-node/store/cache" +) + +func TestStore_WithCache(t *testing.T) { + height := atomic.Uint64{} + height.Store(1) + + t.Run("don't exist in first cache", func(t *testing.T) { + // create store with no cache + params := paramsNoCache() + store, err := NewStore(params, t.TempDir()) + require.NoError(t, err) + + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + t.Cleanup(cancel) + eds, roots := randomEDS(t) + height := height.Add(1) + err = store.PutODSQ4(ctx, roots, height, eds) + require.NoError(t, err) + + // check that the height is not in the cache (cache was disabled) + _, err = store.cache.Get(height) + require.ErrorIs(t, err, cache.ErrCacheMiss) + + cachedStore, err := store.WithCache("test", 10) + require.NoError(t, err) + 
// load accessor to secondary cache by calling GetByHeight on cached store + acc, err := cachedStore.GetByHeight(ctx, height) + require.NoError(t, err) + require.NoError(t, acc.Close()) + + // loaded accessor should be available in both original store and wrapped store + acc, err = store.cache.Get(height) + require.NoError(t, err) + require.NoError(t, acc.Close()) + acc, err = cachedStore.combinedCache.Get(height) + require.NoError(t, err) + require.NoError(t, acc.Close()) + }) + + t.Run("exists in first cache", func(t *testing.T) { + store, err := NewStore(DefaultParameters(), t.TempDir()) + require.NoError(t, err) + + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + t.Cleanup(cancel) + eds, roots := randomEDS(t) + height := height.Add(1) + err = store.PutODSQ4(ctx, roots, height, eds) + require.NoError(t, err) + + acc, err := store.cache.Get(height) + require.NoError(t, err) + require.NoError(t, acc.Close()) + + withCache, err := store.WithCache("test", 10) + require.NoError(t, err) + acc, err = withCache.GetByHeight(ctx, height) + require.NoError(t, err) + require.NoError(t, acc.Close()) + + _, err = withCache.combinedCache.Second().Get(height) + require.ErrorIs(t, err, cache.ErrCacheMiss) + }) + + t.Run("Has", func(t *testing.T) { + store, err := NewStore(DefaultParameters(), t.TempDir()) + require.NoError(t, err) + + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + t.Cleanup(cancel) + eds, roots := randomEDS(t) + height := height.Add(1) + err = store.PutODSQ4(ctx, roots, height, eds) + require.NoError(t, err) + + withCache, err := store.WithCache("test", 10) + require.NoError(t, err) + + has, err := withCache.HasByHeight(ctx, height) + require.NoError(t, err) + require.True(t, has) + + // load up into cache + acc, err := withCache.GetByHeight(ctx, height) + require.NoError(t, err) + require.NoError(t, acc.Close()) + + has = withCache.combinedCache.Has(height) + require.True(t, has) + }) +} + +func 
paramsNoCache() *Parameters { + params := DefaultParameters() + params.RecentBlocksCacheSize = 0 + return params +} diff --git a/store/store_options.go b/store/store_options.go new file mode 100644 index 0000000000..68299db7f1 --- /dev/null +++ b/store/store_options.go @@ -0,0 +1,24 @@ +package store + +import ( + "errors" +) + +type Parameters struct { + // RecentBlocksCacheSize is the size of the cache for recent blocks. + RecentBlocksCacheSize int +} + +// DefaultParameters returns the default configuration values for the EDS store parameters. +func DefaultParameters() *Parameters { + return &Parameters{ + RecentBlocksCacheSize: 10, + } +} + +func (p *Parameters) Validate() error { + if p.RecentBlocksCacheSize < 0 { + return errors.New("recent eds cache size cannot be negative") + } + return nil +} diff --git a/store/store_test.go b/store/store_test.go new file mode 100644 index 0000000000..cb038e5349 --- /dev/null +++ b/store/store_test.go @@ -0,0 +1,435 @@ +package store + +import ( + "context" + "os" + "path" + "sync/atomic" + "testing" + "time" + + "github.com/stretchr/testify/require" + + "github.com/celestiaorg/rsmt2d" + + "github.com/celestiaorg/celestia-node/share" + "github.com/celestiaorg/celestia-node/share/eds/edstest" + "github.com/celestiaorg/celestia-node/store/cache" +) + +func TestEDSStore(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), time.Second*5) + t.Cleanup(cancel) + + dir := t.TempDir() + edsStore, err := NewStore(paramsNoCache(), dir) + require.NoError(t, err) + + // disable cache + height := atomic.Uint64{} + height.Store(100) + + t.Run("Put", func(t *testing.T) { + dir := t.TempDir() + edsStore, err := NewStore(paramsNoCache(), dir) + require.NoError(t, err) + + eds, roots := randomEDS(t) + height := height.Add(1) + + err = edsStore.PutODSQ4(ctx, roots, height, eds) + require.NoError(t, err) + + // file should exist in the store + hasByHashAndHeight(t, edsStore, ctx, roots.Hash(), height, true, true) + + // 
block folder should contain ods and q4 files for the block and 1 link + ensureAmountFileAndLinks(t, dir, 2, 1) + }) + + t.Run("Cached after Put", func(t *testing.T) { + edsStore, err := NewStore(DefaultParameters(), t.TempDir()) + require.NoError(t, err) + + eds, roots := randomEDS(t) + height := height.Add(1) + + err = edsStore.PutODSQ4(ctx, roots, height, eds) + require.NoError(t, err) + + // file should be cached after put + f, err := edsStore.cache.Get(height) + require.NoError(t, err) + require.NoError(t, f.Close()) + + // check that cached file is the same eds + fromFile, err := f.Shares(ctx) + require.NoError(t, err) + require.NoError(t, f.Close()) + expected := eds.FlattenedODS() + require.Equal(t, expected, fromFile) + }) + + t.Run("Second Put should be noop", func(t *testing.T) { + dir := t.TempDir() + edsStore, err := NewStore(paramsNoCache(), dir) + require.NoError(t, err) + + eds, roots := randomEDS(t) + height := height.Add(1) + + err = edsStore.PutODSQ4(ctx, roots, height, eds) + require.NoError(t, err) + // ensure file is written. There should be only ods + q4 files and 1 link + ensureAmountFileAndLinks(t, dir, 2, 1) + + err = edsStore.PutODSQ4(ctx, roots, height, eds) + require.NoError(t, err) + + // ensure file is not duplicated. 
+ ensureAmountFileAndLinks(t, dir, 2, 1) + }) + + t.Run("GetByHeight", func(t *testing.T) { + eds, roots := randomEDS(t) + height := height.Add(1) + + err = edsStore.PutODSQ4(ctx, roots, height, eds) + require.NoError(t, err) + + f, err := edsStore.GetByHeight(ctx, height) + require.NoError(t, err) + + // check that cached file is the same eds + fromFile, err := f.Shares(ctx) + require.NoError(t, err) + require.NoError(t, f.Close()) + expected := eds.FlattenedODS() + require.Equal(t, expected, fromFile) + }) + + t.Run("GetByHash", func(t *testing.T) { + eds, roots := randomEDS(t) + height := height.Add(1) + + err := edsStore.PutODSQ4(ctx, roots, height, eds) + require.NoError(t, err) + + f, err := edsStore.GetByHash(ctx, roots.Hash()) + require.NoError(t, err) + + // check that cached file is the same eds + fromFile, err := f.Shares(ctx) + require.NoError(t, err) + require.NoError(t, f.Close()) + expected := eds.FlattenedODS() + require.Equal(t, expected, fromFile) + }) + + t.Run("Does not exist", func(t *testing.T) { + _, roots := randomEDS(t) + height := height.Add(1) + + // file does not exist + hasByHashAndHeight(t, edsStore, ctx, roots.Hash(), height, false, false) + + _, err = edsStore.GetByHeight(ctx, height) + require.ErrorIs(t, err, ErrNotFound) + + _, err = edsStore.GetByHash(ctx, roots.Hash()) + require.ErrorIs(t, err, ErrNotFound) + }) + + t.Run("RemoveODSQ4", func(t *testing.T) { + t.Run("empty file", func(t *testing.T) { + dir := t.TempDir() + edsStore, err := NewStore(DefaultParameters(), dir) + require.NoError(t, err) + + height := height.Add(1) + hash := share.EmptyEDSDataHash() + err = edsStore.PutODSQ4(ctx, share.EmptyEDSRoots(), height, share.EmptyEDS()) + require.NoError(t, err) + ensureAmountFileAndLinks(t, dir, 0, 1) + + err = edsStore.RemoveODSQ4(ctx, height, hash) + require.NoError(t, err) + + // file should be removed from cache + _, err = edsStore.cache.Get(height) + require.ErrorIs(t, err, cache.ErrCacheMiss) + + // empty file should be 
accessible by hash, but not by height + hasByHashAndHeight(t, edsStore, ctx, hash, height, true, false) + + // ensure all files and links are removed + ensureAmountFileAndLinks(t, dir, 0, 0) + }) + + t.Run("non-empty file", func(t *testing.T) { + dir := t.TempDir() + edsStore, err := NewStore(DefaultParameters(), dir) + require.NoError(t, err) + + // removing file that does not exist should be noop + missingHeight := height.Add(1) + err = edsStore.RemoveODSQ4(ctx, missingHeight, share.DataHash{0x01, 0x02}) + require.NoError(t, err) + + eds, roots := randomEDS(t) + height := height.Add(1) + err = edsStore.PutODSQ4(ctx, roots, height, eds) + require.NoError(t, err) + // ensure file is written + ensureAmountFileAndLinks(t, dir, 2, 1) + + err = edsStore.RemoveODSQ4(ctx, height, roots.Hash()) + require.NoError(t, err) + + // file should be removed from cache + _, err = edsStore.cache.Get(height) + require.ErrorIs(t, err, cache.ErrCacheMiss) + + // file should not be accessible by hash or height + hasByHashAndHeight(t, edsStore, ctx, roots.Hash(), height, false, false) + + // ensure file and link are removed + ensureAmountFileAndLinks(t, dir, 0, 0) + + // subsequent removeODSQ4 should be noop + err = edsStore.RemoveODSQ4(ctx, height, roots.Hash()) + require.NoError(t, err) + }) + }) + + t.Run("RemoveQ4", func(t *testing.T) { + t.Run("empty file", func(t *testing.T) { + dir := t.TempDir() + edsStore, err := NewStore(DefaultParameters(), dir) + require.NoError(t, err) + + height := height.Add(1) + hash := share.EmptyEDSDataHash() + err = edsStore.PutODSQ4(ctx, share.EmptyEDSRoots(), height, share.EmptyEDS()) + require.NoError(t, err) + // empty file is not counted as a file + ensureAmountFileAndLinks(t, dir, 0, 1) + + err = edsStore.RemoveQ4(ctx, height, hash) + require.NoError(t, err) + + // file should be removed from cache + _, err = edsStore.cache.Get(height) + require.ErrorIs(t, err, cache.ErrCacheMiss) + + // empty file should be accessible by hash and by height + 
hasByHashAndHeight(t, edsStore, ctx, hash, height, true, true) + + // ensure ods file and link are not removed + ensureAmountFileAndLinks(t, dir, 0, 1) + }) + + t.Run("non-empty file", func(t *testing.T) { + dir := t.TempDir() + edsStore, err := NewStore(DefaultParameters(), dir) + require.NoError(t, err) + + square, roots := randomEDS(t) + height := height.Add(1) + err = edsStore.PutODSQ4(ctx, roots, height, square) + require.NoError(t, err) + + err = edsStore.RemoveQ4(ctx, height, roots.Hash()) + require.NoError(t, err) + + // file should be removed from cache + _, err = edsStore.cache.Get(height) + require.ErrorIs(t, err, cache.ErrCacheMiss) + + // ODS file should be accessible by hash and by height + hasByHashAndHeight(t, edsStore, ctx, roots.Hash(), height, true, true) + + // ensure ods file and link are not removed + ensureAmountFileAndLinks(t, dir, 1, 1) + }) + }) + + t.Run("empty EDS returned by hash", func(t *testing.T) { + eds := share.EmptyEDS() + roots, err := share.NewAxisRoots(eds) + require.NoError(t, err) + + // assert that the empty file exists + has, err := edsStore.HasByHash(ctx, roots.Hash()) + require.NoError(t, err) + require.True(t, has) + + // assert that the empty file is, in fact, empty + f, err := edsStore.GetByHash(ctx, roots.Hash()) + require.NoError(t, err) + hash, err := f.DataHash(ctx) + require.NoError(t, err) + require.True(t, hash.IsEmptyEDS()) + }) + + t.Run("empty EDS returned by height", func(t *testing.T) { + eds := share.EmptyEDS() + roots, err := share.NewAxisRoots(eds) + require.NoError(t, err) + height := height.Add(1) + + // assert that the empty file exists + has, err := edsStore.HasByHeight(ctx, height) + require.NoError(t, err) + require.False(t, has) + + err = edsStore.PutODSQ4(ctx, roots, height, eds) + require.NoError(t, err) + + // assert that the empty file can be accessed by height + f, err := edsStore.GetByHeight(ctx, height) + require.NoError(t, err) + hash, err := f.DataHash(ctx) + require.NoError(t, err) + 
require.True(t, hash.IsEmptyEDS()) + require.NoError(t, f.Close()) + }) + + t.Run("empty EDS are persisted", func(t *testing.T) { + dir := t.TempDir() + edsStore, err := NewStore(DefaultParameters(), dir) + require.NoError(t, err) + + eds := share.EmptyEDS() + roots, err := share.NewAxisRoots(eds) + require.NoError(t, err) + from, to := 10, 20 + + // store empty EDSs + for i := from; i <= to; i++ { + err := edsStore.PutODSQ4(ctx, roots, uint64(i), eds) + require.NoError(t, err) + } + + // close and reopen the store to ensure that the empty files are persisted + require.NoError(t, edsStore.Stop(ctx)) + edsStore, err = NewStore(DefaultParameters(), dir) + require.NoError(t, err) + + // assert that the empty files restored from disk + for i := from; i <= to; i++ { + f, err := edsStore.GetByHeight(ctx, uint64(i)) + require.NoError(t, err) + hash, err := f.DataHash(ctx) + require.NoError(t, err) + require.True(t, hash.IsEmptyEDS()) + require.NoError(t, f.Close()) + } + }) + + t.Run("reopen", func(t *testing.T) { + // tests that store can be reopened + err = edsStore.Stop(ctx) + require.NoError(t, err) + + edsStore, err = NewStore(paramsNoCache(), dir) + require.NoError(t, err) + }) +} + +func BenchmarkStore(b *testing.B) { + ctx, cancel := context.WithCancel(context.Background()) + b.Cleanup(cancel) + + eds := edstest.RandEDS(b, 128) + roots, err := share.NewAxisRoots(eds) + require.NoError(b, err) + + // BenchmarkStore/put_128-16 186 6623266 ns/op + b.Run("put 128", func(b *testing.B) { + edsStore, err := NewStore(paramsNoCache(), b.TempDir()) + require.NoError(b, err) + + b.ResetTimer() + for i := 0; i < b.N; i++ { + roots := edstest.RandomAxisRoots(b, 1) + _ = edsStore.PutODSQ4(ctx, roots, uint64(i), eds) + } + }) + + // read 128 EDSs does not read full EDS, but only the header + // BenchmarkStore/open_by_height,_128-16 1585693 747.6 ns/op (~7mcs) + b.Run("open by height, 128", func(b *testing.B) { + edsStore, err := NewStore(paramsNoCache(), b.TempDir()) + 
require.NoError(b, err) + + height := uint64(1984) + err = edsStore.PutODSQ4(ctx, roots, height, eds) + require.NoError(b, err) + + b.ResetTimer() + for i := 0; i < b.N; i++ { + f, err := edsStore.GetByHeight(ctx, height) + require.NoError(b, err) + require.NoError(b, f.Close()) + } + }) + + // BenchmarkStore/open_by_hash,_128-16 1240942 945.9 ns/op (~9mcs) + b.Run("open by hash, 128", func(b *testing.B) { + edsStore, err := NewStore(DefaultParameters(), b.TempDir()) + require.NoError(b, err) + + height := uint64(1984) + err = edsStore.PutODSQ4(ctx, roots, height, eds) + require.NoError(b, err) + + b.ResetTimer() + for i := 0; i < b.N; i++ { + f, err := edsStore.GetByHash(ctx, roots.Hash()) + require.NoError(b, err) + require.NoError(b, f.Close()) + } + }) +} + +func randomEDS(t testing.TB) (*rsmt2d.ExtendedDataSquare, *share.AxisRoots) { + eds := edstest.RandEDS(t, 4) + roots, err := share.NewAxisRoots(eds) + require.NoError(t, err) + + return eds, roots +} + +func ensureAmountFileAndLinks(t testing.TB, dir string, files, links int) { + // add empty file ods and q4 parts and heights folder to the count + files += 3 + // ensure block folder contains the correct amount of files + blockPath := path.Join(dir, blocksPath) + entries, err := os.ReadDir(blockPath) + require.NoError(t, err) + require.Len(t, entries, files) + + // ensure heights folder contains the correct amount of links + linksPath := path.Join(dir, heightsPath) + entries, err = os.ReadDir(linksPath) + require.NoError(t, err) + require.Len(t, entries, links) +} + +func hasByHashAndHeight( + t testing.TB, + store *Store, + ctx context.Context, + hash share.DataHash, + height uint64, + hasByHash, hasByHeight bool, +) { + has, err := store.HasByHash(ctx, hash) + require.NoError(t, err) + require.Equal(t, hasByHash, has) + + has, err = store.HasByHeight(ctx, height) + require.NoError(t, err) + require.Equal(t, hasByHeight, has) +} diff --git a/store/striplock.go b/store/striplock.go new file mode 100644 index 
0000000000..4738453c77 --- /dev/null +++ b/store/striplock.go @@ -0,0 +1,55 @@ +package store + +import ( + "sync" + + "github.com/celestiaorg/celestia-node/share" +) + +// TODO: move to utils +type striplock struct { + heights []*sync.RWMutex + datahashes []*sync.RWMutex +} + +type multiLock struct { + mu []*sync.RWMutex +} + +func newStripLock(size int) *striplock { + heights := make([]*sync.RWMutex, size) + datahashes := make([]*sync.RWMutex, size) + for i := 0; i < size; i++ { + heights[i] = &sync.RWMutex{} + datahashes[i] = &sync.RWMutex{} + } + return &striplock{heights, datahashes} +} + +func (l *striplock) byHeight(height uint64) *sync.RWMutex { + lkIdx := height % uint64(len(l.heights)) + return l.heights[lkIdx] +} + +func (l *striplock) byHash(datahash share.DataHash) *sync.RWMutex { + // Use the last 2 bytes of the hash as key to distribute the locks + last := uint16(datahash[len(datahash)-1]) | uint16(datahash[len(datahash)-2])<<8 + lkIdx := last % uint16(len(l.datahashes)) + return l.datahashes[lkIdx] +} + +func (l *striplock) byHashAndHeight(datahash share.DataHash, height uint64) *multiLock { + return &multiLock{[]*sync.RWMutex{l.byHash(datahash), l.byHeight(height)}} +} + +func (m *multiLock) lock() { + for _, lk := range m.mu { + lk.Lock() + } +} + +func (m *multiLock) unlock() { + for _, lk := range m.mu { + lk.Unlock() + } +}