From 8deffba77b0e68f6a2229bc8266359eda704c584 Mon Sep 17 00:00:00 2001 From: Ryan Date: Tue, 29 Aug 2023 12:43:52 +0200 Subject: [PATCH 01/19] fix: fixing panic on GetShare with out of bounds indexes (#2605) GetShare would panic on RPC calls if the passed indices were out of bounds. --- share/getter.go | 2 ++ share/getters/cascade.go | 8 ++++---- share/getters/getter_test.go | 8 ++++++++ share/getters/ipld.go | 6 ++++++ share/getters/store.go | 6 ++++++ 5 files changed, 26 insertions(+), 4 deletions(-) diff --git a/share/getter.go b/share/getter.go index a33ea7aa87..19e4498458 100644 --- a/share/getter.go +++ b/share/getter.go @@ -13,6 +13,8 @@ import ( var ( // ErrNotFound is used to indicate that requested data could not be found. ErrNotFound = errors.New("share: data not found") + // ErrOutOfBounds is used to indicate that a passed row or column index is out of bounds of the square size. + ErrOutOfBounds = errors.New("share: row or column index is larger than square size") ) // Getter interface provides a set of accessors for shares by the Root. 
diff --git a/share/getters/cascade.go b/share/getters/cascade.go index 98f29c87ca..63d7713d3d 100644 --- a/share/getters/cascade.go +++ b/share/getters/cascade.go @@ -3,7 +3,6 @@ package getters import ( "context" "errors" - "fmt" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/trace" @@ -38,9 +37,10 @@ func (cg *CascadeGetter) GetShare(ctx context.Context, root *share.Root, row, co attribute.Int("col", col), )) defer span.End() - if row >= len(root.RowRoots) || col >= len(root.ColumnRoots) { - err := fmt.Errorf("cascade/get-share: invalid indexes were provided:rowIndex=%d, colIndex=%d."+ - "squarewidth=%d", row, col, len(root.RowRoots)) + + upperBound := len(root.RowRoots) + if row >= upperBound || col >= upperBound { + err := share.ErrOutOfBounds span.RecordError(err) return nil, err } diff --git a/share/getters/getter_test.go b/share/getters/getter_test.go index ae90074625..02e075459b 100644 --- a/share/getters/getter_test.go +++ b/share/getters/getter_test.go @@ -107,6 +107,10 @@ func TestStoreGetter(t *testing.T) { } } + // doesn't panic on indexes too high + _, err := sg.GetShare(ctx, &dah, squareSize, squareSize) + require.ErrorIs(t, err, share.ErrOutOfBounds) + // root not found _, dah = randomEDS(t) _, err = sg.GetShare(ctx, &dah, 0, 0) @@ -184,6 +188,10 @@ func TestIPLDGetter(t *testing.T) { } } + // doesn't panic on indexes too high + _, err := sg.GetShare(ctx, &dah, squareSize+1, squareSize+1) + require.ErrorIs(t, err, share.ErrOutOfBounds) + // root not found _, dah = randomEDS(t) _, err = sg.GetShare(ctx, &dah, 0, 0) diff --git a/share/getters/ipld.go b/share/getters/ipld.go index af6e7673ba..a892e0fc82 100644 --- a/share/getters/ipld.go +++ b/share/getters/ipld.go @@ -48,6 +48,12 @@ func (ig *IPLDGetter) GetShare(ctx context.Context, dah *share.Root, row, col in utils.SetStatusAndEnd(span, err) }() + upperBound := len(dah.RowRoots) + if row >= upperBound || col >= upperBound { + err := share.ErrOutOfBounds + span.RecordError(err) + 
return nil, err + } root, leaf := ipld.Translate(dah, row, col) // wrap the blockservice in a session if it has been signaled in the context. diff --git a/share/getters/store.go b/share/getters/store.go index 28c6c6d021..989649f795 100644 --- a/share/getters/store.go +++ b/share/getters/store.go @@ -43,6 +43,12 @@ func (sg *StoreGetter) GetShare(ctx context.Context, dah *share.Root, row, col i utils.SetStatusAndEnd(span, err) }() + upperBound := len(dah.RowRoots) + if row >= upperBound || col >= upperBound { + err := share.ErrOutOfBounds + span.RecordError(err) + return nil, err + } root, leaf := ipld.Translate(dah, row, col) bs, err := sg.store.CARBlockstore(ctx, dah.Hash()) if errors.Is(err, eds.ErrNotFound) { From 9d663cf7d948fefa961d9b3c8d0afb8dca3ec8ab Mon Sep 17 00:00:00 2001 From: Hlib Kanunnikov Date: Wed, 30 Aug 2023 13:12:01 +0200 Subject: [PATCH 02/19] chore: bump go-header and go-fraud (#2619) This PR bumps go-header and go-fraud which are both breaking - lots of changes here so please review carefully. Note that this also contains re-introduction of VerifyCommitLightTrusting which caused us issues in `mocha` due to rapid voting power changes. At the moment, there are no test utilities to test that specific case, so we need to wait to roll it out to `mocha`. 
Related: https://github.com/celestiaorg/celestia-app/issues/2382 --------- Co-authored-by: rene <41963722+renaynay@users.noreply.github.com> --- api/docgen/examples.go | 2 +- core/exchange.go | 11 +-- core/header_test.go | 2 +- core/listener.go | 4 +- core/listener_test.go | 4 +- das/coordinator_test.go | 8 +-- das/daser.go | 8 +-- das/daser_test.go | 46 ++++++++---- das/state.go | 15 ++-- das/worker.go | 2 +- go.mod | 4 +- go.sum | 8 +-- header/header.go | 91 ++++++++++++++++-------- header/headertest/testing.go | 8 +-- header/headertest/verify_test.go | 49 +++---------- header/serde.go | 2 +- header/verify.go | 76 -------------------- nodebuilder/das/constructors.go | 6 +- nodebuilder/das/module.go | 5 +- nodebuilder/fraud/constructors.go | 72 +++++++++++-------- nodebuilder/fraud/fraud.go | 84 ++++++++++++++++++++++ nodebuilder/fraud/lifecycle.go | 24 ++++--- nodebuilder/fraud/module.go | 14 ++-- nodebuilder/fraud/service.go | 87 ---------------------- nodebuilder/fraud/unmarshaler.go | 32 +++++++++ nodebuilder/header/constructors.go | 33 +++++---- nodebuilder/header/module.go | 48 +++++++------ nodebuilder/header/module_test.go | 8 +-- nodebuilder/header/service.go | 8 +-- nodebuilder/header/service_test.go | 4 +- nodebuilder/module.go | 5 +- nodebuilder/p2p/pubsub.go | 13 ++-- nodebuilder/settings.go | 3 +- nodebuilder/state/core.go | 6 +- nodebuilder/state/module.go | 7 +- nodebuilder/testing.go | 5 +- nodebuilder/tests/api_test.go | 4 +- nodebuilder/tests/sync_test.go | 4 +- share/eds/byzantine/bad_encoding.go | 41 ++++------- share/eds/byzantine/bad_encoding_test.go | 2 +- share/eds/retriever_test.go | 4 +- share/p2p/discovery/discovery.go | 3 +- share/p2p/peers/manager.go | 2 +- share/p2p/peers/manager_test.go | 8 +-- state/core_access.go | 13 ++-- state/integration_test.go | 6 +- 46 files changed, 449 insertions(+), 442 deletions(-) delete mode 100644 header/verify.go delete mode 100644 nodebuilder/fraud/service.go create mode 100644 
nodebuilder/fraud/unmarshaler.go diff --git a/api/docgen/examples.go b/api/docgen/examples.go index 3456880c4f..b873e7e050 100644 --- a/api/docgen/examples.go +++ b/api/docgen/examples.go @@ -56,7 +56,7 @@ var ExampleValues = map[reflect.Type]interface{}{ reflect.TypeOf(node.Full): node.Full, reflect.TypeOf(auth.Permission("admin")): auth.Permission("admin"), reflect.TypeOf(byzantine.BadEncoding): byzantine.BadEncoding, - reflect.TypeOf((*fraud.Proof)(nil)).Elem(): byzantine.CreateBadEncodingProof( + reflect.TypeOf((*fraud.Proof[*header.ExtendedHeader])(nil)).Elem(): byzantine.CreateBadEncodingProof( []byte("bad encoding proof"), 42, &byzantine.ErrByzantine{ diff --git a/core/exchange.go b/core/exchange.go index bed2404195..f8e1606a3e 100644 --- a/core/exchange.go +++ b/core/exchange.go @@ -78,7 +78,7 @@ func (ce *Exchange) GetVerifiedRange( from *header.ExtendedHeader, amount uint64, ) ([]*header.ExtendedHeader, error) { - headers, err := ce.GetRangeByHeight(ctx, uint64(from.Height())+1, amount) + headers, err := ce.GetRangeByHeight(ctx, from.Height()+1, amount) if err != nil { return nil, err } @@ -115,7 +115,7 @@ func (ce *Exchange) Get(ctx context.Context, hash libhead.Hash) (*header.Extende return nil, fmt.Errorf("extending block data for height %d: %w", &block.Height, err) } // construct extended header - eh, err := ce.construct(ctx, &block.Header, comm, vals, eds) + eh, err := ce.construct(&block.Header, comm, vals, eds) if err != nil { panic(fmt.Errorf("constructing extended header for height %d: %w", &block.Height, err)) } @@ -133,7 +133,10 @@ func (ce *Exchange) Get(ctx context.Context, hash libhead.Hash) (*header.Extende return eh, nil } -func (ce *Exchange) Head(ctx context.Context) (*header.ExtendedHeader, error) { +func (ce *Exchange) Head( + ctx context.Context, + _ ...libhead.HeadOption[*header.ExtendedHeader], +) (*header.ExtendedHeader, error) { log.Debug("requesting head") return ce.getExtendedHeaderByHeight(ctx, nil) } @@ -157,7 +160,7 @@ func 
(ce *Exchange) getExtendedHeaderByHeight(ctx context.Context, height *int64 return nil, fmt.Errorf("extending block data for height %d: %w", b.Header.Height, err) } // create extended header - eh, err := ce.construct(ctx, &b.Header, &b.Commit, &b.ValidatorSet, eds) + eh, err := ce.construct(&b.Header, &b.Commit, &b.ValidatorSet, eds) if err != nil { panic(fmt.Errorf("constructing extended header for height %d: %w", b.Header.Height, err)) } diff --git a/core/header_test.go b/core/header_test.go index 1c89db9d6b..c942ea7875 100644 --- a/core/header_test.go +++ b/core/header_test.go @@ -33,7 +33,7 @@ func TestMakeExtendedHeaderForEmptyBlock(t *testing.T) { eds, err := extendBlock(b.Data, b.Header.Version.App) require.NoError(t, err) - headerExt, err := header.MakeExtendedHeader(ctx, &b.Header, comm, val, eds) + headerExt, err := header.MakeExtendedHeader(&b.Header, comm, val, eds) require.NoError(t, err) assert.Equal(t, header.EmptyDAH(), *headerExt.DAH) diff --git a/core/listener.go b/core/listener.go index 565fc62032..1c79fbbe71 100644 --- a/core/listener.go +++ b/core/listener.go @@ -160,7 +160,7 @@ func (cl *Listener) handleNewSignedBlock(ctx context.Context, b types.EventDataS return fmt.Errorf("extending block data: %w", err) } // generate extended header - eh, err := cl.construct(ctx, &b.Header, &b.Commit, &b.ValidatorSet, eds) + eh, err := cl.construct(&b.Header, &b.Commit, &b.ValidatorSet, eds) if err != nil { panic(fmt.Errorf("making extended header: %w", err)) } @@ -181,7 +181,7 @@ func (cl *Listener) handleNewSignedBlock(ctx context.Context, b types.EventDataS if !syncing { err = cl.hashBroadcaster(ctx, shrexsub.Notification{ DataHash: eh.DataHash.Bytes(), - Height: uint64(eh.Height()), + Height: eh.Height(), }) if err != nil && !errors.Is(err, context.Canceled) { log.Errorw("listener: broadcasting data hash", diff --git a/core/listener_test.go b/core/listener_test.go index 7d4b12310a..8b3d05bea9 100644 --- a/core/listener_test.go +++ 
b/core/listener_test.go @@ -31,8 +31,8 @@ func TestListener(t *testing.T) { // create mocknet with two pubsub endpoints ps0, ps1 := createMocknetWithTwoPubsubEndpoints(ctx, t) subscriber := p2p.NewSubscriber[*header.ExtendedHeader](ps1, header.MsgID, networkID) - err := subscriber.AddValidator(func(context.Context, *header.ExtendedHeader) pubsub.ValidationResult { - return pubsub.ValidationAccept + err := subscriber.SetVerifier(func(context.Context, *header.ExtendedHeader) error { + return nil }) require.NoError(t, err) require.NoError(t, subscriber.Start(ctx)) diff --git a/das/coordinator_test.go b/das/coordinator_test.go index 188cb0d222..55ed01dd4e 100644 --- a/das/coordinator_test.go +++ b/das/coordinator_test.go @@ -366,7 +366,7 @@ func (m *mockSampler) sample(ctx context.Context, h *header.ExtendedHeader) erro m.lock.Lock() defer m.lock.Unlock() - height := uint64(h.Height()) + height := h.Height() m.done[height]++ if len(m.done) > int(m.NetworkHead-m.SampleFrom) && !m.isFinished { @@ -503,7 +503,7 @@ func (o *checkOrder) middleWare(out sampleFn) sampleFn { if len(o.queue) > 0 { // check last item in queue to be same as input - if o.queue[0] != uint64(h.Height()) { + if o.queue[0] != h.Height() { defer o.lock.Unlock() return fmt.Errorf("expected height: %v,got: %v", o.queue[0], h.Height()) } @@ -573,7 +573,7 @@ func (l *lock) releaseAll(except ...uint64) { func (l *lock) middleWare(out sampleFn) sampleFn { return func(ctx context.Context, h *header.ExtendedHeader) error { l.m.Lock() - ch, blocked := l.blockList[uint64(h.Height())] + ch, blocked := l.blockList[h.Height()] l.m.Unlock() if !blocked { return out(ctx, h) @@ -589,7 +589,7 @@ func (l *lock) middleWare(out sampleFn) sampleFn { } func onceMiddleWare(out sampleFn) sampleFn { - db := make(map[int64]int) + db := make(map[uint64]int) m := sync.Mutex{} return func(ctx context.Context, h *header.ExtendedHeader) error { m.Lock() diff --git a/das/daser.go b/das/daser.go index d4ad0ee641..9d3e43a91b 100644 --- 
a/das/daser.go +++ b/das/daser.go @@ -25,7 +25,7 @@ type DASer struct { params Parameters da share.Availability - bcast fraud.Broadcaster + bcast fraud.Broadcaster[*header.ExtendedHeader] hsub libhead.Subscriber[*header.ExtendedHeader] // listens for new headers in the network getter libhead.Getter[*header.ExtendedHeader] // retrieves past headers @@ -47,7 +47,7 @@ func NewDASer( hsub libhead.Subscriber[*header.ExtendedHeader], getter libhead.Getter[*header.ExtendedHeader], dstore datastore.Datastore, - bcast fraud.Broadcaster, + bcast fraud.Broadcaster[*header.ExtendedHeader], shrexBroadcast shrexsub.BroadcastFn, options ...Option, ) (*DASer, error) { @@ -99,7 +99,7 @@ func (d *DASer) Start(ctx context.Context) error { // attempt to get head info. No need to handle error, later DASer // will be able to find new head from subscriber after it is started if h, err := d.getter.Head(ctx); err == nil { - cp.NetworkHead = uint64(h.Height()) + cp.NetworkHead = h.Height() } } log.Info("starting DASer from checkpoint: ", cp.String()) @@ -152,7 +152,7 @@ func (d *DASer) sample(ctx context.Context, h *header.ExtendedHeader) error { var byzantineErr *byzantine.ErrByzantine if errors.As(err, &byzantineErr) { log.Warn("Propagating proof...") - sendErr := d.bcast.Broadcast(ctx, byzantine.CreateBadEncodingProof(h.Hash(), uint64(h.Height()), byzantineErr)) + sendErr := d.bcast.Broadcast(ctx, byzantine.CreateBadEncodingProof(h.Hash(), h.Height(), byzantineErr)) if sendErr != nil { log.Errorw("fraud proof propagating failed", "err", sendErr) } diff --git a/das/daser_test.go b/das/daser_test.go index 7398310a6b..68f6e01ef2 100644 --- a/das/daser_test.go +++ b/das/daser_test.go @@ -159,21 +159,37 @@ func TestDASer_stopsAfter_BEFP(t *testing.T) { mockGet, sub, _ := createDASerSubcomponents(t, bServ, 15, 15) // create fraud service and break one header - getter := func(ctx context.Context, height uint64) (libhead.Header, error) { + getter := func(ctx context.Context, height uint64) 
(*header.ExtendedHeader, error) { return mockGet.GetByHeight(ctx, height) } - f := fraudserv.NewProofService(ps, net.Hosts()[0], getter, ds, false, "private") - require.NoError(t, f.Start(ctx)) + unmarshaler := fraud.MultiUnmarshaler[*header.ExtendedHeader]{ + Unmarshalers: map[fraud.ProofType]func([]byte) (fraud.Proof[*header.ExtendedHeader], error){ + byzantine.BadEncoding: func(data []byte) (fraud.Proof[*header.ExtendedHeader], error) { + befp := &byzantine.BadEncodingProof{} + return befp, befp.UnmarshalBinary(data) + }, + }, + } + + fserv := fraudserv.NewProofService[*header.ExtendedHeader](ps, + net.Hosts()[0], + getter, + unmarshaler, + ds, + false, + "private", + ) + require.NoError(t, fserv.Start(ctx)) mockGet.headers[1], _ = headertest.CreateFraudExtHeader(t, mockGet.headers[1], bServ) newCtx := context.Background() // create and start DASer - daser, err := NewDASer(avail, sub, mockGet, ds, f, newBroadcastMock(1)) + daser, err := NewDASer(avail, sub, mockGet, ds, fserv, newBroadcastMock(1)) require.NoError(t, err) resultCh := make(chan error) - go fraud.OnProof(newCtx, f, byzantine.BadEncoding, - func(fraud.Proof) { + go fraud.OnProof[*header.ExtendedHeader](newCtx, fserv, byzantine.BadEncoding, + func(fraud.Proof[*header.ExtendedHeader]) { resultCh <- daser.Stop(newCtx) }) @@ -210,10 +226,10 @@ func TestDASerSampleTimeout(t *testing.T) { ds := ds_sync.MutexWrap(datastore.NewMapDatastore()) sub := new(headertest.Subscriber) - f := new(fraudtest.DummyService) + fserv := &fraudtest.DummyService[*header.ExtendedHeader]{} // create and start DASer - daser, err := NewDASer(avail, sub, getter, ds, f, newBroadcastMock(1), WithSampleTimeout(1)) + daser, err := NewDASer(avail, sub, getter, ds, fserv, newBroadcastMock(1), WithSampleTimeout(1)) require.NoError(t, err) require.NoError(t, daser.Start(ctx)) @@ -235,9 +251,9 @@ func createDASerSubcomponents( bServ blockservice.BlockService, numGetter, numSub int, -) (*mockGetter, *headertest.Subscriber, 
*fraudtest.DummyService) { +) (*mockGetter, *headertest.Subscriber, *fraudtest.DummyService[*header.ExtendedHeader]) { mockGet, sub := createMockGetterAndSub(t, bServ, numGetter, numSub) - fraud := new(fraudtest.DummyService) + fraud := &fraudtest.DummyService[*header.ExtendedHeader]{} return mockGet, sub, fraud } @@ -313,7 +329,10 @@ func (m *mockGetter) generateHeaders(t *testing.T, bServ blockservice.BlockServi m.head = int64(startHeight + endHeight) } -func (m *mockGetter) Head(context.Context) (*header.ExtendedHeader, error) { +func (m *mockGetter) Head( + context.Context, + ...libhead.HeadOption[*header.ExtendedHeader], +) (*header.ExtendedHeader, error) { return m.headers[m.head], nil } @@ -354,7 +373,10 @@ func (m benchGetterStub) GetByHeight(context.Context, uint64) (*header.ExtendedH type getterStub struct{} -func (m getterStub) Head(context.Context) (*header.ExtendedHeader, error) { +func (m getterStub) Head( + context.Context, + ...libhead.HeadOption[*header.ExtendedHeader], +) (*header.ExtendedHeader, error) { return &header.ExtendedHeader{RawHeader: header.RawHeader{Height: 1}}, nil } diff --git a/das/state.go b/das/state.go index 6af0b7d8d8..bd3a018a40 100644 --- a/das/state.go +++ b/das/state.go @@ -132,30 +132,29 @@ func (s *coordinatorState) handleRetryResult(res result) { } } -func (s *coordinatorState) isNewHead(newHead int64) bool { +func (s *coordinatorState) isNewHead(newHead uint64) bool { // seen this header before - if uint64(newHead) <= s.networkHead { + if newHead <= s.networkHead { log.Warnf("received head height: %v, which is lower or the same as previously known: %v", newHead, s.networkHead) return false } return true } -func (s *coordinatorState) updateHead(newHead int64) { +func (s *coordinatorState) updateHead(newHead uint64) { if s.networkHead == s.sampleFrom { log.Infow("found first header, starting sampling") } - s.networkHead = uint64(newHead) + s.networkHead = newHead log.Debugw("updated head", "from_height", s.networkHead, 
"to_height", newHead) s.checkDone() } // recentJob creates a job to process a recent header. func (s *coordinatorState) recentJob(header *header.ExtendedHeader) job { - height := uint64(header.Height()) // move next, to prevent catchup job from processing same height - if s.next == height { + if s.next == header.Height() { s.next++ } s.nextJobID++ @@ -163,8 +162,8 @@ func (s *coordinatorState) recentJob(header *header.ExtendedHeader) job { id: s.nextJobID, jobType: recentJob, header: header, - from: height, - to: height, + from: header.Height(), + to: header.Height(), } } diff --git a/das/worker.go b/das/worker.go index 746324ec48..f2e8c4d821 100644 --- a/das/worker.go +++ b/das/worker.go @@ -135,7 +135,7 @@ func (w *worker) sample(ctx context.Context, timeout time.Duration, height uint6 if w.state.job.jobType == recentJob { err = w.broadcast(ctx, shrexsub.Notification{ DataHash: h.DataHash.Bytes(), - Height: uint64(h.Height()), + Height: h.Height(), }) if err != nil { log.Warn("failed to broadcast availability message", diff --git a/go.mod b/go.mod index 63f6879041..846f4316e6 100644 --- a/go.mod +++ b/go.mod @@ -14,8 +14,8 @@ require ( github.com/benbjohnson/clock v1.3.5 github.com/celestiaorg/celestia-app v1.0.0-rc12 github.com/celestiaorg/go-ds-badger4 v0.0.0-20230712104058-7ede1c814ac5 - github.com/celestiaorg/go-fraud v0.1.2 - github.com/celestiaorg/go-header v0.2.13 + github.com/celestiaorg/go-fraud v0.2.0 + github.com/celestiaorg/go-header v0.3.0 github.com/celestiaorg/go-libp2p-messenger v0.2.0 github.com/celestiaorg/nmt v0.18.1 github.com/celestiaorg/rsmt2d v0.11.0 diff --git a/go.sum b/go.sum index 6adeef7f7c..09bd8ab26e 100644 --- a/go.sum +++ b/go.sum @@ -366,10 +366,10 @@ github.com/celestiaorg/dagstore v0.0.0-20230824094345-537c012aa403 h1:Lj73O3S+KJ github.com/celestiaorg/dagstore v0.0.0-20230824094345-537c012aa403/go.mod h1:cCGM1UoMvyTk8k62mkc+ReVu8iHBCtSBAAL4wYU7KEI= github.com/celestiaorg/go-ds-badger4 v0.0.0-20230712104058-7ede1c814ac5 
h1:MJgXvhJP1Au8rXTvMMlBXodu9jplEK1DxiLtMnEphOs= github.com/celestiaorg/go-ds-badger4 v0.0.0-20230712104058-7ede1c814ac5/go.mod h1:r6xB3nvGotmlTACpAr3SunxtoXeesbqb57elgMJqflY= -github.com/celestiaorg/go-fraud v0.1.2 h1:Bf7yIN3lZ4IR/Vlu5OtmcVCVNESBKEJ/xwu28rRKGA8= -github.com/celestiaorg/go-fraud v0.1.2/go.mod h1:kHZXQY+6gd1kYkoWRFFKgWyrLPWRgDN3vd1Ll9gE/oo= -github.com/celestiaorg/go-header v0.2.13 h1:sUJLXYs8ViPpxLXyIIaW3h4tPFgtVYMhzsLC4GHfS8I= -github.com/celestiaorg/go-header v0.2.13/go.mod h1:NhiWq97NtAYyRBu8quzYOUghQULjgOzO2Ql0iVEFOf0= +github.com/celestiaorg/go-fraud v0.2.0 h1:aaq2JiW0gTnhEdac3l51UCqSyJ4+VjFGTTpN83V4q7I= +github.com/celestiaorg/go-fraud v0.2.0/go.mod h1:lNY1i4K6kUeeE60Z2VK8WXd+qXb8KRzfBhvwPkK6aUc= +github.com/celestiaorg/go-header v0.3.0 h1:9fhxSgldPiWWq3yd9u7oSk5vYqaLV1JkeTnJdGcisFo= +github.com/celestiaorg/go-header v0.3.0/go.mod h1:H8xhnDLDLbkpwmWPhCaZyTnIV3dlVxBHPnxNXS2Qu6c= github.com/celestiaorg/go-libp2p-messenger v0.2.0 h1:/0MuPDcFamQMbw9xTZ73yImqgTO3jHV7wKHvWD/Irao= github.com/celestiaorg/go-libp2p-messenger v0.2.0/go.mod h1:s9PIhMi7ApOauIsfBcQwbr7m+HBzmVfDIS+QLdgzDSo= github.com/celestiaorg/go-verifcid v0.0.1-lazypatch h1:9TSe3w1cmJmbWlweCwCTIZkan7jV8M+KwglXpdD+UG8= diff --git a/header/header.go b/header/header.go index d69b11d998..92f8538696 100644 --- a/header/header.go +++ b/header/header.go @@ -2,12 +2,12 @@ package header import ( "bytes" - "context" "encoding/json" "fmt" "time" tmjson "github.com/tendermint/tendermint/libs/json" + "github.com/tendermint/tendermint/light" core "github.com/tendermint/tendermint/types" "github.com/celestiaorg/celestia-app/pkg/appconsts" @@ -18,7 +18,6 @@ import ( // ConstructFn aliases a function that creates an ExtendedHeader. 
type ConstructFn = func( - context.Context, *core.Header, *core.Commit, *core.ValidatorSet, @@ -45,31 +44,8 @@ type ExtendedHeader struct { DAH *DataAvailabilityHeader `json:"dah"` } -func (eh *ExtendedHeader) New() libhead.Header { - return new(ExtendedHeader) -} - -func (eh *ExtendedHeader) IsZero() bool { - return eh == nil -} - -func (eh *ExtendedHeader) ChainID() string { - return eh.RawHeader.ChainID -} - -func (eh *ExtendedHeader) Height() int64 { - return eh.RawHeader.Height -} - -func (eh *ExtendedHeader) Time() time.Time { - return eh.RawHeader.Time -} - -var _ libhead.Header = &ExtendedHeader{} - // MakeExtendedHeader assembles new ExtendedHeader. func MakeExtendedHeader( - _ context.Context, h *core.Header, comm *core.Commit, vals *core.ValidatorSet, @@ -95,14 +71,34 @@ func MakeExtendedHeader( Commit: comm, ValidatorSet: vals, } - return eh, eh.Validate() + return eh, nil +} + +func (eh *ExtendedHeader) New() *ExtendedHeader { + return new(ExtendedHeader) +} + +func (eh *ExtendedHeader) IsZero() bool { + return eh == nil +} + +func (eh *ExtendedHeader) ChainID() string { + return eh.RawHeader.ChainID +} + +func (eh *ExtendedHeader) Height() uint64 { + return uint64(eh.RawHeader.Height) +} + +func (eh *ExtendedHeader) Time() time.Time { + return eh.RawHeader.Time } // Hash returns Hash of the wrapped RawHeader. // NOTE: It purposely overrides Hash method of RawHeader to get it directly from Commit without // recomputing. func (eh *ExtendedHeader) Hash() libhead.Hash { - return libhead.Hash(eh.Commit.BlockID.Hash) + return eh.Commit.BlockID.Hash.Bytes() } // LastHeader returns the Hash of the last wrapped RawHeader. 
@@ -158,7 +154,8 @@ func (eh *ExtendedHeader) Validate() error { return fmt.Errorf("commit signs block %X, header is block %X", chash, hhash) } - if err := eh.ValidatorSet.VerifyCommitLight(eh.ChainID(), eh.Commit.BlockID, eh.Height(), eh.Commit); err != nil { + err = eh.ValidatorSet.VerifyCommitLight(eh.ChainID(), eh.Commit.BlockID, int64(eh.Height()), eh.Commit) + if err != nil { return fmt.Errorf("VerifyCommitLight error at height %d: %w", eh.Height(), err) } @@ -169,6 +166,42 @@ func (eh *ExtendedHeader) Validate() error { return nil } +// Verify validates given untrusted Header against trusted ExtendedHeader. +func (eh *ExtendedHeader) Verify(untrst *ExtendedHeader) error { + isAdjacent := eh.Height()+1 == untrst.Height() + if isAdjacent { + // Optimized verification for adjacent headers + // Check the validator hashes are the same + if !bytes.Equal(untrst.ValidatorsHash, eh.NextValidatorsHash) { + return &libhead.VerifyError{ + Reason: fmt.Errorf("expected old header next validators (%X) to match those from new header (%X)", + eh.NextValidatorsHash, + untrst.ValidatorsHash, + ), + } + } + + if !bytes.Equal(untrst.LastHeader(), eh.Hash()) { + return &libhead.VerifyError{ + Reason: fmt.Errorf("expected new header to point to last header hash (%X), but got %X)", + eh.Hash(), + untrst.LastHeader(), + ), + } + } + + return nil + } + + if err := eh.ValidatorSet.VerifyCommitLightTrusting(eh.ChainID(), untrst.Commit, light.DefaultTrustLevel); err != nil { + return &libhead.VerifyError{ + Reason: err, + SoftFailure: true, + } + } + return nil +} + // MarshalBinary marshals ExtendedHeader to binary. 
func (eh *ExtendedHeader) MarshalBinary() ([]byte, error) { return MarshalExtendedHeader(eh) @@ -240,3 +273,5 @@ func (eh *ExtendedHeader) UnmarshalJSON(data []byte) error { eh.RawHeader = *rawHeader return nil } + +var _ libhead.Header[*ExtendedHeader] = &ExtendedHeader{} diff --git a/header/headertest/testing.go b/header/headertest/testing.go index b20d389452..65ae8c950f 100644 --- a/header/headertest/testing.go +++ b/header/headertest/testing.go @@ -158,9 +158,9 @@ func (s *TestSuite) NextHeader() *header.ExtendedHeader { } func (s *TestSuite) GenRawHeader( - height int64, lastHeader, lastCommit, dataHash libhead.Hash) *header.RawHeader { + height uint64, lastHeader, lastCommit, dataHash libhead.Hash) *header.RawHeader { rh := RandRawHeader(s.t) - rh.Height = height + rh.Height = int64(height) rh.Time = time.Now() rh.LastBlockID = types.BlockID{Hash: bytes.HexBytes(lastHeader)} rh.LastCommitHash = bytes.HexBytes(lastCommit) @@ -299,7 +299,7 @@ func RandBlockID(*testing.T) types.BlockID { // FraudMaker creates a custom ConstructFn that breaks the block at the given height. 
func FraudMaker(t *testing.T, faultHeight int64, bServ blockservice.BlockService) header.ConstructFn { log.Warn("Corrupting block...", "height", faultHeight) - return func(ctx context.Context, + return func( h *types.Header, comm *types.Commit, vals *types.ValidatorSet, @@ -318,7 +318,7 @@ func FraudMaker(t *testing.T, faultHeight int64, bServ blockservice.BlockService } return eh, nil } - return header.MakeExtendedHeader(ctx, h, comm, vals, eds) + return header.MakeExtendedHeader(h, comm, vals, eds) } } diff --git a/header/headertest/verify_test.go b/header/headertest/verify_test.go index 33bcf72642..7ef16afc8d 100644 --- a/header/headertest/verify_test.go +++ b/header/headertest/verify_test.go @@ -3,34 +3,32 @@ package headertest import ( "strconv" "testing" - "time" "github.com/stretchr/testify/assert" tmrand "github.com/tendermint/tendermint/libs/rand" - "github.com/celestiaorg/celestia-app/pkg/appconsts" - libhead "github.com/celestiaorg/go-header" + "github.com/celestiaorg/celestia-node/header" ) func TestVerify(t *testing.T) { h := NewTestSuite(t, 2).GenExtendedHeaders(3) trusted, untrustedAdj, untrustedNonAdj := h[0], h[1], h[2] tests := []struct { - prepare func() libhead.Header + prepare func() *header.ExtendedHeader err bool }{ { - prepare: func() libhead.Header { return untrustedAdj }, + prepare: func() *header.ExtendedHeader { return untrustedAdj }, err: false, }, { - prepare: func() libhead.Header { + prepare: func() *header.ExtendedHeader { return untrustedNonAdj }, err: false, }, { - prepare: func() libhead.Header { + prepare: func() *header.ExtendedHeader { untrusted := *untrustedAdj untrusted.ValidatorsHash = tmrand.Bytes(32) return &untrusted @@ -38,7 +36,7 @@ func TestVerify(t *testing.T) { err: true, }, { - prepare: func() libhead.Header { + prepare: func() *header.ExtendedHeader { untrusted := *untrustedAdj untrusted.RawHeader.LastBlockID.Hash = tmrand.Bytes(32) return &untrusted @@ -46,37 +44,10 @@ func TestVerify(t *testing.T) { err: true, 
}, { - prepare: func() libhead.Header { - untrustedAdj.RawHeader.Time = untrustedAdj.RawHeader.Time.Add(time.Minute) - return untrustedAdj - }, - err: true, - }, - { - prepare: func() libhead.Header { - untrustedAdj.RawHeader.Time = untrustedAdj.RawHeader.Time.Truncate(time.Hour) - return untrustedAdj - }, - err: true, - }, - { - prepare: func() libhead.Header { - untrustedAdj.RawHeader.ChainID = "toaster" - return untrustedAdj - }, - err: true, - }, - { - prepare: func() libhead.Header { - untrustedAdj.RawHeader.Height++ - return untrustedAdj - }, - err: true, - }, - { - prepare: func() libhead.Header { - untrustedAdj.RawHeader.Version.App = appconsts.LatestVersion + 1 - return untrustedAdj + prepare: func() *header.ExtendedHeader { + untrusted := *untrustedNonAdj + untrusted.Commit = NewTestSuite(t, 2).Commit(RandRawHeader(t)) + return &untrusted }, err: true, }, diff --git a/header/serde.go b/header/serde.go index f4763e3b3b..a511a1352b 100644 --- a/header/serde.go +++ b/header/serde.go @@ -61,7 +61,7 @@ func UnmarshalExtendedHeader(data []byte) (*ExtendedHeader, error) { return nil, err } - return out, out.Validate() + return out, nil } func ExtendedHeaderToProto(eh *ExtendedHeader) (*header_pb.ExtendedHeader, error) { diff --git a/header/verify.go b/header/verify.go deleted file mode 100644 index 827f6c1d1b..0000000000 --- a/header/verify.go +++ /dev/null @@ -1,76 +0,0 @@ -package header - -import ( - "bytes" - "fmt" - "time" - - libhead "github.com/celestiaorg/go-header" -) - -// Verify validates given untrusted Header against trusted ExtendedHeader. 
-func (eh *ExtendedHeader) Verify(untrusted libhead.Header) error { - untrst, ok := untrusted.(*ExtendedHeader) - if !ok { - // if the header of the type was given, something very wrong happens - panic(fmt.Sprintf("invalid header type: expected %T, got %T", eh, untrusted)) - } - - if err := eh.verify(untrst); err != nil { - return &libhead.VerifyError{Reason: err} - } - - isAdjacent := eh.Height()+1 == untrst.Height() - if isAdjacent { - // Optimized verification for adjacent headers - // Check the validator hashes are the same - if !bytes.Equal(untrst.ValidatorsHash, eh.NextValidatorsHash) { - return &libhead.VerifyError{ - Reason: fmt.Errorf("expected old header next validators (%X) to match those from new header (%X)", - eh.NextValidatorsHash, - untrst.ValidatorsHash, - ), - } - } - - if !bytes.Equal(untrst.LastHeader(), eh.Hash()) { - return &libhead.VerifyError{ - Reason: fmt.Errorf("expected new header to point to last header hash (%X), but got %X)", - eh.Hash(), - untrst.LastHeader(), - ), - } - } - - return nil - } - - return nil -} - -// clockDrift defines how much new header's time can drift into -// the future relative to the now time during verification. -var clockDrift = 10 * time.Second - -// verify performs basic verification of untrusted header. 
-func (eh *ExtendedHeader) verify(untrst libhead.Header) error { - if untrst.Height() <= eh.Height() { - return fmt.Errorf("untrusted header height(%d) <= current trusted header(%d)", untrst.Height(), eh.Height()) - } - - if untrst.ChainID() != eh.ChainID() { - return fmt.Errorf("untrusted header has different chain %s, not %s", untrst.ChainID(), eh.ChainID()) - } - - if !untrst.Time().After(eh.Time()) { - return fmt.Errorf("untrusted header time(%v) must be after current trusted header(%v)", untrst.Time(), eh.Time()) - } - - now := time.Now() - if !untrst.Time().Before(now.Add(clockDrift)) { - return fmt.Errorf( - "new untrusted header has a time from the future %v (now: %v, clockDrift: %v)", untrst.Time(), now, clockDrift) - } - - return nil -} diff --git a/nodebuilder/das/constructors.go b/nodebuilder/das/constructors.go index 18f6962f40..7c6b5bed4f 100644 --- a/nodebuilder/das/constructors.go +++ b/nodebuilder/das/constructors.go @@ -42,16 +42,16 @@ func newDASer( hsub libhead.Subscriber[*header.ExtendedHeader], store libhead.Store[*header.ExtendedHeader], batching datastore.Batching, - fraudServ fraud.Service, + fraudServ fraud.Service[*header.ExtendedHeader], bFn shrexsub.BroadcastFn, options ...das.Option, -) (*das.DASer, *modfraud.ServiceBreaker[*das.DASer], error) { +) (*das.DASer, *modfraud.ServiceBreaker[*das.DASer, *header.ExtendedHeader], error) { ds, err := das.NewDASer(da, hsub, store, batching, fraudServ, bFn, options...) 
if err != nil { return nil, nil, err } - return ds, &modfraud.ServiceBreaker[*das.DASer]{ + return ds, &modfraud.ServiceBreaker[*das.DASer, *header.ExtendedHeader]{ Service: ds, FraudServ: fraudServ, FraudType: byzantine.BadEncoding, diff --git a/nodebuilder/das/module.go b/nodebuilder/das/module.go index 61c935fd40..d9f7e700e2 100644 --- a/nodebuilder/das/module.go +++ b/nodebuilder/das/module.go @@ -6,6 +6,7 @@ import ( "go.uber.org/fx" "github.com/celestiaorg/celestia-node/das" + "github.com/celestiaorg/celestia-node/header" modfraud "github.com/celestiaorg/celestia-node/nodebuilder/fraud" "github.com/celestiaorg/celestia-node/nodebuilder/node" ) @@ -41,10 +42,10 @@ func ConstructModule(tp node.Type, cfg *Config) fx.Option { baseComponents, fx.Provide(fx.Annotate( newDASer, - fx.OnStart(func(ctx context.Context, breaker *modfraud.ServiceBreaker[*das.DASer]) error { + fx.OnStart(func(ctx context.Context, breaker *modfraud.ServiceBreaker[*das.DASer, *header.ExtendedHeader]) error { return breaker.Start(ctx) }), - fx.OnStop(func(ctx context.Context, breaker *modfraud.ServiceBreaker[*das.DASer]) error { + fx.OnStop(func(ctx context.Context, breaker *modfraud.ServiceBreaker[*das.DASer, *header.ExtendedHeader]) error { return breaker.Stop(ctx) }), )), diff --git a/nodebuilder/fraud/constructors.go b/nodebuilder/fraud/constructors.go index a70ee3e3d4..eee85d4139 100644 --- a/nodebuilder/fraud/constructors.go +++ b/nodebuilder/fraud/constructors.go @@ -1,8 +1,6 @@ package fraud import ( - "context" - "github.com/ipfs/go-datastore" pubsub "github.com/libp2p/go-libp2p-pubsub" "github.com/libp2p/go-libp2p/core/host" @@ -16,32 +14,46 @@ import ( "github.com/celestiaorg/celestia-node/nodebuilder/p2p" ) -func newFraudService(syncerEnabled bool) func( - fx.Lifecycle, - *pubsub.PubSub, - host.Host, - libhead.Store[*header.ExtendedHeader], - datastore.Batching, - p2p.Network, -) (Module, fraud.Service, error) { - return func( - lc fx.Lifecycle, - sub *pubsub.PubSub, - host 
host.Host, - hstore libhead.Store[*header.ExtendedHeader], - ds datastore.Batching, - network p2p.Network, - ) (Module, fraud.Service, error) { - getter := func(ctx context.Context, height uint64) (libhead.Header, error) { - return hstore.GetByHeight(ctx, height) - } - pservice := fraudserv.NewProofService(sub, host, getter, ds, syncerEnabled, network.String()) - lc.Append(fx.Hook{ - OnStart: pservice.Start, - OnStop: pservice.Stop, - }) - return &Service{ - Service: pservice, - }, pservice, nil - } +func fraudUnmarshaler() fraud.ProofUnmarshaler[*header.ExtendedHeader] { + return defaultProofUnmarshaler +} + +func newFraudServiceWithSync( + lc fx.Lifecycle, + sub *pubsub.PubSub, + host host.Host, + hstore libhead.Store[*header.ExtendedHeader], + registry fraud.ProofUnmarshaler[*header.ExtendedHeader], + ds datastore.Batching, + network p2p.Network, +) (Module, fraud.Service[*header.ExtendedHeader], error) { + syncerEnabled := true + pservice := fraudserv.NewProofService(sub, host, hstore.GetByHeight, registry, ds, syncerEnabled, network.String()) + lc.Append(fx.Hook{ + OnStart: pservice.Start, + OnStop: pservice.Stop, + }) + return &module{ + Service: pservice, + }, pservice, nil +} + +func newFraudServiceWithoutSync( + lc fx.Lifecycle, + sub *pubsub.PubSub, + host host.Host, + hstore libhead.Store[*header.ExtendedHeader], + registry fraud.ProofUnmarshaler[*header.ExtendedHeader], + ds datastore.Batching, + network p2p.Network, +) (Module, fraud.Service[*header.ExtendedHeader], error) { + syncerEnabled := false + pservice := fraudserv.NewProofService(sub, host, hstore.GetByHeight, registry, ds, syncerEnabled, network.String()) + lc.Append(fx.Hook{ + OnStart: pservice.Start, + OnStop: pservice.Stop, + }) + return &module{ + Service: pservice, + }, pservice, nil } diff --git a/nodebuilder/fraud/fraud.go b/nodebuilder/fraud/fraud.go index 8d10d34e88..45c3863d6f 100644 --- a/nodebuilder/fraud/fraud.go +++ b/nodebuilder/fraud/fraud.go @@ -2,8 +2,12 @@ package fraud 
import ( "context" + "encoding/json" + "errors" "github.com/celestiaorg/go-fraud" + + "github.com/celestiaorg/celestia-node/header" ) var _ Module = (*API)(nil) @@ -35,3 +39,83 @@ func (api *API) Subscribe(ctx context.Context, proofType fraud.ProofType) (<-cha func (api *API) Get(ctx context.Context, proofType fraud.ProofType) ([]Proof, error) { return api.Internal.Get(ctx, proofType) } + +var _ Module = (*module)(nil) + +// module is an implementation of Module that uses fraud.module as a backend. It is used to +// provide fraud proofs as a non-interface type to the API, and wrap fraud.Subscriber with a +// channel of Proofs. +type module struct { + fraud.Service[*header.ExtendedHeader] +} + +func (s *module) Subscribe(ctx context.Context, proofType fraud.ProofType) (<-chan Proof, error) { + subscription, err := s.Service.Subscribe(proofType) + if err != nil { + return nil, err + } + proofs := make(chan Proof) + go func() { + defer close(proofs) + defer subscription.Cancel() + for { + proof, err := subscription.Proof(ctx) + if err != nil { + if !errors.Is(err, context.DeadlineExceeded) && !errors.Is(err, context.Canceled) { + log.Errorw("fetching proof from subscription", "err", err) + } + return + } + select { + case <-ctx.Done(): + return + case proofs <- Proof{Proof: proof}: + } + } + }() + return proofs, nil +} + +func (s *module) Get(ctx context.Context, proofType fraud.ProofType) ([]Proof, error) { + originalProofs, err := s.Service.Get(ctx, proofType) + if err != nil { + return nil, err + } + proofs := make([]Proof, len(originalProofs)) + for i, originalProof := range originalProofs { + proofs[i].Proof = originalProof + } + return proofs, nil +} + +// Proof embeds the fraud.Proof interface type to provide a concrete type for JSON serialization. 
+type Proof struct { + fraud.Proof[*header.ExtendedHeader] +} + +type fraudProofJSON struct { + ProofType fraud.ProofType `json:"proof_type"` + Data []byte `json:"data"` +} + +func (f *Proof) UnmarshalJSON(data []byte) error { + var fp fraudProofJSON + err := json.Unmarshal(data, &fp) + if err != nil { + return err + } + f.Proof, err = defaultProofUnmarshaler.Unmarshal(fp.ProofType, fp.Data) + return err +} + +func (f *Proof) MarshalJSON() ([]byte, error) { + marshaledProof, err := f.MarshalBinary() + if err != nil { + return nil, err + } + fraudProof := &fraudProofJSON{ + ProofType: f.Type(), + Data: marshaledProof, + } + return json.Marshal(fraudProof) +} diff --git a/nodebuilder/fraud/lifecycle.go b/nodebuilder/fraud/lifecycle.go index cffa4d0f56..1a6702aafa 100644 --- a/nodebuilder/fraud/lifecycle.go +++ b/nodebuilder/fraud/lifecycle.go @@ -2,11 +2,13 @@ package fraud import ( "context" + "errors" "fmt" "github.com/ipfs/go-datastore" "github.com/celestiaorg/go-fraud" + libhead "github.com/celestiaorg/go-header" ) // service defines minimal interface with service lifecycle methods @@ -18,30 +20,30 @@ type service interface { // ServiceBreaker wraps any service with fraud proof subscription of a specific type. // If proof happens the service is Stopped automatically. // TODO(@Wondertan): Support multiple fraud types. -type ServiceBreaker[S service] struct { +type ServiceBreaker[S service, H libhead.Header[H]] struct { Service S FraudType fraud.ProofType - FraudServ fraud.Service + FraudServ fraud.Service[H] ctx context.Context cancel context.CancelFunc - sub fraud.Subscription + sub fraud.Subscription[H] } // Start starts the inner service if there are no fraud proofs stored. // Subscribes for fraud and stops the service whenever necessary. 
-func (breaker *ServiceBreaker[S]) Start(ctx context.Context) error { +func (breaker *ServiceBreaker[S, H]) Start(ctx context.Context) error { if breaker == nil { return nil } proofs, err := breaker.FraudServ.Get(ctx, breaker.FraudType) - switch err { + switch { default: return fmt.Errorf("getting proof(%s): %w", breaker.FraudType, err) - case nil: - return &fraud.ErrFraudExists{Proof: proofs} - case datastore.ErrNotFound: + case err == nil: + return &fraud.ErrFraudExists[H]{Proof: proofs} + case errors.Is(err, datastore.ErrNotFound): } err = breaker.Service.Start(ctx) @@ -60,7 +62,7 @@ func (breaker *ServiceBreaker[S]) Start(ctx context.Context) error { } // Stop stops the service and cancels subscription. -func (breaker *ServiceBreaker[S]) Stop(ctx context.Context) error { +func (breaker *ServiceBreaker[S, H]) Stop(ctx context.Context) error { if breaker == nil { return nil } @@ -75,13 +77,13 @@ func (breaker *ServiceBreaker[S]) Stop(ctx context.Context) error { return breaker.Service.Stop(ctx) } -func (breaker *ServiceBreaker[S]) awaitProof() { +func (breaker *ServiceBreaker[S, H]) awaitProof() { _, err := breaker.sub.Proof(breaker.ctx) if err != nil { return } - if err := breaker.Stop(breaker.ctx); err != nil && err != context.Canceled { + if err := breaker.Stop(breaker.ctx); err != nil && !errors.Is(err, context.Canceled) { log.Errorw("stopping service: %s", err.Error()) } } diff --git a/nodebuilder/fraud/module.go b/nodebuilder/fraud/module.go index 718b702f84..bf353f63c6 100644 --- a/nodebuilder/fraud/module.go +++ b/nodebuilder/fraud/module.go @@ -6,27 +6,31 @@ import ( "github.com/celestiaorg/go-fraud" + "github.com/celestiaorg/celestia-node/header" "github.com/celestiaorg/celestia-node/nodebuilder/node" ) var log = logging.Logger("module/fraud") func ConstructModule(tp node.Type) fx.Option { - baseComponent := fx.Provide(func(serv fraud.Service) fraud.Getter { - return serv - }) + baseComponent := fx.Options( + fx.Provide(fraudUnmarshaler), + 
fx.Provide(func(serv fraud.Service[*header.ExtendedHeader]) fraud.Getter[*header.ExtendedHeader] { + return serv + }), + ) switch tp { case node.Light: return fx.Module( "fraud", baseComponent, - fx.Provide(newFraudService(true)), + fx.Provide(newFraudServiceWithSync), ) case node.Full, node.Bridge: return fx.Module( "fraud", baseComponent, - fx.Provide(newFraudService(false)), + fx.Provide(newFraudServiceWithoutSync), ) default: panic("invalid node type") diff --git a/nodebuilder/fraud/service.go b/nodebuilder/fraud/service.go deleted file mode 100644 index 0337c375ef..0000000000 --- a/nodebuilder/fraud/service.go +++ /dev/null @@ -1,87 +0,0 @@ -package fraud - -import ( - "context" - "encoding/json" - - "github.com/celestiaorg/go-fraud" -) - -var _ Module = (*Service)(nil) - -// Service is an implementation of Module that uses fraud.Service as a backend. It is used to -// provide fraud proofs as a non-interface type to the API, and wrap fraud.Subscriber with a -// channel of Proofs. -type Service struct { - fraud.Service -} - -func (s *Service) Subscribe(ctx context.Context, proofType fraud.ProofType) (<-chan Proof, error) { - subscription, err := s.Service.Subscribe(proofType) - if err != nil { - return nil, err - } - proofs := make(chan Proof) - go func() { - defer close(proofs) - for { - proof, err := subscription.Proof(ctx) - if err != nil { - if err != context.DeadlineExceeded && err != context.Canceled { - log.Errorw("fetching proof from subscription", "err", err) - } - return - } - select { - case <-ctx.Done(): - return - case proofs <- Proof{Proof: proof}: - } - } - }() - return proofs, nil -} - -func (s *Service) Get(ctx context.Context, proofType fraud.ProofType) ([]Proof, error) { - originalProofs, err := s.Service.Get(ctx, proofType) - if err != nil { - return nil, err - } - proofs := make([]Proof, len(originalProofs)) - for i, originalProof := range originalProofs { - proofs[i].Proof = originalProof - } - return proofs, nil -} - -// Proof embeds the 
fraud.Proof interface type to provide a concrete type for JSON serialization. -type Proof struct { - fraud.Proof -} - -type fraudProofJSON struct { - ProofType fraud.ProofType `json:"proof_type"` - Data []byte `json:"data"` -} - -func (f *Proof) UnmarshalJSON(data []byte) error { - var fp fraudProofJSON - err := json.Unmarshal(data, &fp) - if err != nil { - return err - } - f.Proof, err = fraud.Unmarshal(fp.ProofType, fp.Data) - return err -} - -func (f *Proof) MarshalJSON() ([]byte, error) { - marshaledProof, err := f.MarshalBinary() - if err != nil { - return nil, err - } - fraudProof := &fraudProofJSON{ - ProofType: f.Type(), - Data: marshaledProof, - } - return json.Marshal(fraudProof) -} diff --git a/nodebuilder/fraud/unmarshaler.go b/nodebuilder/fraud/unmarshaler.go new file mode 100644 index 0000000000..d5e0461f01 --- /dev/null +++ b/nodebuilder/fraud/unmarshaler.go @@ -0,0 +1,32 @@ +package fraud + +import ( + "github.com/celestiaorg/go-fraud" + + "github.com/celestiaorg/celestia-node/header" + "github.com/celestiaorg/celestia-node/share/eds/byzantine" +) + +var defaultProofUnmarshaler proofRegistry + +type proofRegistry struct{} + +func (pr proofRegistry) List() []fraud.ProofType { + return []fraud.ProofType{ + byzantine.BadEncoding, + } +} + +func (pr proofRegistry) Unmarshal(proofType fraud.ProofType, data []byte) (fraud.Proof[*header.ExtendedHeader], error) { + switch proofType { + case byzantine.BadEncoding: + befp := &byzantine.BadEncodingProof{} + err := befp.UnmarshalBinary(data) + if err != nil { + return nil, err + } + return befp, nil + default: + return nil, &fraud.ErrNoUnmarshaler{ProofType: proofType} + } +} diff --git a/nodebuilder/header/constructors.go b/nodebuilder/header/constructors.go index 7d70f0f5a8..267f0c30f7 100644 --- a/nodebuilder/header/constructors.go +++ b/nodebuilder/header/constructors.go @@ -15,21 +15,20 @@ import ( "github.com/celestiaorg/go-header/store" "github.com/celestiaorg/go-header/sync" - 
"github.com/celestiaorg/celestia-node/header" modfraud "github.com/celestiaorg/celestia-node/nodebuilder/fraud" modp2p "github.com/celestiaorg/celestia-node/nodebuilder/p2p" "github.com/celestiaorg/celestia-node/share/eds/byzantine" ) // newP2PExchange constructs a new Exchange for headers. -func newP2PExchange( +func newP2PExchange[H libhead.Header[H]]( lc fx.Lifecycle, bpeers modp2p.Bootstrappers, network modp2p.Network, host host.Host, conngater *conngater.BasicConnectionGater, cfg Config, -) (libhead.Exchange[*header.ExtendedHeader], error) { +) (libhead.Exchange[H], error) { peers, err := cfg.trustedPeers(bpeers) if err != nil { return nil, err @@ -39,7 +38,7 @@ func newP2PExchange( ids[index] = peer.ID host.Peerstore().AddAddrs(peer.ID, peer.Addrs, peerstore.PermanentAddrTTL) } - exchange, err := p2p.NewExchange[*header.ExtendedHeader](host, ids, conngater, + exchange, err := p2p.NewExchange[H](host, ids, conngater, p2p.WithParams(cfg.Client), p2p.WithNetworkID[p2p.ClientParameters](network.String()), p2p.WithChainID(network.String()), @@ -60,14 +59,14 @@ func newP2PExchange( } // newSyncer constructs new Syncer for headers. 
-func newSyncer( - ex libhead.Exchange[*header.ExtendedHeader], - fservice libfraud.Service, - store InitStore, - sub libhead.Subscriber[*header.ExtendedHeader], +func newSyncer[H libhead.Header[H]]( + ex libhead.Exchange[H], + fservice libfraud.Service[H], + store InitStore[H], + sub libhead.Subscriber[H], cfg Config, -) (*sync.Syncer[*header.ExtendedHeader], *modfraud.ServiceBreaker[*sync.Syncer[*header.ExtendedHeader]], error) { - syncer, err := sync.NewSyncer[*header.ExtendedHeader](ex, store, sub, +) (*sync.Syncer[H], *modfraud.ServiceBreaker[*sync.Syncer[H], H], error) { + syncer, err := sync.NewSyncer[H](ex, store, sub, sync.WithParams(cfg.Syncer), sync.WithBlockTime(modp2p.BlockTime), ) @@ -75,7 +74,7 @@ func newSyncer( return nil, nil, err } - return syncer, &modfraud.ServiceBreaker[*sync.Syncer[*header.ExtendedHeader]]{ + return syncer, &modfraud.ServiceBreaker[*sync.Syncer[H], H]{ Service: syncer, FraudType: byzantine.BadEncoding, FraudServ: fservice, @@ -84,16 +83,16 @@ func newSyncer( // InitStore is a type representing initialized header store. // NOTE: It is needed to ensure that Store is always initialized before Syncer is started. 
-type InitStore libhead.Store[*header.ExtendedHeader] +type InitStore[H libhead.Header[H]] libhead.Store[H] // newInitStore constructs an initialized store -func newInitStore( +func newInitStore[H libhead.Header[H]]( lc fx.Lifecycle, cfg Config, net modp2p.Network, - s libhead.Store[*header.ExtendedHeader], - ex libhead.Exchange[*header.ExtendedHeader], -) (InitStore, error) { + s libhead.Store[H], + ex libhead.Exchange[H], +) (InitStore[H], error) { trustedHash, err := cfg.trustedHash(net) if err != nil { return nil, err diff --git a/nodebuilder/header/module.go b/nodebuilder/header/module.go index 77e7c5eb99..5e02e94fe1 100644 --- a/nodebuilder/header/module.go +++ b/nodebuilder/header/module.go @@ -22,7 +22,7 @@ import ( var log = logging.Logger("module/header") -func ConstructModule(tp node.Type, cfg *Config) fx.Option { +func ConstructModule[H libhead.Header[H]](tp node.Type, cfg *Config) fx.Option { // sanitize config values before constructing module cfgErr := cfg.Validate(tp) @@ -31,61 +31,63 @@ func ConstructModule(tp node.Type, cfg *Config) fx.Option { fx.Error(cfgErr), fx.Provide(newHeaderService), fx.Provide(fx.Annotate( - func(ds datastore.Batching) (libhead.Store[*header.ExtendedHeader], error) { - return store.NewStore[*header.ExtendedHeader](ds, store.WithParams(cfg.Store)) + func(ds datastore.Batching) (libhead.Store[H], error) { + return store.NewStore[H](ds, store.WithParams(cfg.Store)) }, - fx.OnStart(func(ctx context.Context, store libhead.Store[*header.ExtendedHeader]) error { - return store.Start(ctx) + fx.OnStart(func(ctx context.Context, str libhead.Store[H]) error { + s := str.(*store.Store[H]) + return s.Start(ctx) }), - fx.OnStop(func(ctx context.Context, store libhead.Store[*header.ExtendedHeader]) error { - return store.Stop(ctx) + fx.OnStop(func(ctx context.Context, str libhead.Store[H]) error { + s := str.(*store.Store[H]) + return s.Stop(ctx) }), )), - fx.Provide(newInitStore), - fx.Provide(func(subscriber 
*p2p.Subscriber[*header.ExtendedHeader]) libhead.Subscriber[*header.ExtendedHeader] { + fx.Provide(newInitStore[H]), + fx.Provide(func(subscriber *p2p.Subscriber[H]) libhead.Subscriber[H] { return subscriber }), fx.Provide(fx.Annotate( - newSyncer, + newSyncer[H], fx.OnStart(func( ctx context.Context, - breaker *modfraud.ServiceBreaker[*sync.Syncer[*header.ExtendedHeader]], + breaker *modfraud.ServiceBreaker[*sync.Syncer[H], H], ) error { return breaker.Start(ctx) }), fx.OnStop(func( ctx context.Context, - breaker *modfraud.ServiceBreaker[*sync.Syncer[*header.ExtendedHeader]], + breaker *modfraud.ServiceBreaker[*sync.Syncer[H], H], ) error { return breaker.Stop(ctx) }), )), fx.Provide(fx.Annotate( - func(ps *pubsub.PubSub, network modp2p.Network) *p2p.Subscriber[*header.ExtendedHeader] { - return p2p.NewSubscriber[*header.ExtendedHeader](ps, header.MsgID, network.String()) + func(ps *pubsub.PubSub, network modp2p.Network) *p2p.Subscriber[H] { + return p2p.NewSubscriber[H](ps, header.MsgID, network.String()) }, - fx.OnStart(func(ctx context.Context, sub *p2p.Subscriber[*header.ExtendedHeader]) error { + fx.OnStart(func(ctx context.Context, sub *p2p.Subscriber[H]) error { return sub.Start(ctx) }), - fx.OnStop(func(ctx context.Context, sub *p2p.Subscriber[*header.ExtendedHeader]) error { + fx.OnStop(func(ctx context.Context, sub *p2p.Subscriber[H]) error { return sub.Stop(ctx) }), )), fx.Provide(fx.Annotate( func( host host.Host, - store libhead.Store[*header.ExtendedHeader], + store libhead.Store[H], network modp2p.Network, - ) (*p2p.ExchangeServer[*header.ExtendedHeader], error) { - return p2p.NewExchangeServer[*header.ExtendedHeader](host, store, + ) (*p2p.ExchangeServer[H], error) { + return p2p.NewExchangeServer[H](host, store, p2p.WithParams(cfg.Server), p2p.WithNetworkID[p2p.ServerParameters](network.String()), ) }, - fx.OnStart(func(ctx context.Context, server *p2p.ExchangeServer[*header.ExtendedHeader]) error { + fx.OnStart(func(ctx context.Context, server 
*p2p.ExchangeServer[H]) error { return server.Start(ctx) }), - fx.OnStop(func(ctx context.Context, server *p2p.ExchangeServer[*header.ExtendedHeader]) error { + fx.OnStop(func(ctx context.Context, server *p2p.ExchangeServer[H]) error { return server.Stop(ctx) }), )), @@ -96,13 +98,13 @@ func ConstructModule(tp node.Type, cfg *Config) fx.Option { return fx.Module( "header", baseComponents, - fx.Provide(newP2PExchange), + fx.Provide(newP2PExchange[H]), ) case node.Bridge: return fx.Module( "header", baseComponents, - fx.Provide(func(subscriber *p2p.Subscriber[*header.ExtendedHeader]) libhead.Broadcaster[*header.ExtendedHeader] { + fx.Provide(func(subscriber *p2p.Subscriber[H]) libhead.Broadcaster[H] { return subscriber }), fx.Supply(header.MakeExtendedHeader), diff --git a/nodebuilder/header/module_test.go b/nodebuilder/header/module_test.go index 89293e4ab4..6a35e35284 100644 --- a/nodebuilder/header/module_test.go +++ b/nodebuilder/header/module_test.go @@ -38,7 +38,7 @@ func TestConstructModule_StoreParams(t *testing.T) { fx.Provide(func() datastore.Batching { return datastore.NewMapDatastore() }), - ConstructModule(node.Light, &cfg), + ConstructModule[*header.ExtendedHeader](node.Light, &cfg), fx.Invoke( func(s libhead.Store[*header.ExtendedHeader]) { ss := s.(*store.Store[*header.ExtendedHeader]) @@ -72,10 +72,10 @@ func TestConstructModule_SyncerParams(t *testing.T) { fx.Provide(func() datastore.Batching { return datastore.NewMapDatastore() }), - fx.Provide(func() fraud.Service { + fx.Provide(func() fraud.Service[*header.ExtendedHeader] { return nil }), - ConstructModule(node.Light, &cfg), + ConstructModule[*header.ExtendedHeader](node.Light, &cfg), fx.Invoke(func(s *sync.Syncer[*header.ExtendedHeader]) { syncer = s }), @@ -100,7 +100,7 @@ func TestConstructModule_ExchangeParams(t *testing.T) { fx.Provide(func() datastore.Batching { return datastore.NewMapDatastore() }), - ConstructModule(node.Light, &cfg), + ConstructModule[*header.ExtendedHeader](node.Light, 
&cfg), fx.Provide(func(b datastore.Batching) (*conngater.BasicConnectionGater, error) { return conngater.NewBasicConnectionGater(b) }), diff --git a/nodebuilder/header/service.go b/nodebuilder/header/service.go index f410c04f04..2b208cb88d 100644 --- a/nodebuilder/header/service.go +++ b/nodebuilder/header/service.go @@ -65,9 +65,9 @@ func (s *Service) GetByHeight(ctx context.Context, height uint64) (*header.Exten switch { case err != nil: return nil, err - case uint64(head.Height()) == height: + case head.Height() == height: return head, nil - case uint64(head.Height())+1 < height: + case head.Height()+1 < height: return nil, fmt.Errorf("header: given height is from the future: "+ "networkHeight: %d, requestedHeight: %d", head.Height(), height) } @@ -78,10 +78,10 @@ func (s *Service) GetByHeight(ctx context.Context, height uint64) (*header.Exten switch { case err != nil: return nil, err - case uint64(head.Height()) == height: + case head.Height() == height: return head, nil // `+1` allows for one header network lag, e.g. 
user request header that is milliseconds away - case uint64(head.Height())+1 < height: + case head.Height()+1 < height: return nil, fmt.Errorf("header: syncing in progress: "+ "localHeadHeight: %d, requestedHeight: %d", head.Height(), height) default: diff --git a/nodebuilder/header/service_test.go b/nodebuilder/header/service_test.go index 6493d3d51d..14d5ada87d 100644 --- a/nodebuilder/header/service_test.go +++ b/nodebuilder/header/service_test.go @@ -25,9 +25,9 @@ func TestGetByHeightHandlesError(t *testing.T) { }) } -type errorSyncer[H libhead.Header] struct{} +type errorSyncer[H libhead.Header[H]] struct{} -func (d *errorSyncer[H]) Head(context.Context) (H, error) { +func (d *errorSyncer[H]) Head(context.Context, ...libhead.HeadOption[H]) (H, error) { var zero H return zero, fmt.Errorf("dummy error") } diff --git a/nodebuilder/module.go b/nodebuilder/module.go index 719705e35c..3068113102 100644 --- a/nodebuilder/module.go +++ b/nodebuilder/module.go @@ -5,13 +5,14 @@ import ( "go.uber.org/fx" + "github.com/celestiaorg/celestia-node/header" "github.com/celestiaorg/celestia-node/libs/fxutil" "github.com/celestiaorg/celestia-node/nodebuilder/blob" "github.com/celestiaorg/celestia-node/nodebuilder/core" "github.com/celestiaorg/celestia-node/nodebuilder/das" "github.com/celestiaorg/celestia-node/nodebuilder/fraud" "github.com/celestiaorg/celestia-node/nodebuilder/gateway" - "github.com/celestiaorg/celestia-node/nodebuilder/header" + modhead "github.com/celestiaorg/celestia-node/nodebuilder/header" "github.com/celestiaorg/celestia-node/nodebuilder/node" "github.com/celestiaorg/celestia-node/nodebuilder/p2p" "github.com/celestiaorg/celestia-node/nodebuilder/rpc" @@ -46,7 +47,7 @@ func ConstructModule(tp node.Type, network p2p.Network, cfg *Config, store Store // modules provided by the node p2p.ConstructModule(tp, &cfg.P2P), state.ConstructModule(tp, &cfg.State, &cfg.Core), - header.ConstructModule(tp, &cfg.Header), + 
modhead.ConstructModule[*header.ExtendedHeader](tp, &cfg.Header), share.ConstructModule(tp, &cfg.Share), rpc.ConstructModule(tp, &cfg.RPC), gateway.ConstructModule(tp, &cfg.Gateway), diff --git a/nodebuilder/p2p/pubsub.go b/nodebuilder/p2p/pubsub.go index 0061ab9eea..13d812e3ce 100644 --- a/nodebuilder/p2p/pubsub.go +++ b/nodebuilder/p2p/pubsub.go @@ -18,6 +18,8 @@ import ( "github.com/celestiaorg/go-fraud" "github.com/celestiaorg/go-fraud/fraudserv" headp2p "github.com/celestiaorg/go-header/p2p" + + "github.com/celestiaorg/celestia-node/header" ) func init() { @@ -66,7 +68,7 @@ func pubSub(cfg Config, params pubSubParams) (*pubsub.PubSub, error) { // * https://github.com/libp2p/specs/blob/master/pubsub/gossipsub/gossipsub-v1.1.md#peer-scoring // * lotus // * prysm - topicScores := topicScoreParams(params.Network) + topicScores := topicScoreParams(params) peerScores, err := peerScoreParams(params.Bootstrappers, cfg) if err != nil { return nil, err @@ -105,15 +107,16 @@ type pubSubParams struct { Host hst.Host Bootstrappers Bootstrappers Network Network + Unmarshaler fraud.ProofUnmarshaler[*header.ExtendedHeader] } -func topicScoreParams(network Network) map[string]*pubsub.TopicScoreParams { +func topicScoreParams(params pubSubParams) map[string]*pubsub.TopicScoreParams { mp := map[string]*pubsub.TopicScoreParams{ - headp2p.PubsubTopicID(network.String()): &headp2p.GossibSubScore, + headp2p.PubsubTopicID(params.Network.String()): &headp2p.GossibSubScore, } - for _, pt := range fraud.Registered() { - mp[fraudserv.PubsubTopicID(pt.String(), network.String())] = &fraudserv.GossibSubScore + for _, pt := range params.Unmarshaler.List() { + mp[fraudserv.PubsubTopicID(pt.String(), params.Network.String())] = &fraudserv.GossibSubScore } return mp diff --git a/nodebuilder/settings.go b/nodebuilder/settings.go index 97440fa7dc..d56125209c 100644 --- a/nodebuilder/settings.go +++ b/nodebuilder/settings.go @@ -22,6 +22,7 @@ import ( "github.com/celestiaorg/go-fraud" + 
"github.com/celestiaorg/celestia-node/header" "github.com/celestiaorg/celestia-node/nodebuilder/das" modheader "github.com/celestiaorg/celestia-node/nodebuilder/header" "github.com/celestiaorg/celestia-node/nodebuilder/node" @@ -80,7 +81,7 @@ func WithMetrics(metricOpts []otlpmetrichttp.Option, nodeType node.Type) fx.Opti } state.WithMetrics(ca) }), - fx.Invoke(fraud.WithMetrics), + fx.Invoke(fraud.WithMetrics[*header.ExtendedHeader]), fx.Invoke(node.WithMetrics), fx.Invoke(modheader.WithMetrics), fx.Invoke(share.WithDiscoveryMetrics), diff --git a/nodebuilder/state/core.go b/nodebuilder/state/core.go index 4636e0f099..f8f8508540 100644 --- a/nodebuilder/state/core.go +++ b/nodebuilder/state/core.go @@ -18,11 +18,11 @@ func coreAccessor( corecfg core.Config, signer *apptypes.KeyringSigner, sync *sync.Syncer[*header.ExtendedHeader], - fraudServ libfraud.Service, -) (*state.CoreAccessor, Module, *modfraud.ServiceBreaker[*state.CoreAccessor]) { + fraudServ libfraud.Service[*header.ExtendedHeader], +) (*state.CoreAccessor, Module, *modfraud.ServiceBreaker[*state.CoreAccessor, *header.ExtendedHeader]) { ca := state.NewCoreAccessor(signer, sync, corecfg.IP, corecfg.RPCPort, corecfg.GRPCPort) - return ca, ca, &modfraud.ServiceBreaker[*state.CoreAccessor]{ + return ca, ca, &modfraud.ServiceBreaker[*state.CoreAccessor, *header.ExtendedHeader]{ Service: ca, FraudType: byzantine.BadEncoding, FraudServ: fraudServ, diff --git a/nodebuilder/state/module.go b/nodebuilder/state/module.go index fe90d023eb..733419a918 100644 --- a/nodebuilder/state/module.go +++ b/nodebuilder/state/module.go @@ -6,6 +6,7 @@ import ( logging "github.com/ipfs/go-log/v2" "go.uber.org/fx" + "github.com/celestiaorg/celestia-node/header" "github.com/celestiaorg/celestia-node/libs/fxutil" "github.com/celestiaorg/celestia-node/nodebuilder/core" modfraud "github.com/celestiaorg/celestia-node/nodebuilder/fraud" @@ -26,10 +27,12 @@ func ConstructModule(tp node.Type, cfg *Config, coreCfg *core.Config) fx.Option 
fx.Error(cfgErr), fxutil.ProvideIf(coreCfg.IsEndpointConfigured(), fx.Annotate( coreAccessor, - fx.OnStart(func(ctx context.Context, breaker *modfraud.ServiceBreaker[*state.CoreAccessor]) error { + fx.OnStart(func(ctx context.Context, + breaker *modfraud.ServiceBreaker[*state.CoreAccessor, *header.ExtendedHeader]) error { return breaker.Start(ctx) }), - fx.OnStop(func(ctx context.Context, breaker *modfraud.ServiceBreaker[*state.CoreAccessor]) error { + fx.OnStop(func(ctx context.Context, + breaker *modfraud.ServiceBreaker[*state.CoreAccessor, *header.ExtendedHeader]) error { return breaker.Stop(ctx) }), )), diff --git a/nodebuilder/testing.go b/nodebuilder/testing.go index 6cb40a2b6c..36f2c2f47f 100644 --- a/nodebuilder/testing.go +++ b/nodebuilder/testing.go @@ -11,9 +11,10 @@ import ( apptypes "github.com/celestiaorg/celestia-app/x/blob/types" "github.com/celestiaorg/celestia-node/core" + "github.com/celestiaorg/celestia-node/header" "github.com/celestiaorg/celestia-node/header/headertest" "github.com/celestiaorg/celestia-node/libs/fxutil" - "github.com/celestiaorg/celestia-node/nodebuilder/header" + modhead "github.com/celestiaorg/celestia-node/nodebuilder/header" "github.com/celestiaorg/celestia-node/nodebuilder/node" "github.com/celestiaorg/celestia-node/nodebuilder/p2p" "github.com/celestiaorg/celestia-node/nodebuilder/state" @@ -47,7 +48,7 @@ func TestNodeWithConfig(t *testing.T, tp node.Type, cfg *Config, opts ...fx.Opti // temp dir for the eds store FIXME: Should be in mem fx.Replace(node.StorePath(t.TempDir())), // avoid requesting trustedPeer during initialization - fxutil.ReplaceAs(headertest.NewStore(t), new(header.InitStore)), + fxutil.ReplaceAs(headertest.NewStore(t), new(modhead.InitStore[*header.ExtendedHeader])), ) // in fact, we don't need core.Client in tests, but Bridge requires is a valid one diff --git a/nodebuilder/tests/api_test.go b/nodebuilder/tests/api_test.go index 1bc1c261de..3a66c4e58c 100644 --- a/nodebuilder/tests/api_test.go +++ 
b/nodebuilder/tests/api_test.go @@ -80,12 +80,12 @@ func TestGetByHeight(t *testing.T) { networkHead, err := client.Header.NetworkHead(ctx) require.NoError(t, err) - _, err = client.Header.GetByHeight(ctx, uint64(networkHead.Height()+1)) + _, err = client.Header.GetByHeight(ctx, networkHead.Height()+1) require.Nil(t, err, "Requesting syncer.Head()+1 shouldn't return an error") networkHead, err = client.Header.NetworkHead(ctx) require.NoError(t, err) - _, err = client.Header.GetByHeight(ctx, uint64(networkHead.Height()+2)) + _, err = client.Header.GetByHeight(ctx, networkHead.Height()+2) require.ErrorContains(t, err, "given height is from the future") } diff --git a/nodebuilder/tests/sync_test.go b/nodebuilder/tests/sync_test.go index dfa3577599..234556a3aa 100644 --- a/nodebuilder/tests/sync_test.go +++ b/nodebuilder/tests/sync_test.go @@ -295,7 +295,7 @@ func TestSyncLightAgainstFull(t *testing.T) { require.NoError(t, err) bridgeHead, err := bridge.HeaderServ.LocalHead(ctx) require.NoError(t, err) - _, err = full.HeaderServ.WaitForHeight(ctx, uint64(bridgeHead.Height())) + _, err = full.HeaderServ.WaitForHeight(ctx, bridgeHead.Height()) require.NoError(t, err) // reset suite bootstrapper list and set full node as a bootstrapper for @@ -316,7 +316,7 @@ func TestSyncLightAgainstFull(t *testing.T) { require.NoError(t, err) fullHead, err := full.HeaderServ.LocalHead(ctx) require.NoError(t, err) - _, err = light.HeaderServ.WaitForHeight(ctx, uint64(fullHead.Height())) + _, err = light.HeaderServ.WaitForHeight(ctx, fullHead.Height()) require.NoError(t, err) // wait for the core block filling process to exit diff --git a/share/eds/byzantine/bad_encoding.go b/share/eds/byzantine/bad_encoding.go index 3c5bc6951b..e3a862e38a 100644 --- a/share/eds/byzantine/bad_encoding.go +++ b/share/eds/byzantine/bad_encoding.go @@ -7,7 +7,6 @@ import ( "github.com/celestiaorg/celestia-app/pkg/wrapper" "github.com/celestiaorg/go-fraud" - libhead "github.com/celestiaorg/go-header" 
"github.com/celestiaorg/rsmt2d" "github.com/celestiaorg/celestia-node/header" @@ -22,10 +21,6 @@ const ( BadEncoding fraud.ProofType = "badencoding" + version ) -func init() { - fraud.Register(&BadEncodingProof{}) -} - type BadEncodingProof struct { headerHash []byte BlockHeight uint64 @@ -46,8 +41,7 @@ func CreateBadEncodingProof( hash []byte, height uint64, errByzantine *ErrByzantine, -) fraud.Proof { - +) fraud.Proof[*header.ExtendedHeader] { return &BadEncodingProof{ headerHash: hash, BlockHeight: height, @@ -112,34 +106,29 @@ func (p *BadEncodingProof) UnmarshalBinary(data []byte) error { // Validate checks that provided Merkle Proofs correspond to the shares, // rebuilds bad row or col from received shares, computes Merkle Root // and compares it with block's Merkle Root. -func (p *BadEncodingProof) Validate(hdr libhead.Header) error { - header, ok := hdr.(*header.ExtendedHeader) - if !ok { - panic(fmt.Sprintf("invalid header type received during BEFP validation: expected %T, got %T", header, hdr)) - } - - if header.Height() != int64(p.BlockHeight) { +func (p *BadEncodingProof) Validate(hdr *header.ExtendedHeader) error { + if hdr.Height() != p.BlockHeight { return fmt.Errorf("incorrect block height during BEFP validation: expected %d, got %d", - p.BlockHeight, header.Height(), + p.BlockHeight, hdr.Height(), ) } - if len(header.DAH.RowRoots) != len(header.DAH.ColumnRoots) { + if len(hdr.DAH.RowRoots) != len(hdr.DAH.ColumnRoots) { // NOTE: This should never happen as callers of this method should not feed it with a // malformed extended header. panic(fmt.Sprintf( "invalid extended header: length of row and column roots do not match. (rowRoots=%d) (colRoots=%d)", - len(header.DAH.RowRoots), - len(header.DAH.ColumnRoots)), + len(hdr.DAH.RowRoots), + len(hdr.DAH.ColumnRoots)), ) } // merkleRoots are the roots against which we are going to check the inclusion of the received // shares. 
Changing the order of the roots to prove the shares relative to the orthogonal axis, // because inside the rsmt2d library rsmt2d.Row = 0 and rsmt2d.Col = 1 - merkleRoots := header.DAH.RowRoots + merkleRoots := hdr.DAH.RowRoots if p.Axis == rsmt2d.Row { - merkleRoots = header.DAH.ColumnRoots + merkleRoots = hdr.DAH.ColumnRoots } if int(p.Index) >= len(merkleRoots) { @@ -196,7 +185,7 @@ func (p *BadEncodingProof) Validate(hdr libhead.Header) error { rebuiltShares, err := codec.Decode(shares) if err != nil { log.Infow("failed to decode shares at height", - "height", header.Height(), "err", err, + "height", hdr.Height(), "err", err, ) return nil } @@ -204,7 +193,7 @@ func (p *BadEncodingProof) Validate(hdr libhead.Header) error { rebuiltExtendedShares, err := codec.Encode(rebuiltShares[0:odsWidth]) if err != nil { log.Infow("failed to encode shares at height", - "height", header.Height(), "err", err, + "height", hdr.Height(), "err", err, ) return nil } @@ -215,7 +204,7 @@ func (p *BadEncodingProof) Validate(hdr libhead.Header) error { err = tree.Push(share) if err != nil { log.Infow("failed to build a tree from the reconstructed shares at height", - "height", header.Height(), "err", err, + "height", hdr.Height(), "err", err, ) return nil } @@ -224,15 +213,15 @@ func (p *BadEncodingProof) Validate(hdr libhead.Header) error { expectedRoot, err := tree.Root() if err != nil { log.Infow("failed to build a tree root at height", - "height", header.Height(), "err", err, + "height", hdr.Height(), "err", err, ) return nil } // root is a merkle root of the row/col where ErrByzantine occurred - root := header.DAH.RowRoots[p.Index] + root := hdr.DAH.RowRoots[p.Index] if p.Axis == rsmt2d.Col { - root = header.DAH.ColumnRoots[p.Index] + root = hdr.DAH.ColumnRoots[p.Index] } // comparing rebuilt Merkle Root of bad row/col with respective Merkle Root of row/col from block. 
diff --git a/share/eds/byzantine/bad_encoding_test.go b/share/eds/byzantine/bad_encoding_test.go index 49cf64c2c2..b5dcea3452 100644 --- a/share/eds/byzantine/bad_encoding_test.go +++ b/share/eds/byzantine/bad_encoding_test.go @@ -86,7 +86,7 @@ func TestIncorrectBadEncodingFraudProof(t *testing.T) { }, } - proof := CreateBadEncodingProof(h.Hash(), uint64(h.Height()), &fakeError) + proof := CreateBadEncodingProof(h.Hash(), h.Height(), &fakeError) err = proof.Validate(h) require.Error(t, err) } diff --git a/share/eds/retriever_test.go b/share/eds/retriever_test.go index ebccf0e384..12b1c11083 100644 --- a/share/eds/retriever_test.go +++ b/share/eds/retriever_test.go @@ -145,7 +145,7 @@ func TestFraudProofValidation(t *testing.T) { faultHeader, err := generateByzantineError(ctx, t, size, bServ) require.True(t, errors.As(err, &errByz)) - p := byzantine.CreateBadEncodingProof([]byte("hash"), uint64(faultHeader.Height()), errByz) + p := byzantine.CreateBadEncodingProof([]byte("hash"), faultHeader.Height(), errByz) err = p.Validate(faultHeader) require.NoError(t, err) }) @@ -197,7 +197,7 @@ func BenchmarkBEFPValidation(b *testing.B) { for i := 0; i < b.N; i++ { b.ReportAllocs() - p := byzantine.CreateBadEncodingProof([]byte("hash"), uint64(h.Height()), errByz) + p := byzantine.CreateBadEncodingProof([]byte("hash"), h.Height(), errByz) err = p.Validate(h) require.NoError(b, err) } diff --git a/share/p2p/discovery/discovery.go b/share/p2p/discovery/discovery.go index c880b9b3c2..f24df2c88b 100644 --- a/share/p2p/discovery/discovery.go +++ b/share/p2p/discovery/discovery.go @@ -199,7 +199,8 @@ func (d *Discovery) Advertise(ctx context.Context) { } // discoveryLoop ensures we always have '~peerLimit' connected peers. -// It initiates peer discovery upon request and restarts the process until the soft limit is reached. +// It initiates peer discovery upon request and restarts the process until the soft limit is +// reached. 
func (d *Discovery) discoveryLoop(ctx context.Context) { t := time.NewTicker(discoveryRetryTimeout) defer t.Stop() diff --git a/share/p2p/peers/manager.go b/share/p2p/peers/manager.go index 2a7c1fee18..87f9361ee2 100644 --- a/share/p2p/peers/manager.go +++ b/share/p2p/peers/manager.go @@ -293,7 +293,7 @@ func (m *Manager) subscribeHeader(ctx context.Context, headerSub libhead.Subscri m.validatedPool(h.DataHash.String()) // store first header for validation purposes - if m.initialHeight.CompareAndSwap(0, uint64(h.Height())) { + if m.initialHeight.CompareAndSwap(0, h.Height()) { log.Debugw("stored initial height", "height", h.Height()) } } diff --git a/share/p2p/peers/manager_test.go b/share/p2p/peers/manager_test.go index e10e820e84..ad04d2c7bd 100644 --- a/share/p2p/peers/manager_test.go +++ b/share/p2p/peers/manager_test.go @@ -274,7 +274,7 @@ func TestManager(t *testing.T) { // create shrexSub msg with height lower than first header from headerSub msg := shrexsub.Notification{ DataHash: share.DataHash("datahash"), - Height: uint64(h.Height() - 1), + Height: h.Height() - 1, } result := manager.Validate(ctx, "peer", msg) require.Equal(t, pubsub.ValidationIgnore, result) @@ -298,7 +298,7 @@ func TestManager(t *testing.T) { // create shrexSub msg with height lower than first header from headerSub msg := shrexsub.Notification{ DataHash: share.DataHash("datahash"), - Height: uint64(h.Height() - 1), + Height: h.Height() - 1, } result := manager.Validate(ctx, "peer", msg) require.Equal(t, pubsub.ValidationIgnore, result) @@ -537,7 +537,7 @@ func (s *subLock) Subscribe() (libhead.Subscription[*header.ExtendedHeader], err return s, nil } -func (s *subLock) AddValidator(func(context.Context, *header.ExtendedHeader) pubsub.ValidationResult) error { +func (s *subLock) SetVerifier(func(context.Context, *header.ExtendedHeader) error) error { panic("implement me") } @@ -561,6 +561,6 @@ func (s *subLock) Cancel() { func newShrexSubMsg(h *header.ExtendedHeader) 
shrexsub.Notification { return shrexsub.Notification{ DataHash: h.DataHash.Bytes(), - Height: uint64(h.Height()), + Height: h.Height(), } } diff --git a/state/core_access.go b/state/core_access.go index 43f5313c5f..d8f6894e24 100644 --- a/state/core_access.go +++ b/state/core_access.go @@ -180,9 +180,10 @@ func (ca *CoreAccessor) constructSignedTx( return ca.signer.EncodeTx(tx) } -// SubmitPayForBlob builds, signs, and synchronously submits a MsgPayForBlob. It blocks until the transaction -// is committed and returns the TxReponse. If gasLim is set to 0, the method will automatically estimate the -// gas limit. If the fee is negative, the method will use the nodes min gas price multiplied by the gas limit. +// SubmitPayForBlob builds, signs, and synchronously submits a MsgPayForBlob. It blocks until the +// transaction is committed and returns the TxReponse. If gasLim is set to 0, the method will +// automatically estimate the gas limit. If the fee is negative, the method will use the nodes min +// gas price multiplied by the gas limit. func (ca *CoreAccessor) SubmitPayForBlob( ctx context.Context, fee Int, @@ -201,8 +202,8 @@ func (ca *CoreAccessor) SubmitPayForBlob( appblobs[i] = &b.Blob } - // we only estimate gas if the user wants us to (by setting the gasLim to 0). In the future we may want - // to make these arguments optional. + // we only estimate gas if the user wants us to (by setting the gasLim to 0). In the future we may + // want to make these arguments optional. 
if gasLim == 0 { blobSizes := make([]uint32, len(blobs)) for i, blob := range blobs { @@ -294,7 +295,7 @@ func (ca *CoreAccessor) BalanceForAddress(ctx context.Context, addr Address) (*B abciReq := abci.RequestQuery{ // TODO @renayay: once https://github.com/cosmos/cosmos-sdk/pull/12674 is merged, use const instead Path: fmt.Sprintf("store/%s/key", banktypes.StoreKey), - Height: head.Height() - 1, + Height: int64(head.Height() - 1), Data: prefixedAccountKey, Prove: true, } diff --git a/state/integration_test.go b/state/integration_test.go index 8862de1bf8..193e7bddc7 100644 --- a/state/integration_test.go +++ b/state/integration_test.go @@ -20,6 +20,7 @@ import ( "github.com/celestiaorg/celestia-app/test/util/testfactory" "github.com/celestiaorg/celestia-app/test/util/testnode" blobtypes "github.com/celestiaorg/celestia-app/x/blob/types" + libhead "github.com/celestiaorg/go-header" "github.com/celestiaorg/celestia-node/core" "github.com/celestiaorg/celestia-node/header" @@ -95,7 +96,10 @@ type localHeader struct { client rpcclient.Client } -func (l localHeader) Head(ctx context.Context) (*header.ExtendedHeader, error) { +func (l localHeader) Head( + ctx context.Context, + _ ...libhead.HeadOption[*header.ExtendedHeader], +) (*header.ExtendedHeader, error) { latest, err := l.client.Block(ctx, nil) if err != nil { return nil, err From 47047f39dbd7bd344ce47102e7da3aa34a06bba1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jose=20Ramon=20Ma=C3=B1es?= <32740567+jrmanes@users.noreply.github.com> Date: Thu, 31 Aug 2023 10:56:28 +0200 Subject: [PATCH 03/19] chore(nodebuilder/p2p)!: Bump arabica-10 (#2639) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit hello team! This PR contains the updates for `arabica-10`: - the genesis hash - chain-id - bootstrappers thanks in advance! 
🚀 Jose Ramon Mañes --------- Signed-off-by: Jose Ramon Mañes --- nodebuilder/p2p/bootstrap.go | 8 ++++---- nodebuilder/p2p/genesis.go | 2 +- nodebuilder/p2p/network.go | 2 +- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/nodebuilder/p2p/bootstrap.go b/nodebuilder/p2p/bootstrap.go index 376d95fb14..0718d8cf59 100644 --- a/nodebuilder/p2p/bootstrap.go +++ b/nodebuilder/p2p/bootstrap.go @@ -38,10 +38,10 @@ func bootstrappersFor(net Network) ([]string, error) { // NOTE: Every time we add a new long-running network, its bootstrap peers have to be added here. var bootstrapList = map[Network][]string{ Arabica: { - "/dns4/da-bridge-arabica-9.celestia-arabica.com/tcp/2121/p2p/12D3KooWBLvsfkbovAH74DbGGxHPpVW7DkvKdbQxhorrkv9tfGZU", - "/dns4/da-bridge-arabica-9-2.celestia-arabica.com/tcp/2121/p2p/12D3KooWNjJSk8JcY7VoLEjGGUz8CXp9Bxt495zXmdmccjaMPgHf", - "/dns4/da-full-1-arabica-9.celestia-arabica.com/tcp/2121/p2p/12D3KooWFUK2Z4WPsQN3p5n8tgBigxP32gbmABUet2UMby2Ha9ZK", - "/dns4/da-full-2-arabica-9.celestia-arabica.com/tcp/2121/p2p/12D3KooWKnmwsimoghxUT1DXr7f8yXbWCfmXDk4UGbQDsAks9XsN", + "/dns4/da-bridge.celestia-arabica-10.com/tcp/2121/p2p/12D3KooWM3e9MWtyc8GkP8QRt74Riu17QuhGfZMytB2vq5NwkWAu", + "/dns4/da-bridge-2.celestia-arabica-10.com/tcp/2121/p2p/12D3KooWKj8mcdiBGxQRe1jqhaMnh2tGoC3rPDmr5UH2q8H4WA9M", + "/dns4/da-full-1.celestia-arabica-10.com/tcp/2121/p2p/12D3KooWBWkgmN7kmJSFovVrCjkeG47FkLGq7yEwJ2kEqNKCsBYk", + "/dns4/da-full-2.celestia-arabica-10.com/tcp/2121/p2p/12D3KooWRByRF67a2kVM2j4MP5Po3jgTw7H2iL2Spu8aUwPkrRfP", }, Mocha: { "/dns4/bootstr-mocha-1.celestia-mocha.com/tcp/2121/p2p/12D3KooWDRSJMbH3PS4dRDa11H7Tk615aqTUgkeEKz4pwd4sS6fN", diff --git a/nodebuilder/p2p/genesis.go b/nodebuilder/p2p/genesis.go index 1fb6b10a55..31ea0b0ae9 100644 --- a/nodebuilder/p2p/genesis.go +++ b/nodebuilder/p2p/genesis.go @@ -23,7 +23,7 @@ func GenesisFor(net Network) (string, error) { // NOTE: Every time we add a new long-running network, its genesis hash has to be added 
here. var genesisList = map[Network]string{ - Arabica: "7A5FABB19713D732D967B1DA84FA0DF5E87A7B62302D783F78743E216C1A3550", + Arabica: "5904E55478BA4B3002EE885621E007A2A6A2399662841912219AECD5D5CBE393", Mocha: "79A97034D569C4199A867439B1B7B77D4E1E1D9697212755E1CE6D920CDBB541", BlockspaceRace: "1A8491A72F73929680DAA6C93E3B593579261B2E76536BFA4F5B97D6FE76E088", Private: "", diff --git a/nodebuilder/p2p/network.go b/nodebuilder/p2p/network.go index b1c3a5fbb7..29e0218bb2 100644 --- a/nodebuilder/p2p/network.go +++ b/nodebuilder/p2p/network.go @@ -12,7 +12,7 @@ const ( // DefaultNetwork is the default network of the current build. DefaultNetwork = Mocha // Arabica testnet. See: celestiaorg/networks. - Arabica Network = "arabica-9" + Arabica Network = "arabica-10" // Mocha testnet. See: celestiaorg/networks. Mocha Network = "mocha-3" // BlockspaceRace testnet. See: https://docs.celestia.org/nodes/blockspace-race/. From 9ff98c4b611bac7b1d08135a4c249e74b1e7afa2 Mon Sep 17 00:00:00 2001 From: Ryan Date: Mon, 4 Sep 2023 11:58:43 +0200 Subject: [PATCH 04/19] chore(deps): bump golanci-lint workflow (#2641) Self explanatory. The reason for the fix in core_accessor was an implicit memory aliasing. 
It was taking the address of the loop variable, which doesn't change in between iterations ofc, so the every blob pointed to the last blob --- .github/workflows/go-ci.yml | 2 +- .golangci.yml | 2 +- cmd/celestia/rpc.go | 2 +- nodebuilder/config.go | 10 +++++----- state/core_access.go | 6 +++--- 5 files changed, 11 insertions(+), 11 deletions(-) diff --git a/.github/workflows/go-ci.yml b/.github/workflows/go-ci.yml index 7435b7912f..41b3a82994 100644 --- a/.github/workflows/go-ci.yml +++ b/.github/workflows/go-ci.yml @@ -25,7 +25,7 @@ jobs: - name: golangci-lint uses: golangci/golangci-lint-action@v3.6.0 with: - version: v1.52.2 + version: v1.54.2 go_mod_tidy_check: name: Go Mod Tidy Check diff --git a/.golangci.yml b/.golangci.yml index 5f7b13e6a5..a0f2754a9b 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -4,7 +4,7 @@ run: linters: enable: - bodyclose - - depguard + # - depguard as of v1.54.2, the default config throws errors on our repo - dogsled - dupl - errcheck diff --git a/cmd/celestia/rpc.go b/cmd/celestia/rpc.go index b8724fa789..c263496b26 100644 --- a/cmd/celestia/rpc.go +++ b/cmd/celestia/rpc.go @@ -23,7 +23,7 @@ import ( "github.com/celestiaorg/celestia-node/state" ) -const authEnvKey = "CELESTIA_NODE_AUTH_TOKEN" +const authEnvKey = "CELESTIA_NODE_AUTH_TOKEN" //nolint:gosec var requestURL string var authTokenFlag string diff --git a/nodebuilder/config.go b/nodebuilder/config.go index 670bbf9bbd..41f24d6d3d 100644 --- a/nodebuilder/config.go +++ b/nodebuilder/config.go @@ -114,7 +114,7 @@ func removeConfig(path string) error { func UpdateConfig(tp node.Type, path string) (err error) { path, err = storePath(path) if err != nil { - return + return err } flock, err := fslock.Lock(lockPath(path)) @@ -122,7 +122,7 @@ func UpdateConfig(tp node.Type, path string) (err error) { if err == fslock.ErrLocked { err = ErrOpened } - return + return err } defer flock.Unlock() //nolint: errcheck @@ -131,18 +131,18 @@ func UpdateConfig(tp node.Type, path string) (err 
error) { cfgPath := configPath(path) cfg, err := LoadConfig(cfgPath) if err != nil { - return + return err } cfg, err = updateConfig(cfg, newCfg) if err != nil { - return + return err } // save the updated config err = removeConfig(cfgPath) if err != nil { - return + return err } return SaveConfig(cfgPath, cfg) } diff --git a/state/core_access.go b/state/core_access.go index d8f6894e24..2a49e70a03 100644 --- a/state/core_access.go +++ b/state/core_access.go @@ -195,11 +195,11 @@ func (ca *CoreAccessor) SubmitPayForBlob( } appblobs := make([]*apptypes.Blob, len(blobs)) - for i, b := range blobs { - if err := b.Namespace().ValidateForBlob(); err != nil { + for i := range blobs { + if err := blobs[i].Namespace().ValidateForBlob(); err != nil { return nil, err } - appblobs[i] = &b.Blob + appblobs[i] = &blobs[i].Blob } // we only estimate gas if the user wants us to (by setting the gasLim to 0). In the future we may From da841121a8c9eb1834f2aac5079124a2917ecec4 Mon Sep 17 00:00:00 2001 From: Vlad <13818348+walldiss@users.noreply.github.com> Date: Mon, 4 Sep 2023 18:33:07 +0800 Subject: [PATCH 05/19] feat(share/eds) add dagstore shards status metric (#2642) Adds metrics to expose shards statistics. 
--- share/eds/metrics.go | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) diff --git a/share/eds/metrics.go b/share/eds/metrics.go index 6547e239cd..1f430bf688 100644 --- a/share/eds/metrics.go +++ b/share/eds/metrics.go @@ -24,6 +24,8 @@ const ( longOpUnresolved longOpResult = "unresolved" longOpOK longOpResult = "ok" longOpFailed longOpResult = "failed" + + dagstoreShardStatusKey = "shard_status" ) var ( @@ -116,9 +118,31 @@ func (s *Store) WithMetrics() error { return err } + dagStoreShards, err := meter.Int64ObservableGauge("eds_store_dagstore_shards", + metric.WithDescription("dagstore amount of shards by status")) + if err != nil { + return err + } + if err = s.cache.withMetrics(); err != nil { return err } + + callback := func(ctx context.Context, observer metric.Observer) error { + stats := s.dgstr.Stats() + for status, amount := range stats { + observer.ObserveInt64(dagStoreShards, int64(amount), + metric.WithAttributes( + attribute.String(dagstoreShardStatusKey, status.String()), + )) + } + return nil + } + + if _, err := meter.RegisterCallback(callback, dagStoreShards); err != nil { + return err + } + s.metrics = &metrics{ putTime: putTime, getCARTime: getCARTime, From 613d229a70cd73c0b700c2383b7a92af703e6861 Mon Sep 17 00:00:00 2001 From: Ryan Date: Mon, 4 Sep 2023 13:18:01 +0200 Subject: [PATCH 06/19] refactor(share)!: have NamespacedShares fields use snake_case in JSON (#2633) Related #2631 , does not fix without nmt PR and release --- share/getter.go | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/share/getter.go b/share/getter.go index 19e4498458..18d3873de1 100644 --- a/share/getter.go +++ b/share/getter.go @@ -13,7 +13,8 @@ import ( var ( // ErrNotFound is used to indicate that requested data could not be found. ErrNotFound = errors.New("share: data not found") - // ErrOutOfBounds is used to indicate that a passed row or column index is out of bounds of the square size. 
+ // ErrOutOfBounds is used to indicate that a passed row or column index is out of bounds of the + // square size. ErrOutOfBounds = errors.New("share: row or column index is larger than square size") ) @@ -50,8 +51,8 @@ func (ns NamespacedShares) Flatten() []Share { // NamespacedRow represents all shares with proofs within a specific namespace of a single EDS row. type NamespacedRow struct { - Shares []Share - Proof *nmt.Proof + Shares []Share `json:"shares"` + Proof *nmt.Proof `json:"proof"` } // Verify validates NamespacedShares by checking every row with nmt inclusion proof. From ab56434799834415ba18915af02e9315c6dda15e Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 4 Sep 2023 11:39:27 +0000 Subject: [PATCH 07/19] chore(deps): bump golangci/golangci-lint-action from 3.6.0 to 3.7.0 (#2586) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Bumps [golangci/golangci-lint-action](https://github.com/golangci/golangci-lint-action) from 3.6.0 to 3.7.0.
Release notes

Sourced from golangci/golangci-lint-action's releases.

v3.7.0

What's Changed

Changes

Documentation

Dependencies

... (truncated)

Commits
  • 3a91952 feat: working-directory with only-new-issues (#795)
  • 5e67631 build(deps): bump @​actions/cache from 3.2.1 to 3.2.2 (#825)
  • 18dad33 build(deps-dev): bump @​typescript-eslint/eslint-plugin from 6.2.1 to 6.3.0 (#...
  • 945dc98 build(deps): bump @​types/node from 20.4.8 to 20.5.0 (#824)
  • cac24f5 build(deps-dev): bump eslint from 8.45.0 to 8.47.0 (#823)
  • ab66454 build(deps-dev): bump @​typescript-eslint/parser from 6.2.0 to 6.3.0 (#821)
  • a252750 build(deps): bump @​actions/http-client from 2.1.0 to 2.1.1 (#819)
  • 44d9998 build(deps-dev): bump eslint-config-prettier from 8.9.0 to 9.0.0 (#818)
  • b6bdfb3 build(deps-dev): bump prettier from 3.0.0 to 3.0.1 (#817)
  • c0cd965 build(deps-dev): bump @​typescript-eslint/eslint-plugin from 6.2.0 to 6.2.1 (#...
  • Additional commits viewable in compare view

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=golangci/golangci-lint-action&package-manager=github_actions&previous-version=3.6.0&new-version=3.7.0)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) ---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR: - `@dependabot rebase` will rebase this PR - `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it - `@dependabot merge` will merge this PR after your CI passes on it - `@dependabot squash and merge` will squash and merge this PR after your CI passes on it - `@dependabot cancel merge` will cancel a previously requested merge and block automerging - `@dependabot reopen` will reopen this PR if it is closed - `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually - `@dependabot show ignore conditions` will show all of the ignore conditions of the specified dependency - `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/go-ci.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/go-ci.yml b/.github/workflows/go-ci.yml index 41b3a82994..f68825bec1 100644 --- a/.github/workflows/go-ci.yml +++ b/.github/workflows/go-ci.yml @@ -23,7 +23,7 @@ jobs: go-version: ${{ env.GO_VERSION }} - name: golangci-lint - uses: golangci/golangci-lint-action@v3.6.0 + uses: golangci/golangci-lint-action@v3.7.0 with: version: v1.54.2 From afc0504de67bf0ea20e528c9fbdad335df0f6f2e Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 4 Sep 2023 11:42:34 +0000 Subject: [PATCH 08/19] chore(deps): bump alpine from 3.18.2 to 3.18.3 (#2564) Bumps alpine from 3.18.2 to 3.18.3. [![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=alpine&package-manager=docker&previous-version=3.18.2&new-version=3.18.3)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) ---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR: - `@dependabot rebase` will rebase this PR - `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it - `@dependabot merge` will merge this PR after your CI passes on it - `@dependabot squash and merge` will squash and merge this PR after your CI passes on it - `@dependabot cancel merge` will cancel a previously requested merge and block automerging - `@dependabot reopen` will reopen this PR if it is closed - `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually - `@dependabot show ignore conditions` will show all of the ignore conditions of the specified dependency - `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Dockerfile b/Dockerfile index c8e9bb2e1c..1f1705c2f3 100644 --- a/Dockerfile +++ b/Dockerfile @@ -15,7 +15,7 @@ COPY . . RUN make build && make cel-key -FROM docker.io/alpine:3.18.2 +FROM docker.io/alpine:3.18.3 # Read here why UID 10001: https://github.com/hexops/dockerfile/blob/main/README.md#do-not-use-a-uid-below-10000 ARG UID=10001 From 80d6dbaa7278ffc590a75d556daffbb6ac37d657 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 4 Sep 2023 13:43:14 +0200 Subject: [PATCH 09/19] chore(deps): bump cosmossdk.io/math from 1.1.1 to 1.1.2 (#2594) Bumps [cosmossdk.io/math](https://github.com/cosmos/cosmos-sdk) from 1.1.1 to 1.1.2.
Commits

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=cosmossdk.io/math&package-manager=go_modules&previous-version=1.1.1&new-version=1.1.2)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) ---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR: - `@dependabot rebase` will rebase this PR - `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it - `@dependabot merge` will merge this PR after your CI passes on it - `@dependabot squash and merge` will squash and merge this PR after your CI passes on it - `@dependabot cancel merge` will cancel a previously requested merge and block automerging - `@dependabot reopen` will reopen this PR if it is closed - `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually - `@dependabot show ignore conditions` will show all of the ignore conditions of the specified dependency - `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 846f4316e6..38b2068178 100644 --- a/go.mod +++ b/go.mod @@ -8,7 +8,7 @@ replace github.com/ipfs/go-verifcid => github.com/celestiaorg/go-verifcid v0.0.1 require ( cosmossdk.io/errors v1.0.0 - cosmossdk.io/math v1.1.1 + cosmossdk.io/math v1.1.2 github.com/BurntSushi/toml v1.3.2 github.com/alecthomas/jsonschema v0.0.0-20220216202328-9eeeec9d044b github.com/benbjohnson/clock v1.3.5 diff --git a/go.sum b/go.sum index 09bd8ab26e..feaba34043 100644 --- a/go.sum +++ b/go.sum @@ -195,8 +195,8 @@ cloud.google.com/go/workflows v1.7.0/go.mod h1:JhSrZuVZWuiDfKEFxU0/F1PQjmpnpcoIS collectd.org v0.3.0/go.mod h1:A/8DzQBkF6abtvrT2j/AU/4tiBgJWYyh0y/oB/4MlWE= cosmossdk.io/errors v1.0.0 h1:nxF07lmlBbB8NKQhtJ+sJm6ef5uV1XkvPXG2bUntb04= cosmossdk.io/errors v1.0.0/go.mod h1:+hJZLuhdDE0pYN8HkOrVNwrIOYvUGnn6+4fjnJs/oV0= -cosmossdk.io/math v1.1.1 h1:Eqx44E6fSvG055Z6VNiCLWA9fra0JSyP0kQX7VvNNfk= -cosmossdk.io/math v1.1.1/go.mod h1:uFRkSZDz38KjWjm6jN+/sI8tJWQxbGwxcjOTzapWSpE= +cosmossdk.io/math v1.1.2 h1:ORZetZCTyWkI5GlZ6CZS28fMHi83ZYf+A2vVnHNzZBM= +cosmossdk.io/math v1.1.2/go.mod h1:l2Gnda87F0su8a/7FEKJfFdJrM0JZRXQaohlgJeyQh0= dmitri.shuralyov.com/app/changes v0.0.0-20180602232624-0a106ad413e3/go.mod h1:Yl+fi1br7+Rr3LqpNJf1/uxUdtRUV+Tnj0o93V2B9MU= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= dmitri.shuralyov.com/gpu/mtl v0.0.0-20201218220906-28db891af037/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= From d75a743d7c31b4edfdb8e3dbe8dbca0008bea9cb Mon Sep 17 00:00:00 2001 From: Viacheslav Date: Mon, 4 Sep 2023 15:09:42 +0300 Subject: [PATCH 10/19] tests(swamp): befp testing (#2584) --- das/daser_test.go | 3 +- header/headertest/fraud/testing.go | 100 +++++++++++++++++ 
header/headertest/testing.go | 48 --------- nodebuilder/fraud/lifecycle.go | 2 +- nodebuilder/p2p/opts.go | 6 ++ nodebuilder/tests/fraud_test.go | 168 +++++++++++------------------ nodebuilder/tests/swamp/swamp.go | 14 +++ share/eds/edstest/testing.go | 8 +- share/getters/cascade.go | 10 +- share/getters/ipld.go | 5 + 10 files changed, 208 insertions(+), 156 deletions(-) create mode 100644 header/headertest/fraud/testing.go diff --git a/das/daser_test.go b/das/daser_test.go index 68f6e01ef2..e4e74dc7ff 100644 --- a/das/daser_test.go +++ b/das/daser_test.go @@ -23,6 +23,7 @@ import ( "github.com/celestiaorg/celestia-node/header" "github.com/celestiaorg/celestia-node/header/headertest" + headerfraud "github.com/celestiaorg/celestia-node/header/headertest/fraud" "github.com/celestiaorg/celestia-node/share" "github.com/celestiaorg/celestia-node/share/availability/full" "github.com/celestiaorg/celestia-node/share/availability/light" @@ -180,7 +181,7 @@ func TestDASer_stopsAfter_BEFP(t *testing.T) { "private", ) require.NoError(t, fserv.Start(ctx)) - mockGet.headers[1], _ = headertest.CreateFraudExtHeader(t, mockGet.headers[1], bServ) + mockGet.headers[1] = headerfraud.CreateFraudExtHeader(t, mockGet.headers[1], bServ) newCtx := context.Background() // create and start DASer diff --git a/header/headertest/fraud/testing.go b/header/headertest/fraud/testing.go new file mode 100644 index 0000000000..6a5cda733d --- /dev/null +++ b/header/headertest/fraud/testing.go @@ -0,0 +1,100 @@ +package headerfraud + +import ( + "context" + "testing" + "time" + + "github.com/ipfs/go-blockservice" + "github.com/stretchr/testify/require" + "github.com/tendermint/tendermint/libs/bytes" + tmproto "github.com/tendermint/tendermint/proto/tendermint/types" + "github.com/tendermint/tendermint/types" + + "github.com/celestiaorg/celestia-app/pkg/da" + "github.com/celestiaorg/nmt" + "github.com/celestiaorg/rsmt2d" + + "github.com/celestiaorg/celestia-node/header" + 
"github.com/celestiaorg/celestia-node/header/headertest" + "github.com/celestiaorg/celestia-node/share/eds" + "github.com/celestiaorg/celestia-node/share/eds/edstest" + "github.com/celestiaorg/celestia-node/share/ipld" +) + +// FraudMaker allows to produce an invalid header at the specified height in order to produce the +// BEFP. +type FraudMaker struct { + t *testing.T + + vals []types.PrivValidator + valSet *types.ValidatorSet + + // height of the invalid header + height int64 + + prevHash bytes.HexBytes +} + +func NewFraudMaker(t *testing.T, height int64, vals []types.PrivValidator, valSet *types.ValidatorSet) *FraudMaker { + return &FraudMaker{ + t: t, + vals: vals, + valSet: valSet, + height: height, + } +} + +func (f *FraudMaker) MakeExtendedHeader(odsSize int, edsStore *eds.Store) header.ConstructFn { + return func(h *types.Header, + comm *types.Commit, + vals *types.ValidatorSet, + eds *rsmt2d.ExtendedDataSquare, + ) (*header.ExtendedHeader, error) { + if h.Height < f.height { + return header.MakeExtendedHeader(h, comm, vals, eds) + } + + hdr := *h + if h.Height == f.height { + adder := ipld.NewProofsAdder(odsSize) + square := edstest.RandByzantineEDS(f.t, odsSize, nmt.NodeVisitor(adder.VisitFn())) + dah, err := da.NewDataAvailabilityHeader(square) + require.NoError(f.t, err) + hdr.DataHash = dah.Hash() + + ctx := ipld.CtxWithProofsAdder(context.Background(), adder) + require.NoError(f.t, edsStore.Put(ctx, h.DataHash.Bytes(), square)) + + *eds = *square + } + if h.Height > f.height { + hdr.LastBlockID.Hash = f.prevHash + } + + blockID := comm.BlockID + blockID.Hash = hdr.Hash() + voteSet := types.NewVoteSet(hdr.ChainID, hdr.Height, 0, tmproto.PrecommitType, f.valSet) + commit, err := headertest.MakeCommit(blockID, hdr.Height, 0, voteSet, f.vals, time.Now()) + require.NoError(f.t, err) + + *h = hdr + *comm = *commit + f.prevHash = h.Hash() + return header.MakeExtendedHeader(h, comm, vals, eds) + } +} +func CreateFraudExtHeader( + t *testing.T, + eh 
*header.ExtendedHeader, + serv blockservice.BlockService, +) *header.ExtendedHeader { + square := edstest.RandByzantineEDS(t, len(eh.DAH.RowRoots)) + err := ipld.ImportEDS(context.Background(), square, serv) + require.NoError(t, err) + dah, err := da.NewDataAvailabilityHeader(square) + require.NoError(t, err) + eh.DAH = &dah + eh.RawHeader.DataHash = dah.Hash() + return eh +} diff --git a/header/headertest/testing.go b/header/headertest/testing.go index 65ae8c950f..f288556bd9 100644 --- a/header/headertest/testing.go +++ b/header/headertest/testing.go @@ -1,7 +1,6 @@ package headertest import ( - "context" "crypto/rand" "fmt" mrand "math/rand" @@ -9,8 +8,6 @@ import ( "testing" "time" - "github.com/ipfs/go-blockservice" - logging "github.com/ipfs/go-log/v2" "github.com/stretchr/testify/require" "github.com/tendermint/tendermint/crypto/tmhash" "github.com/tendermint/tendermint/libs/bytes" @@ -26,12 +23,8 @@ import ( "github.com/celestiaorg/rsmt2d" "github.com/celestiaorg/celestia-node/header" - "github.com/celestiaorg/celestia-node/share/eds/edstest" - "github.com/celestiaorg/celestia-node/share/ipld" ) -var log = logging.Logger("headertest") - // TestSuite provides everything you need to test chain of Headers. // If not, please don't hesitate to extend it for your case. type TestSuite struct { @@ -296,32 +289,6 @@ func RandBlockID(*testing.T) types.BlockID { return bid } -// FraudMaker creates a custom ConstructFn that breaks the block at the given height. 
-func FraudMaker(t *testing.T, faultHeight int64, bServ blockservice.BlockService) header.ConstructFn { - log.Warn("Corrupting block...", "height", faultHeight) - return func( - h *types.Header, - comm *types.Commit, - vals *types.ValidatorSet, - eds *rsmt2d.ExtendedDataSquare, - ) (*header.ExtendedHeader, error) { - if h.Height == faultHeight { - eh := &header.ExtendedHeader{ - RawHeader: *h, - Commit: comm, - ValidatorSet: vals, - } - - eh, dataSq := CreateFraudExtHeader(t, eh, bServ) - if eds != nil { - *eds = *dataSq - } - return eh, nil - } - return header.MakeExtendedHeader(h, comm, vals, eds) - } -} - func ExtendedHeaderFromEDS(t *testing.T, height uint64, eds *rsmt2d.ExtendedDataSquare) *header.ExtendedHeader { valSet, vals := RandValidatorSet(10, 10) gen := RandRawHeader(t) @@ -348,21 +315,6 @@ func ExtendedHeaderFromEDS(t *testing.T, height uint64, eds *rsmt2d.ExtendedData return eh } -func CreateFraudExtHeader( - t *testing.T, - eh *header.ExtendedHeader, - serv blockservice.BlockService, -) (*header.ExtendedHeader, *rsmt2d.ExtendedDataSquare) { - square := edstest.RandByzantineEDS(t, len(eh.DAH.RowRoots)) - err := ipld.ImportEDS(context.Background(), square, serv) - require.NoError(t, err) - dah, err := da.NewDataAvailabilityHeader(square) - require.NoError(t, err) - eh.DAH = &dah - eh.RawHeader.DataHash = dah.Hash() - return eh, square -} - type Subscriber struct { headertest.Subscriber[*header.ExtendedHeader] } diff --git a/nodebuilder/fraud/lifecycle.go b/nodebuilder/fraud/lifecycle.go index 1a6702aafa..50f4e1035b 100644 --- a/nodebuilder/fraud/lifecycle.go +++ b/nodebuilder/fraud/lifecycle.go @@ -73,7 +73,7 @@ func (breaker *ServiceBreaker[S, H]) Stop(ctx context.Context) error { } breaker.sub.Cancel() - breaker.cancel() + defer breaker.cancel() return breaker.Service.Stop(ctx) } diff --git a/nodebuilder/p2p/opts.go b/nodebuilder/p2p/opts.go index 9501dfe8e1..8e5d714a64 100644 --- a/nodebuilder/p2p/opts.go +++ b/nodebuilder/p2p/opts.go @@ -3,6 +3,7 
@@ package p2p import ( "encoding/hex" + "github.com/ipfs/go-blockservice" "github.com/libp2p/go-libp2p/core/crypto" hst "github.com/libp2p/go-libp2p/core/host" "go.uber.org/fx" @@ -34,3 +35,8 @@ func WithP2PKeyStr(key string) fx.Option { func WithHost(hst hst.Host) fx.Option { return fxutil.ReplaceAs(hst, new(HostBase)) } + +// WithBlockService allows to replace the default BlockService. +func WithBlockService(bServ blockservice.BlockService) fx.Option { + return fxutil.ReplaceAs(bServ, new(blockservice.BlockService)) +} diff --git a/nodebuilder/tests/fraud_test.go b/nodebuilder/tests/fraud_test.go index f652724d55..95c702c0c0 100644 --- a/nodebuilder/tests/fraud_test.go +++ b/nodebuilder/tests/fraud_test.go @@ -5,16 +5,20 @@ import ( "testing" "time" - mdutils "github.com/ipfs/go-merkledag/test" + "github.com/ipfs/go-datastore" + ds_sync "github.com/ipfs/go-datastore/sync" "github.com/libp2p/go-libp2p/core/host" "github.com/libp2p/go-libp2p/core/peer" "github.com/stretchr/testify/require" + "github.com/tendermint/tendermint/types" + "go.uber.org/fx" - "github.com/celestiaorg/celestia-node/header/headertest" + headerfraud "github.com/celestiaorg/celestia-node/header/headertest/fraud" "github.com/celestiaorg/celestia-node/nodebuilder" "github.com/celestiaorg/celestia-node/nodebuilder/core" "github.com/celestiaorg/celestia-node/nodebuilder/node" "github.com/celestiaorg/celestia-node/nodebuilder/tests/swamp" + "github.com/celestiaorg/celestia-node/share/eds" "github.com/celestiaorg/celestia-node/share/eds/byzantine" ) @@ -29,156 +33,114 @@ Steps: 4. Start a FN. 5. Subscribe to a fraud proof and wait when it will be received. 6. Check FN is not synced to 15. -Note: 15 is not available because DASer will be stopped before reaching this height due to receiving a fraud proof. +Note: 15 is not available because DASer/Syncer will be stopped +before reaching this height due to receiving a fraud proof. Another note: this test disables share exchange to speed up test results. 
+7. Spawn a Light Node(LN) in order to sync a BEFP. +8. Ensure that the BEFP was received. +9. Try to start a Full Node(FN) that contains a BEFP in its store. */ -func TestFraudProofBroadcasting(t *testing.T) { - t.Skip("requires BEFP generation on app side to work") +func TestFraudProofHandling(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), swamp.DefaultTestTimeout) t.Cleanup(cancel) const ( blocks = 15 - blockSize = 2 - blockTime = time.Millisecond * 300 + blockSize = 4 + blockTime = time.Second ) sw := swamp.NewSwamp(t, swamp.WithBlockTime(blockTime)) fillDn := swamp.FillBlocks(ctx, sw.ClientContext, sw.Accounts, blockSize, blocks) + set, val := sw.Validators(t) + fMaker := headerfraud.NewFraudMaker(t, 10, []types.PrivValidator{val}, set) + + tmpDir := t.TempDir() + ds := ds_sync.MutexWrap(datastore.NewMapDatastore()) + edsStore, err := eds.NewStore(tmpDir, ds) + require.NoError(t, err) + require.NoError(t, edsStore.Start(ctx)) + t.Cleanup(func() { + _ = edsStore.Stop(ctx) + }) cfg := nodebuilder.DefaultConfig(node.Bridge) - cfg.Share.UseShareExchange = false + // 1. bridge := sw.NewNodeWithConfig( node.Bridge, cfg, - core.WithHeaderConstructFn(headertest.FraudMaker(t, 10, mdutils.Bserv())), + core.WithHeaderConstructFn(fMaker.MakeExtendedHeader(16, edsStore)), + fx.Replace(edsStore), ) - - err := bridge.Start(ctx) + // 2. + err = bridge.Start(ctx) require.NoError(t, err) + // 3. cfg = nodebuilder.DefaultConfig(node.Full) - cfg.Share.UseShareExchange = false addrs, err := peer.AddrInfoToP2pAddrs(host.InfoFromHost(bridge.Host)) require.NoError(t, err) cfg.Header.TrustedPeers = append(cfg.Header.TrustedPeers, addrs[0].String()) - + cfg.Share.UseShareExchange = false store := nodebuilder.MockStore(t, cfg) full := sw.NewNodeWithStore(node.Full, store) + // 4. 
err = full.Start(ctx) require.NoError(t, err) - // subscribe to fraud proof before node starts helps - // to prevent flakiness when fraud proof is propagating before subscribing on it - subscr, err := full.FraudServ.Subscribe(ctx, byzantine.BadEncoding) + // 5. + subCtx, subCancel := context.WithCancel(ctx) + subscr, err := full.FraudServ.Subscribe(subCtx, byzantine.BadEncoding) require.NoError(t, err) - select { case p := <-subscr: require.Equal(t, 10, int(p.Height())) + subCancel() case <-ctx.Done(): - t.Fatal("fraud proof was not received in time") + subCancel() + t.Fatal("full node did not receive a fraud proof in time") } + // This is an obscure way to check if the Syncer was stopped. // If we cannot get a height header within a timeframe it means the syncer was stopped // FIXME: Eventually, this should be a check on service registry managing and keeping // lifecycles of each Module. - syncCtx, syncCancel := context.WithTimeout(context.Background(), btime) - _, err = full.HeaderServ.WaitForHeight(syncCtx, 100) + // 6. + syncCtx, syncCancel := context.WithTimeout(context.Background(), blockTime*5) + _, err = full.HeaderServ.WaitForHeight(syncCtx, 15) require.ErrorIs(t, err, context.DeadlineExceeded) syncCancel() - sw.StopNode(ctx, full) - - full = sw.NewNodeWithStore(node.Full, store) - - require.Error(t, full.Start(ctx)) - proofs, err := full.FraudServ.Get(ctx, byzantine.BadEncoding) - require.NoError(t, err) - require.NotNil(t, proofs) - require.NoError(t, <-fillDn) -} - -/* -Test-Case: Light node receives a fraud proof using Fraud Sync -Pre-Requisites: -- CoreClient is started by swamp. -Steps: -1. Create a Bridge Node(BN) with broken extended header at height 10. -2. Start a BN. -3. Create a Full Node(FN) with a connection to BN as a trusted peer. -4. Start a FN. -5. Subscribe to a fraud proof and wait when it will be received. -6. Start LN once a fraud proof is received and verified by FN. -7. Wait until LN will be connected to FN and fetch a fraud proof. 
-Note: this test disables share exchange to speed up test results. -*/ -func TestFraudProofSyncing(t *testing.T) { - t.Skip("requires BEFP generation on app side to work") - - const ( - blocks = 15 - bsize = 2 - btime = time.Millisecond * 300 - ) - sw := swamp.NewSwamp(t, swamp.WithBlockTime(btime)) - ctx, cancel := context.WithTimeout(context.Background(), swamp.DefaultTestTimeout) - t.Cleanup(cancel) - - fillDn := swamp.FillBlocks(ctx, sw.ClientContext, sw.Accounts, bsize, blocks) - cfg := nodebuilder.DefaultConfig(node.Bridge) - cfg.Share.UseShareExchange = false - store := nodebuilder.MockStore(t, cfg) - bridge := sw.NewNodeWithStore( - node.Bridge, - store, - core.WithHeaderConstructFn(headertest.FraudMaker(t, 10, mdutils.Bserv())), - ) - - err := bridge.Start(ctx) - require.NoError(t, err) - addr := host.InfoFromHost(bridge.Host) - addrs, err := peer.AddrInfoToP2pAddrs(addr) - require.NoError(t, err) - - fullCfg := nodebuilder.DefaultConfig(node.Full) - fullCfg.Share.UseShareExchange = false - fullCfg.Header.TrustedPeers = append(fullCfg.Header.TrustedPeers, addrs[0].String()) - full := sw.NewNodeWithStore(node.Full, nodebuilder.MockStore(t, fullCfg)) - - lightCfg := nodebuilder.DefaultConfig(node.Light) - lightCfg.Header.TrustedPeers = append(lightCfg.Header.TrustedPeers, addrs[0].String()) - ln := sw.NewNodeWithStore(node.Light, nodebuilder.MockStore(t, lightCfg)) - require.NoError(t, full.Start(ctx)) + // 7. + cfg = nodebuilder.DefaultConfig(node.Light) + cfg.Header.TrustedPeers = append(cfg.Header.TrustedPeers, addrs[0].String()) + lnStore := nodebuilder.MockStore(t, cfg) + light := sw.NewNodeWithStore(node.Light, lnStore) + require.NoError(t, light.Start(ctx)) - subsFN, err := full.FraudServ.Subscribe(ctx, byzantine.BadEncoding) + // 8. 
+ subCtx, subCancel = context.WithCancel(ctx) + subscr, err = light.FraudServ.Subscribe(subCtx, byzantine.BadEncoding) require.NoError(t, err) - select { - case <-subsFN: + case p := <-subscr: + require.Equal(t, 10, int(p.Height())) + subCancel() case <-ctx.Done(): - t.Fatal("full node didn't get FP in time") + subCancel() + t.Fatal("light node did not receive a fraud proof in time") } - // start LN to enforce syncing logic, not the PubSub's broadcasting - err = ln.Start(ctx) - require.NoError(t, err) - - // internal subscription for the fraud proof is done in order to ensure that light node - // receives the BEFP. - subsLN, err := ln.FraudServ.Subscribe(ctx, byzantine.BadEncoding) - require.NoError(t, err) - - // ensure that the full and light node are connected to speed up test - // alternatively, they would discover each other - err = ln.Host.Connect(ctx, *host.InfoFromHost(full.Host)) + // 9. + fN := sw.NewNodeWithStore(node.Full, store) + require.Error(t, fN.Start(ctx)) + proofs, err := fN.FraudServ.Get(ctx, byzantine.BadEncoding) require.NoError(t, err) + require.NotNil(t, proofs) - select { - case <-subsLN: - case <-ctx.Done(): - t.Fatal("light node didn't get FP in time") - } + sw.StopNode(ctx, bridge) + sw.StopNode(ctx, full) + sw.StopNode(ctx, light) require.NoError(t, <-fillDn) } diff --git a/nodebuilder/tests/swamp/swamp.go b/nodebuilder/tests/swamp/swamp.go index 58584912be..4f07d96b51 100644 --- a/nodebuilder/tests/swamp/swamp.go +++ b/nodebuilder/tests/swamp/swamp.go @@ -16,6 +16,8 @@ import ( mocknet "github.com/libp2p/go-libp2p/p2p/net/mock" ma "github.com/multiformats/go-multiaddr" "github.com/stretchr/testify/require" + "github.com/tendermint/tendermint/privval" + "github.com/tendermint/tendermint/types" "go.uber.org/fx" "golang.org/x/exp/maps" @@ -335,3 +337,15 @@ func (s *Swamp) SetBootstrapper(t *testing.T, bootstrappers ...*nodebuilder.Node s.Bootstrappers = append(s.Bootstrappers, addrs[0]) } } + +// Validators retrieves keys from the app 
node in order to build the validators. +func (s *Swamp) Validators(t *testing.T) (*types.ValidatorSet, types.PrivValidator) { + privPath := s.cfg.TmConfig.PrivValidatorKeyFile() + statePath := s.cfg.TmConfig.PrivValidatorStateFile() + priv := privval.LoadFilePV(privPath, statePath) + key, err := priv.GetPubKey() + require.NoError(t, err) + validator := types.NewValidator(key, 100) + set := types.NewValidatorSet([]*types.Validator{validator}) + return set, priv +} diff --git a/share/eds/edstest/testing.go b/share/eds/edstest/testing.go index ddca285f0c..f75e8b619b 100644 --- a/share/eds/edstest/testing.go +++ b/share/eds/edstest/testing.go @@ -7,17 +7,21 @@ import ( "github.com/celestiaorg/celestia-app/pkg/da" "github.com/celestiaorg/celestia-app/pkg/wrapper" + "github.com/celestiaorg/nmt" "github.com/celestiaorg/rsmt2d" "github.com/celestiaorg/celestia-node/share" "github.com/celestiaorg/celestia-node/share/sharetest" ) -func RandByzantineEDS(t *testing.T, size int) *rsmt2d.ExtendedDataSquare { +func RandByzantineEDS(t *testing.T, size int, options ...nmt.Option) *rsmt2d.ExtendedDataSquare { eds := RandEDS(t, size) shares := eds.Flattened() copy(share.GetData(shares[0]), share.GetData(shares[1])) // corrupting eds - eds, err := rsmt2d.ImportExtendedDataSquare(shares, share.DefaultRSMT2DCodec(), wrapper.NewConstructor(uint64(size))) + eds, err := rsmt2d.ImportExtendedDataSquare(shares, + share.DefaultRSMT2DCodec(), + wrapper.NewConstructor(uint64(size), + options...)) require.NoError(t, err, "failure to recompute the extended data square") return eds } diff --git a/share/getters/cascade.go b/share/getters/cascade.go index 63d7713d3d..eb3e969c1c 100644 --- a/share/getters/cascade.go +++ b/share/getters/cascade.go @@ -11,6 +11,7 @@ import ( "github.com/celestiaorg/celestia-node/libs/utils" "github.com/celestiaorg/celestia-node/share" + "github.com/celestiaorg/celestia-node/share/eds/byzantine" ) var _ share.Getter = (*CascadeGetter)(nil) @@ -130,8 +131,15 @@ func 
cascadeGetters[V any]( continue } - err = errors.Join(err, getErr) span.RecordError(getErr, trace.WithAttributes(attribute.Int("getter_idx", i))) + var byzantineErr *byzantine.ErrByzantine + if errors.As(getErr, &byzantineErr) { + // short circuit if byzantine error was detected (to be able to handle it correctly + // and create the BEFP) + return zero, byzantineErr + } + + err = errors.Join(err, getErr) if ctx.Err() != nil { return zero, err } diff --git a/share/getters/ipld.go b/share/getters/ipld.go index a892e0fc82..8e11a389bd 100644 --- a/share/getters/ipld.go +++ b/share/getters/ipld.go @@ -16,6 +16,7 @@ import ( "github.com/celestiaorg/celestia-node/libs/utils" "github.com/celestiaorg/celestia-node/share" "github.com/celestiaorg/celestia-node/share/eds" + "github.com/celestiaorg/celestia-node/share/eds/byzantine" "github.com/celestiaorg/celestia-node/share/ipld" ) @@ -82,6 +83,10 @@ func (ig *IPLDGetter) GetEDS(ctx context.Context, root *share.Root) (eds *rsmt2d // convert error to satisfy getter interface contract err = share.ErrNotFound } + var errByz *byzantine.ErrByzantine + if errors.As(err, &errByz) { + return nil, err + } if err != nil { return nil, fmt.Errorf("getter/ipld: failed to retrieve eds: %w", err) } From 42caaee5df1ebe31336e2534b644dcda5e030a14 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 4 Sep 2023 14:20:36 +0200 Subject: [PATCH 11/19] chore(deps): bump github.com/ipfs/go-ipld-cbor from 0.0.6 to 0.1.0 (#2622) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Bumps [github.com/ipfs/go-ipld-cbor](https://github.com/ipfs/go-ipld-cbor) from 0.0.6 to 0.1.0.
Release notes

Sourced from github.com/ipfs/go-ipld-cbor's releases.

v0.1.0

What's Changed

  • allow configuration of ipldStores default hash function by @​whyrusleeping in ipfs/go-ipld-cbor#86
  • adopt PL unified-ci
  • dependency upgrades to latest
  • Minimum go version bump to v1.20.

Full Changelog: https://github.com/ipfs/go-ipld-cbor/compare/v0.0.6...v0.1.0

Commits
  • 2d0b3fb v0.1.0 bump
  • c10cddb chore: update deps
  • f314046 chore: bump go.mod to Go 1.20 and run go fix
  • 16a0369 chore: add or force update version.json
  • da23a18 chore: add or force update .github/workflows/tagpush.yml
  • 8da0e33 chore: add or force update .github/workflows/release-check.yml
  • 5698d0f chore: add or force update .github/workflows/releaser.yml
  • 5f282bb chore: add or force update .github/workflows/go-check.yml
  • 2312e76 chore: add or force update .github/workflows/go-test.yml
  • 1dc9bde chore: delete templates [skip ci] (#91)
  • Additional commits viewable in compare view

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=github.com/ipfs/go-ipld-cbor&package-manager=go_modules&previous-version=0.0.6&new-version=0.1.0)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) ---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR: - `@dependabot rebase` will rebase this PR - `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it - `@dependabot merge` will merge this PR after your CI passes on it - `@dependabot squash and merge` will squash and merge this PR after your CI passes on it - `@dependabot cancel merge` will cancel a previously requested merge and block automerging - `@dependabot reopen` will reopen this PR if it is closed - `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually - `@dependabot show ignore conditions` will show all of the ignore conditions of the specified dependency - `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Ryan --- go.mod | 2 +- go.sum | 3 ++- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/go.mod b/go.mod index 38b2068178..9206bddaea 100644 --- a/go.mod +++ b/go.mod @@ -37,7 +37,7 @@ require ( github.com/ipfs/go-blockservice v0.5.1 // down 1 version, 0.5.2 is marked as deprecated and raises alerts github.com/ipfs/go-cid v0.4.1 github.com/ipfs/go-datastore v0.6.0 - github.com/ipfs/go-ipld-cbor v0.0.6 + github.com/ipfs/go-ipld-cbor v0.1.0 github.com/ipfs/go-ipld-format v0.5.0 github.com/ipfs/go-log/v2 v2.5.1 github.com/ipfs/go-merkledag v0.11.0 diff --git a/go.sum b/go.sum index feaba34043..b2afe6be69 100644 --- a/go.sum +++ b/go.sum @@ -1147,8 +1147,9 @@ github.com/ipfs/go-ipfs-util v0.0.3/go.mod h1:LHzG1a0Ig4G+iZ26UUOMjHd+lfM84LZCrn github.com/ipfs/go-ipld-cbor v0.0.2/go.mod h1:wTBtrQZA3SoFKMVkp6cn6HMRteIB1VsmHA0AQFOn7Nc= github.com/ipfs/go-ipld-cbor v0.0.3/go.mod h1:wTBtrQZA3SoFKMVkp6cn6HMRteIB1VsmHA0AQFOn7Nc= github.com/ipfs/go-ipld-cbor v0.0.5/go.mod h1:BkCduEx3XBCO6t2Sfo5BaHzuok7hbhdMm9Oh8B2Ftq4= -github.com/ipfs/go-ipld-cbor v0.0.6 h1:pYuWHyvSpIsOOLw4Jy7NbBkCyzLDcl64Bf/LZW7eBQ0= github.com/ipfs/go-ipld-cbor v0.0.6/go.mod h1:ssdxxaLJPXH7OjF5V4NSjBbcfh+evoR4ukuru0oPXMA= +github.com/ipfs/go-ipld-cbor v0.1.0 h1:dx0nS0kILVivGhfWuB6dUpMa/LAwElHPw1yOGYopoYs= +github.com/ipfs/go-ipld-cbor v0.1.0/go.mod h1:U2aYlmVrJr2wsUBU67K4KgepApSZddGRDWBYR0H4sCk= github.com/ipfs/go-ipld-format v0.0.1/go.mod h1:kyJtbkDALmFHv3QR6et67i35QzO3S0dCDnkOJhcZkms= github.com/ipfs/go-ipld-format v0.0.2/go.mod h1:4B6+FM2u9OJ9zCV+kSbgFAZlOrv1Hqbf0INGQgiKf9k= github.com/ipfs/go-ipld-format v0.2.0/go.mod h1:3l3C1uKoadTPbeNfrDi+xMInYKlx2Cvg1BuydPSdzQs= From 9415cf0b21c490aa23374b2d503bc3b62148f3f4 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 4 Sep 2023 14:40:09 +0200 Subject: [PATCH 12/19] 
chore(deps): bump github.com/ipfs/go-ipld-format from 0.5.0 to 0.6.0 (#2621) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Bumps [github.com/ipfs/go-ipld-format](https://github.com/ipfs/go-ipld-format) from 0.5.0 to 0.6.0.
Release notes

Sourced from github.com/ipfs/go-ipld-format's releases.

v0.6.0

What's Changed

New Contributors

Full Changelog: https://github.com/ipfs/go-ipld-format/compare/v0.5.0...v0.6.0

Commits
  • 608bf9d v0.6.0 bump
  • 93da70a chore: update deps
  • 6ce9a5e fix: stop using the deprecated io/ioutil package
  • 1c2f1ce chore: bump go.mod to Go 1.20 and run go fix
  • ad47740 ci: uci/copy-templates (#83)
  • 706e93c chore: delete templates [skip ci] (#82)
  • c3da866 chore: Update .github/workflows/stale.yml [skip ci]
  • ef09f69 chore: Update .github/workflows/stale.yml [skip ci]
  • 530b600 Bump golang.org/x/crypto from 0.0.0-20190211182817-74369b46fc67 to 0.1.0
  • See full diff in compare view

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=github.com/ipfs/go-ipld-format&package-manager=go_modules&previous-version=0.5.0&new-version=0.6.0)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) ---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR: - `@dependabot rebase` will rebase this PR - `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it - `@dependabot merge` will merge this PR after your CI passes on it - `@dependabot squash and merge` will squash and merge this PR after your CI passes on it - `@dependabot cancel merge` will cancel a previously requested merge and block automerging - `@dependabot reopen` will reopen this PR if it is closed - `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually - `@dependabot show ignore conditions` will show all of the ignore conditions of the specified dependency - `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 2 +- go.sum | 3 ++- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/go.mod b/go.mod index 9206bddaea..7fb3056a9e 100644 --- a/go.mod +++ b/go.mod @@ -38,7 +38,7 @@ require ( github.com/ipfs/go-cid v0.4.1 github.com/ipfs/go-datastore v0.6.0 github.com/ipfs/go-ipld-cbor v0.1.0 - github.com/ipfs/go-ipld-format v0.5.0 + github.com/ipfs/go-ipld-format v0.6.0 github.com/ipfs/go-log/v2 v2.5.1 github.com/ipfs/go-merkledag v0.11.0 github.com/ipld/go-car v0.6.2 diff --git a/go.sum b/go.sum index b2afe6be69..91e56dd9ca 100644 --- a/go.sum +++ b/go.sum @@ -1156,8 +1156,9 @@ github.com/ipfs/go-ipld-format v0.2.0/go.mod h1:3l3C1uKoadTPbeNfrDi+xMInYKlx2Cvg github.com/ipfs/go-ipld-format v0.3.0/go.mod h1:co/SdBE8h99968X0hViiw1MNlh6fvxxnHpvVLnH7jSM= github.com/ipfs/go-ipld-format v0.3.1/go.mod h1:co/SdBE8h99968X0hViiw1MNlh6fvxxnHpvVLnH7jSM= github.com/ipfs/go-ipld-format v0.4.0/go.mod h1:co/SdBE8h99968X0hViiw1MNlh6fvxxnHpvVLnH7jSM= -github.com/ipfs/go-ipld-format v0.5.0 h1:WyEle9K96MSrvr47zZHKKcDxJ/vlpET6PSiQsAFO+Ds= github.com/ipfs/go-ipld-format v0.5.0/go.mod h1:ImdZqJQaEouMjCvqCe0ORUS+uoBmf7Hf+EO/jh+nk3M= +github.com/ipfs/go-ipld-format v0.6.0 h1:VEJlA2kQ3LqFSIm5Vu6eIlSxD/Ze90xtc4Meten1F5U= +github.com/ipfs/go-ipld-format v0.6.0/go.mod h1:g4QVMTn3marU3qXchwjpKPKgJv+zF+OlaKMyhJ4LHPg= github.com/ipfs/go-ipld-legacy v0.1.0/go.mod h1:86f5P/srAmh9GcIcWQR9lfFLZPrIyyXQeVlOWeeWEuI= github.com/ipfs/go-ipld-legacy v0.1.1/go.mod h1:8AyKFCjgRPsQFf15ZQgDB8Din4DML/fOmKZkkFkrIEg= github.com/ipfs/go-ipld-legacy v0.2.1 h1:mDFtrBpmU7b//LzLSypVrXsD8QxkEWxu5qVxN99/+tk= From a31a8081a1e4471d01320aa61e42abc4ebc338de Mon Sep 17 00:00:00 2001 From: Ryan Date: Mon, 4 Sep 2023 14:51:30 +0200 Subject: [PATCH 13/19] feat: local docker telemetry infra (#2296) Supersedes #1990 Adds local telemetry infra to the Makefile. 
This includes pyroscope, grafana, jaeger, an otel-collector, prometheus, and loki. --------- Co-authored-by: derrandz Co-authored-by: ramin --- Makefile | 12 +++ README.md | 10 ++- docker/telemetry/docker-compose.yml | 89 +++++++++++++++++++ .../telemetry/grafana/datasources/config.yml | 14 +++ docker/telemetry/loki/config.yml | 11 +++ docker/telemetry/otel-collector/config.yml | 32 +++++++ docker/telemetry/prometheus/prometheus.yml | 25 ++++++ docker/telemetry/promtail/config.yml | 29 ++++++ 8 files changed, 218 insertions(+), 4 deletions(-) create mode 100644 docker/telemetry/docker-compose.yml create mode 100644 docker/telemetry/grafana/datasources/config.yml create mode 100644 docker/telemetry/loki/config.yml create mode 100644 docker/telemetry/otel-collector/config.yml create mode 100644 docker/telemetry/prometheus/prometheus.yml create mode 100644 docker/telemetry/promtail/config.yml diff --git a/Makefile b/Makefile index feef6172aa..d4b3d2572e 100644 --- a/Makefile +++ b/Makefile @@ -1,5 +1,6 @@ SHELL=/usr/bin/env bash PROJECTNAME=$(shell basename "$(PWD)") +DIR_FULLPATH=$(shell pwd) versioningPath := "github.com/celestiaorg/celestia-node/nodebuilder/node" LDFLAGS=-ldflags="-X '$(versioningPath).buildTime=$(shell date)' -X '$(versioningPath).lastCommit=$(shell git rev-parse HEAD)' -X '$(versioningPath).semanticVersion=$(shell git describe --tags --dirty=-dev 2>/dev/null || git rev-parse --abbrev-ref HEAD)'" ifeq (${PREFIX},) @@ -176,3 +177,14 @@ adr-gen: @echo "--> Generating ADR" @curl -sSL https://raw.githubusercontent.com/celestiaorg/.github/main/adr-template.md > docs/architecture/adr-$(NUM)-$(TITLE).md .PHONY: adr-gen + +## telemetry-infra-up: launches local telemetry infrastructure. This includes grafana, jaeger, loki, pyroscope, and an otel-collector. +## you can access the grafana instance at localhost:3000 and login with admin:admin. 
+telemetry-infra-up: + PWD="${DIR_FULLPATH}/docker/telemetry" docker-compose -f ./docker/telemetry/docker-compose.yml up +.PHONY: telemetry-infra-up + +## telemetry-infra-down: tears the telemetry infrastructure down. The stores for grafana, prometheus, and loki will persist. +telemetry-infra-down: + PWD="${DIR_FULLPATH}/docker/telemetry" docker-compose -f ./docker/telemetry/docker-compose.yml down +.PHONY: telemetry-infra-down diff --git a/README.md b/README.md index 0711c2d223..89cb67dc15 100644 --- a/README.md +++ b/README.md @@ -24,7 +24,7 @@ Continue reading [here](https://blog.celestia.org/celestia-mvp-release-data-avai - [API docs](#api-docs) - [Node types](#node-types) - [Run a node](#run-a-node) - - [Environment variables](#environment-variables) + - [Environment variables](#environment-variables) - [Package-specific documentation](#package-specific-documentation) - [Code of Conduct](#code-of-conduct) @@ -55,7 +55,7 @@ For more information on setting up a node and the hardware requirements needed, ## API docs -Celestia-node public API is documented [here](https://docs.celestia.org/category/node-api/). +The celestia-node public API is documented [here](https://docs.celestia.org/category/node-api/). ## Node types @@ -67,7 +67,7 @@ More information can be found [here](https://github.com/celestiaorg/celestia-nod ## Run a node -`` can be `bridge`, `full` or `light`. +`` can be: `bridge`, `full` or `light`. ```sh celestia init @@ -77,7 +77,9 @@ celestia init celestia start ``` -### Environment variables +Please refer to [this guide](https://docs.celestia.org/nodes/celestia-node/) for more information on running a node. 
+ +## Environment variables | Variable | Explanation | Default value | Required | | ----------------------- | ----------------------------------- | ------------- | -------- | diff --git a/docker/telemetry/docker-compose.yml b/docker/telemetry/docker-compose.yml new file mode 100644 index 0000000000..9a95551b0c --- /dev/null +++ b/docker/telemetry/docker-compose.yml @@ -0,0 +1,89 @@ +--- +version: '3.8' + +services: + loki: + container_name: loki + image: grafana/loki:2.6.1 + expose: + - 3100 + ports: + - "3100:3100" + restart: unless-stopped + volumes: + - loki-data:/loki + + promtail: + container_name: promtail + image: grafana/promtail:latest + volumes: + # custom config will read logs from the containers of + # this project + - ${PWD}/promtail:/etc/promtail + # to read container labels and logs + - /var/run/docker.sock:/var/run/docker.sock + - /var/lib/docker/containers:/var/lib/docker/containers:ro + depends_on: + - loki + + prometheus: + container_name: prometheus + image: prom/prometheus + ports: + - 9000:9090 + volumes: + - ${PWD}/prometheus:/etc/prometheus + - prometheus-data:/prometheus + # yamllint disable-line rule:line-length + command: --web.enable-lifecycle --config.file=/etc/prometheus/prometheus.yml + extra_hosts: + - "host.docker.internal:host-gateway" + + otel-collector: + container_name: otel-collector + image: otel/opentelemetry-collector + command: ["--config=/root/otel-collector/config.yml"] + volumes: + - ${PWD}/otel-collector:/root/otel-collector/ + ports: + - "8888:8888" # Prometheus metrics exposed by the collector + - "8889:8889" # Prometheus exporter metrics + - "55681:55681" + - "13133:13133" # health_check extension + - "4317:4317" # OTLP gRPC receiver + - "4318:4318" # OTLP http receiver + + jaeger: + container_name: jaeger + image: jaegertracing/all-in-one:latest + ports: + - "16686:16686" + - "14268:14268" + - "14250:14250" + environment: + - COLLECTOR_OTLP_ENABLED=true + - LOG_LEVEL=debug + + grafana: + container_name: grafana + 
image: grafana/grafana:latest + user: "0" + ports: + - 3001:3000 + restart: unless-stopped + volumes: + - ${PWD}/grafana/:/etc/grafana/provisioning/ + - ${PWD}/grafana/:/var/lib/grafana/dashboards/ + - grafana-data:/var/lib/grafana + + pyroscope: + image: "pyroscope/pyroscope:latest" + ports: + - "4040:4040" + command: + - "server" + +volumes: + prometheus-data: + loki-data: + grafana-data: diff --git a/docker/telemetry/grafana/datasources/config.yml b/docker/telemetry/grafana/datasources/config.yml new file mode 100644 index 0000000000..a9ad009548 --- /dev/null +++ b/docker/telemetry/grafana/datasources/config.yml @@ -0,0 +1,14 @@ +--- +apiVersion: 1 + +datasources: + - name: Prometheus + type: prometheus + access: proxy + url: http://prometheus:9090 + - name: Loki + type: loki + access: proxy + url: http://loki:3100 + jsonData: + maxLines: 1000 diff --git a/docker/telemetry/loki/config.yml b/docker/telemetry/loki/config.yml new file mode 100644 index 0000000000..467188f09b --- /dev/null +++ b/docker/telemetry/loki/config.yml @@ -0,0 +1,11 @@ +--- +auth_enabled: true + +http_prefix: + +server: + http_listen_address: 0.0.0.0 + grpc_listen_address: 0.0.0.0 + http_listen_port: 3100 + grpc_listen_port: 9095 + log_level: info diff --git a/docker/telemetry/otel-collector/config.yml b/docker/telemetry/otel-collector/config.yml new file mode 100644 index 0000000000..c74c8ca93c --- /dev/null +++ b/docker/telemetry/otel-collector/config.yml @@ -0,0 +1,32 @@ +--- +extensions: + health_check: + +receivers: + otlp: + protocols: + grpc: + # endpoint: "0.0.0.0:4317" + http: + # endpoint: "0.0.0.0:4318" + +exporters: + prometheus: + endpoint: "otel-collector:8889" + send_timestamps: true + metric_expiration: 1800m + jaeger: + endpoint: "jaeger:14250" + tls: + insecure: true + +service: + extensions: [health_check] + pipelines: + metrics: + receivers: [otlp] + exporters: [prometheus] + traces: + receivers: [otlp] + processors: [] + exporters: [jaeger] diff --git 
a/docker/telemetry/prometheus/prometheus.yml b/docker/telemetry/prometheus/prometheus.yml new file mode 100644 index 0000000000..7cc7395305 --- /dev/null +++ b/docker/telemetry/prometheus/prometheus.yml @@ -0,0 +1,25 @@ +--- +global: + scrape_interval: 15s + scrape_timeout: 10s + evaluation_interval: 15s + +scrape_configs: + - job_name: 'collector' + metrics_path: /metrics + honor_timestamps: true + scrape_interval: 15s + scrape_timeout: 10s + scheme: http + static_configs: + - targets: + - 'otel-collector:8889' + - job_name: 'p2p-metrics' + metrics_path: /metrics + honor_timestamps: true + scrape_interval: 15s + scrape_timeout: 10s + scheme: http + static_configs: + - targets: + - 'host.docker.internal:8890' diff --git a/docker/telemetry/promtail/config.yml b/docker/telemetry/promtail/config.yml new file mode 100644 index 0000000000..95267000a9 --- /dev/null +++ b/docker/telemetry/promtail/config.yml @@ -0,0 +1,29 @@ +# https://grafana.com/docs/loki/latest/clients/promtail/configuration/ +# https://docs.docker.com/engine/api/v1.41/#operation/ContainerList +--- +server: + http_listen_port: 9080 + grpc_listen_port: 0 + +positions: + filename: /tmp/positions.yaml + +clients: + - url: http://loki:3100/loki/api/v1/push + +scrape_configs: + - job_name: flog_scrape + docker_sd_configs: + - host: unix:///var/run/docker.sock + refresh_interval: 5s + filters: + - name: label + values: ["logging=promtail"] + relabel_configs: + - source_labels: ['__meta_docker_container_name'] + regex: '/(.*)' + target_label: 'container' + - source_labels: ['__meta_docker_container_log_stream'] + target_label: 'logstream' + - source_labels: ['__meta_docker_container_label_logging_jobname'] + target_label: 'job' From a01e51d74147f2573e7d7791ab07f05f6a46861c Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 4 Sep 2023 15:15:44 +0200 Subject: [PATCH 14/19] chore(deps): bump github.com/ipfs/go-block-format from 0.1.2 to 0.2.0 (#2623) 
MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Bumps [github.com/ipfs/go-block-format](https://github.com/ipfs/go-block-format) from 0.1.2 to 0.2.0.
Release notes

Sourced from github.com/ipfs/go-block-format's releases.

v0.2.0

What's Changed

New Contributors

Full Changelog: https://github.com/ipfs/go-block-format/compare/v0.1.2...v0.2.0

Commits
  • f2d9400 v0.2.0 bump
  • ee6647c chore!: go@1.20, update deps, uci, remove gx
  • ffa6dd1 build(deps): bump golang.org/x/sys
  • 441d112 chore: Update .github/workflows/stale.yml [skip ci]
  • 1f4e90a chore: Update .github/workflows/stale.yml [skip ci]
  • See full diff in compare view

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=github.com/ipfs/go-block-format&package-manager=go_modules&previous-version=0.1.2&new-version=0.2.0)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) ---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR: - `@dependabot rebase` will rebase this PR - `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it - `@dependabot merge` will merge this PR after your CI passes on it - `@dependabot squash and merge` will squash and merge this PR after your CI passes on it - `@dependabot cancel merge` will cancel a previously requested merge and block automerging - `@dependabot reopen` will reopen this PR if it is closed - `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually - `@dependabot show ignore conditions` will show all of the ignore conditions of the specified dependency - `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Ryan --- go.mod | 2 +- go.sum | 3 ++- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/go.mod b/go.mod index 7fb3056a9e..c44e35e920 100644 --- a/go.mod +++ b/go.mod @@ -33,7 +33,7 @@ require ( github.com/hashicorp/golang-lru v1.0.2 github.com/imdario/mergo v0.3.16 github.com/ipfs/boxo v0.11.0 - github.com/ipfs/go-block-format v0.1.2 + github.com/ipfs/go-block-format v0.2.0 github.com/ipfs/go-blockservice v0.5.1 // down 1 version, 0.5.2 is marked as deprecated and raises alerts github.com/ipfs/go-cid v0.4.1 github.com/ipfs/go-datastore v0.6.0 diff --git a/go.sum b/go.sum index 91e56dd9ca..31b11a82e0 100644 --- a/go.sum +++ b/go.sum @@ -1040,8 +1040,9 @@ github.com/ipfs/go-block-format v0.0.1/go.mod h1:DK/YYcsSUIVAFNwo/KZCdIIbpN0ROH/ github.com/ipfs/go-block-format v0.0.2/go.mod h1:AWR46JfpcObNfg3ok2JHDUfdiHRgWhJgCQF+KIgOPJY= github.com/ipfs/go-block-format v0.0.3/go.mod h1:4LmD4ZUw0mhO+JSKdpWwrzATiEfM7WWgQ8H5l6P8MVk= github.com/ipfs/go-block-format v0.1.1/go.mod h1:+McEIT+g52p+zz5xGAABGSOKrzmrdX97bc0USBdWPUs= -github.com/ipfs/go-block-format v0.1.2 h1:GAjkfhVx1f4YTODS6Esrj1wt2HhrtwTnhEr+DyPUaJo= github.com/ipfs/go-block-format v0.1.2/go.mod h1:mACVcrxarQKstUU3Yf/RdwbC4DzPV6++rO2a3d+a/KE= +github.com/ipfs/go-block-format v0.2.0 h1:ZqrkxBA2ICbDRbK8KJs/u0O3dlp6gmAuuXUJNiW1Ycs= +github.com/ipfs/go-block-format v0.2.0/go.mod h1:+jpL11nFx5A/SPpsoBn6Bzkra/zaArfSmsknbPMYgzM= github.com/ipfs/go-blockservice v0.1.0/go.mod h1:hzmMScl1kXHg3M2BjTymbVPjv627N7sYcvYaKbop39M= github.com/ipfs/go-blockservice v0.1.4/go.mod h1:OTZhFpkgY48kNzbgyvcexW9cHrpjBYIjSR0KoDOFOLU= github.com/ipfs/go-blockservice v0.2.1/go.mod h1:k6SiwmgyYgs4M/qt+ww6amPeUH9EISLRBnvUurKJhi8= From 81d6d3be82ea0be2e94fc0a6aa42e271ae0ec143 Mon Sep 17 00:00:00 2001 From: Evan Forbes <42654277+evan-forbes@users.noreply.github.com> Date: Mon, 4 Sep 2023 11:36:11 -0500 Subject: 
[PATCH 15/19] chore!: bump celestia-app to v1.0.0-rc13 (#2646) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit bumps app to v1.0.0-rc13, core to v1.26.2-tm-v0.34.28, the sdk to v1.18.0-sdk-v0.46.14, and nmt to v0.19.0 per release notes: this is a consensus breaking change that is targetting mocha-4 and arabica-10 besides the already mentioned changes, this bumps app to go 1.21 🎉 and has the simplified programmatic transaction signing repo (we got rid of the KeyringSigner since it was originally designed for tx malleation). This package can be found here, and there are a few (hopefully straightforward) examples here, here, and here. When you all are ready, we'd like to remove the KeyringSigner, and would be delighted to help migrate. --- go.mod | 9 +++++---- go.sum | 29 ++++++++++++++++------------- 2 files changed, 21 insertions(+), 17 deletions(-) diff --git a/go.mod b/go.mod index c44e35e920..8c1fbb1349 100644 --- a/go.mod +++ b/go.mod @@ -12,12 +12,12 @@ require ( github.com/BurntSushi/toml v1.3.2 github.com/alecthomas/jsonschema v0.0.0-20220216202328-9eeeec9d044b github.com/benbjohnson/clock v1.3.5 - github.com/celestiaorg/celestia-app v1.0.0-rc12 + github.com/celestiaorg/celestia-app v1.0.0-rc13 github.com/celestiaorg/go-ds-badger4 v0.0.0-20230712104058-7ede1c814ac5 github.com/celestiaorg/go-fraud v0.2.0 github.com/celestiaorg/go-header v0.3.0 github.com/celestiaorg/go-libp2p-messenger v0.2.0 - github.com/celestiaorg/nmt v0.18.1 + github.com/celestiaorg/nmt v0.19.0 github.com/celestiaorg/rsmt2d v0.11.0 github.com/cosmos/cosmos-sdk v0.46.14 github.com/cosmos/cosmos-sdk/api v0.1.0 @@ -310,6 +310,7 @@ require ( go.opencensus.io v0.24.0 // indirect go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.16.0 // indirect go.opentelemetry.io/otel/exporters/otlp/otlpmetric v0.39.0 // indirect + go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.15.1 // indirect go.uber.org/atomic v1.11.0 // indirect go.uber.org/dig 
v1.17.0 // indirect go.uber.org/multierr v1.11.0 // indirect @@ -336,10 +337,10 @@ require ( ) replace ( - github.com/cosmos/cosmos-sdk => github.com/celestiaorg/cosmos-sdk v1.17.0-sdk-v0.46.14 + github.com/cosmos/cosmos-sdk => github.com/celestiaorg/cosmos-sdk v1.18.0-sdk-v0.46.14 github.com/filecoin-project/dagstore => github.com/celestiaorg/dagstore v0.0.0-20230824094345-537c012aa403 github.com/gogo/protobuf => github.com/regen-network/protobuf v1.3.3-alpha.regen.1 // broken goleveldb needs to be replaced for the cosmos-sdk and celestia-app github.com/syndtr/goleveldb => github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 - github.com/tendermint/tendermint => github.com/celestiaorg/celestia-core v1.24.0-tm-v0.34.28 + github.com/tendermint/tendermint => github.com/celestiaorg/celestia-core v1.26.2-tm-v0.34.28 ) diff --git a/go.sum b/go.sum index 31b11a82e0..d738f67e10 100644 --- a/go.sum +++ b/go.sum @@ -356,12 +356,12 @@ github.com/buger/jsonparser v0.0.0-20181115193947-bf1c66bbce23/go.mod h1:bbYlZJ7 github.com/bwesterb/go-ristretto v1.2.0/go.mod h1:fUIoIZaG73pV5biE2Blr2xEzDoMj7NFEuV9ekS419A0= github.com/c-bata/go-prompt v0.2.2/go.mod h1:VzqtzE2ksDBcdln8G7mk2RX9QyGjH+OVqOCSiVIqS34= github.com/casbin/casbin/v2 v2.1.2/go.mod h1:YcPU1XXisHhLzuxH9coDNf2FbKpjGlbCg3n9yuLkIJQ= -github.com/celestiaorg/celestia-app v1.0.0-rc12 h1:ko9hPD4oz1UTS4ZqzikGVQ0wXi5+4kEhDb7decx5Ehs= -github.com/celestiaorg/celestia-app v1.0.0-rc12/go.mod h1:vXvKEudUpdJCvUr79qVKKJ0Xo7ofsuU80+Hs9aKGjvU= -github.com/celestiaorg/celestia-core v1.24.0-tm-v0.34.28 h1:eXS3v26nob8Xs2+flKHVxcTzhzQW44KgTcooR3OxnK4= -github.com/celestiaorg/celestia-core v1.24.0-tm-v0.34.28/go.mod h1:J/GsBjoTZaFz71VeyrLZbG8rV+Rzi6oFEUZUipQ97hQ= -github.com/celestiaorg/cosmos-sdk v1.17.0-sdk-v0.46.14 h1:PckXGxLJjXv97VO3xS8NPHN5oO83X5nvJLbc/4s8jUM= -github.com/celestiaorg/cosmos-sdk v1.17.0-sdk-v0.46.14/go.mod h1:70Go8qNy7YAb1PUcHCChRHNX2ke7c9jgUIEklUX+Mac= +github.com/celestiaorg/celestia-app v1.0.0-rc13 
h1:BM9lBJw+RcAiFIUmVDd3XBYyXV9rKJBi8mDqc2wok1o= +github.com/celestiaorg/celestia-app v1.0.0-rc13/go.mod h1:JYu6i1NxJw26TVZ+XSllUdnw0Fw3nGNk5f3wm6RIcys= +github.com/celestiaorg/celestia-core v1.26.2-tm-v0.34.28 h1:2efXQaggLFknz0wQufr4nUEz5G7pSVHS1j7NuJDsvII= +github.com/celestiaorg/celestia-core v1.26.2-tm-v0.34.28/go.mod h1:++dNzzzjP9jYg+NopN9G8sg1HEZ58lv1TPtg71evZ0E= +github.com/celestiaorg/cosmos-sdk v1.18.0-sdk-v0.46.14 h1:dDfoQJOlVNj4HufJ1lBLTo2k3/L/255MIiKmEQziDmw= +github.com/celestiaorg/cosmos-sdk v1.18.0-sdk-v0.46.14/go.mod h1:kkdiHo/zG6ar80730+bG1owdMAQXrGp4utFu7mbfADo= github.com/celestiaorg/dagstore v0.0.0-20230824094345-537c012aa403 h1:Lj73O3S+KJx5/hgZ+IeOLEIoLsAveJN/7/ZtQQtPSVw= github.com/celestiaorg/dagstore v0.0.0-20230824094345-537c012aa403/go.mod h1:cCGM1UoMvyTk8k62mkc+ReVu8iHBCtSBAAL4wYU7KEI= github.com/celestiaorg/go-ds-badger4 v0.0.0-20230712104058-7ede1c814ac5 h1:MJgXvhJP1Au8rXTvMMlBXodu9jplEK1DxiLtMnEphOs= @@ -376,8 +376,8 @@ github.com/celestiaorg/go-verifcid v0.0.1-lazypatch h1:9TSe3w1cmJmbWlweCwCTIZkan github.com/celestiaorg/go-verifcid v0.0.1-lazypatch/go.mod h1:kXPYu0XqTNUKWA1h3M95UHjUqBzDwXVVt/RXZDjKJmQ= github.com/celestiaorg/merkletree v0.0.0-20210714075610-a84dc3ddbbe4 h1:CJdIpo8n5MFP2MwK0gSRcOVlDlFdQJO1p+FqdxYzmvc= github.com/celestiaorg/merkletree v0.0.0-20210714075610-a84dc3ddbbe4/go.mod h1:fzuHnhzj1pUygGz+1ZkB3uQbEUL4htqCGJ4Qs2LwMZA= -github.com/celestiaorg/nmt v0.18.1 h1:zU3apzW4y0fs0ilQA74XnEYW8FvRv0CUK2LXK66L3rA= -github.com/celestiaorg/nmt v0.18.1/go.mod h1:0l8q6UYRju1xNrxtvV6NwPdW3lfsN6KuZ0htRnModdc= +github.com/celestiaorg/nmt v0.19.0 h1:9VXFeI/gt+q8h5HeCE0RjXJhOxsFzxJUjHrkvF9CMYE= +github.com/celestiaorg/nmt v0.19.0/go.mod h1:Oz15Ub6YPez9uJV0heoU4WpFctxazuIhKyUtaYNio7E= github.com/celestiaorg/quantum-gravity-bridge v1.3.0 h1:9zPIp7w1FWfkPnn16y3S4FpFLnQtS7rm81CUVcHEts0= github.com/celestiaorg/quantum-gravity-bridge v1.3.0/go.mod h1:6WOajINTDEUXpSj5UZzod16UZ96ZVB/rFNKyM+Mt1gI= github.com/celestiaorg/rsmt2d v0.11.0 
h1:lcto/637WyTEZR3dLRoNvyuExfnUbxvdvKi3qz/2V4k= @@ -2091,8 +2091,9 @@ github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6So github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= -github.com/rogpeppe/go-internal v1.9.0 h1:73kH8U+JUqXU8lRuOHeVHaa/SZPifC7BkcraZVejAe8= github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= +github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= +github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU= github.com/rs/cors v1.8.2 h1:KCooALfAYGs415Cwu5ABvv9n9509fSiG5SQJn/AQo4U= github.com/rs/cors v1.8.2/go.mod h1:XyqrcTp5zjWr1wsJ8PIRZssZ8b/WMcMf71DJnit4EMU= @@ -2147,8 +2148,8 @@ github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6Mwd github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= -github.com/sirupsen/logrus v1.9.0 h1:trlNQbNUG3OdDrDil03MCb1H2o9nJ1x4/5LYw7byDE0= -github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= +github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= +github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= github.com/smartystreets/assertions v1.0.0/go.mod h1:kHHU4qYBaI3q23Pp3VPrmWhuIUrLW/7eUrw0BU5VaoM= 
github.com/smartystreets/assertions v1.2.0 h1:42S6lae5dvLc7BrLu/0ugRtcFVjoJNMC/N3yZFZkDFs= @@ -2369,6 +2370,8 @@ go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.16.0 h1:cbsD4cUcviQGXdw8+bo go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.16.0/go.mod h1:JgXSGah17croqhJfhByOLVY719k1emAXC8MVhCIJlRs= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.16.0 h1:iqjq9LAB8aK++sKVcELezzn655JnBNdsDhghU4G/So8= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.16.0/go.mod h1:hGXzO5bhhSHZnKvrDaXB82Y9DRFour0Nz/KrBh7reWw= +go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.15.1 h1:2PunuO5SbkN5MhCbuHCd3tC6qrcaj+uDAkX/qBU5BAs= +go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.15.1/go.mod h1:q8+Tha+5LThjeSU8BW93uUC5w5/+DnYHMKBMpRCsui0= go.opentelemetry.io/otel/metric v0.20.0/go.mod h1:598I5tYlH1vzBjn+BTuhzTCSb/9debfNp6R3s7Pr1eU= go.opentelemetry.io/otel/metric v1.16.0 h1:RbrpwVG1Hfv85LgnZ7+txXioPDoh6EdbZHo26Q3hqOo= go.opentelemetry.io/otel/metric v1.16.0/go.mod h1:QE47cpOmkwipPiefDwo2wDzwJrlfxxNYodqc4xnGCo4= @@ -2861,8 +2864,8 @@ golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxb golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20201208040808-7e3f01d25324/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20220922220347-f3bd1da661af h1:Yx9k8YCG3dvF87UAn2tu2HQLf2dt/eR1bXxpLMWeH+Y= -golang.org/x/time v0.0.0-20220922220347-f3bd1da661af/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4= +golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools 
v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= From a3220f4503f7b0666b1860148e910658fe130d85 Mon Sep 17 00:00:00 2001 From: ramin Date: Tue, 5 Sep 2023 08:17:45 +0100 Subject: [PATCH 16/19] fix(blob): typo, sumitter -> submitter (#2648) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## Overview - simple change in `blob/service.go` where a typo of sumitter existed in the `Service` struct, renamed to submitter ## Checklist - [√] New and updated code has appropriate documentation - [√] New and updated code has new and/or updated testing - [ ] Required CI checks are passing - [n/a] Visual proof for any user facing features like CLI or documentation updates - [n/a] Linked issues closed with keywords --------- Co-authored-by: Ryan --- blob/service.go | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/blob/service.go b/blob/service.go index da14fc06c7..31cd259efc 100644 --- a/blob/service.go +++ b/blob/service.go @@ -32,7 +32,7 @@ type Submitter interface { type Service struct { // accessor dials the given celestia-core endpoint to submit blobs. - blobSumitter Submitter + blobSubmitter Submitter // shareGetter retrieves the EDS to fetch all shares from the requested header. 
shareGetter share.Getter // headerGetter fetches header by the provided height @@ -45,9 +45,9 @@ func NewService( headerGetter func(context.Context, uint64) (*header.ExtendedHeader, error), ) *Service { return &Service{ - blobSumitter: submitter, - shareGetter: getter, - headerGetter: headerGetter, + blobSubmitter: submitter, + shareGetter: getter, + headerGetter: headerGetter, } } @@ -58,7 +58,7 @@ func NewService( func (s *Service) Submit(ctx context.Context, blobs []*Blob) (uint64, error) { log.Debugw("submitting blobs", "amount", len(blobs)) - resp, err := s.blobSumitter.SubmitPayForBlob(ctx, types.OneInt().Neg(), 0, blobs) + resp, err := s.blobSubmitter.SubmitPayForBlob(ctx, types.OneInt().Neg(), 0, blobs) if err != nil { return 0, err } From f9cf180ab4eb8cac0ffa6ac3b9723d4c77799e3a Mon Sep 17 00:00:00 2001 From: rene <41963722+renaynay@users.noreply.github.com> Date: Tue, 5 Sep 2023 10:22:44 +0200 Subject: [PATCH 17/19] chore(nodebuilder/tests): Clean up integration tests in `p2p_test` (#2166) Cleans up swamp tests in `p2p_test.go`. Based on #2162 --- nodebuilder/tests/p2p_test.go | 190 ++++++++++++++------------------- nodebuilder/tests/sync_test.go | 1 + 2 files changed, 79 insertions(+), 112 deletions(-) diff --git a/nodebuilder/tests/p2p_test.go b/nodebuilder/tests/p2p_test.go index 613dface94..083712dfdd 100644 --- a/nodebuilder/tests/p2p_test.go +++ b/nodebuilder/tests/p2p_test.go @@ -5,7 +5,6 @@ import ( "testing" "time" - "github.com/libp2p/go-libp2p/core/event" "github.com/libp2p/go-libp2p/core/host" "github.com/libp2p/go-libp2p/core/network" "github.com/libp2p/go-libp2p/core/peer" @@ -18,22 +17,22 @@ import ( ) /* -Test-Case: Full/Light Nodes connection to Bridge as a Bootstapper +Test-Case: Full/Light Nodes connection to Bridge as a Bootstrapper Steps: 1. Create a Bridge Node(BN) 2. Start a BN -3. Create full/light nodes with bridge node as bootsrapped peer +3. Create full/light nodes with bridge node as bootstrap peer 4. Start full/light nodes 5. 
Check that nodes are connected to bridge */ -func TestUseBridgeNodeAsBootstraper(t *testing.T) { - sw := swamp.NewSwamp(t) - - bridge := sw.NewBridgeNode() - +func TestBridgeNodeAsBootstrapper(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), swamp.DefaultTestTimeout) t.Cleanup(cancel) + sw := swamp.NewSwamp(t) + + // create and start BN + bridge := sw.NewBridgeNode() err := bridge.Start(ctx) require.NoError(t, err) @@ -41,39 +40,14 @@ func TestUseBridgeNodeAsBootstraper(t *testing.T) { full := sw.NewFullNode(nodebuilder.WithBootstrappers([]peer.AddrInfo{*addr})) light := sw.NewLightNode(nodebuilder.WithBootstrappers([]peer.AddrInfo{*addr})) - nodes := []*nodebuilder.Node{full, light} - for index := range nodes { - require.NoError(t, nodes[index].Start(ctx)) - assert.Equal(t, *addr, nodes[index].Bootstrappers[0]) - assert.True(t, nodes[index].Host.Network().Connectedness(addr.ID) == network.Connected) - } -} - -/* -Test-Case: Add peer to blacklist -Steps: -1. Create a Full Node(BN) -2. Start a FN -3. Create a Light Node(LN) -5. Start a LN -6. Explicitly block FN id by LN -7. Check FN is allowed to dial with LN -8. 
Check LN is not allowed to dial with FN -*/ -func TestAddPeerToBlackList(t *testing.T) { - sw := swamp.NewSwamp(t) - full := sw.NewFullNode() - ctx, cancel := context.WithTimeout(context.Background(), swamp.DefaultTestTimeout) - t.Cleanup(cancel) - require.NoError(t, full.Start(ctx)) - addr := host.InfoFromHost(full.Host) - light := sw.NewLightNode() - require.NoError(t, light.Start(ctx)) - require.NoError(t, light.ConnGater.BlockPeer(addr.ID)) - - require.True(t, full.ConnGater.InterceptPeerDial(host.InfoFromHost(light.Host).ID)) - require.False(t, light.ConnGater.InterceptPeerDial(addr.ID)) + for _, nd := range []*nodebuilder.Node{full, light} { + // start node and ensure that BN is correctly set as bootstrapper + require.NoError(t, nd.Start(ctx)) + assert.Equal(t, *addr, nd.Bootstrappers[0]) + // ensure that node is actually connected to BN + assert.True(t, nd.Host.Network().Connectedness(addr.ID) == network.Connected) + } } /* @@ -86,131 +60,123 @@ Steps: 5. Ensure that nodes are connected to bridge 6. Wait until light will find full node 7. Check that full and light nodes are connected to each other - 8. 
Stop FN and ensure that it's not connected to LN */ -func TestBootstrapNodesFromBridgeNode(t *testing.T) { +func TestFullDiscoveryViaBootstrapper(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), swamp.DefaultTestTimeout) + t.Cleanup(cancel) + + const defaultTimeInterval = time.Second * 2 + sw := swamp.NewSwamp(t) + + // create and start a BN cfg := nodebuilder.DefaultConfig(node.Bridge) - const defaultTimeInterval = time.Second * 10 setTimeInterval(cfg, defaultTimeInterval) - bridge := sw.NewNodeWithConfig(node.Bridge, cfg) - - ctx, cancel := context.WithTimeout(context.Background(), swamp.DefaultTestTimeout) - t.Cleanup(cancel) - err := bridge.Start(ctx) require.NoError(t, err) - bridgeAddr := host.InfoFromHost(bridge.Host) + // use BN as the bootstrapper + bootstrapper := host.InfoFromHost(bridge.Host) + + // create FN with BN as bootstrapper cfg = nodebuilder.DefaultConfig(node.Full) setTimeInterval(cfg, defaultTimeInterval) full := sw.NewNodeWithConfig( node.Full, cfg, - nodebuilder.WithBootstrappers([]peer.AddrInfo{*bridgeAddr}), + nodebuilder.WithBootstrappers([]peer.AddrInfo{*bootstrapper}), ) + // create LN with BN as bootstrapper cfg = nodebuilder.DefaultConfig(node.Light) setTimeInterval(cfg, defaultTimeInterval) - cfg.P2P.PeerExchange = true light := sw.NewNodeWithConfig( node.Light, cfg, - nodebuilder.WithBootstrappers([]peer.AddrInfo{*bridgeAddr}), + nodebuilder.WithBootstrappers([]peer.AddrInfo{*bootstrapper}), ) + + // start FN and LN and ensure they are both connected to BN as a bootstrapper nodes := []*nodebuilder.Node{full, light} - ch := make(chan struct{}) - sub, err := light.Host.EventBus().Subscribe(&event.EvtPeerConnectednessChanged{}) - require.NoError(t, err) - defer sub.Close() for index := range nodes { require.NoError(t, nodes[index].Start(ctx)) - assert.Equal(t, *bridgeAddr, nodes[index].Bootstrappers[0]) - assert.True(t, nodes[index].Host.Network().Connectedness(bridgeAddr.ID) == network.Connected) + 
assert.Equal(t, *bootstrapper, nodes[index].Bootstrappers[0]) + assert.True(t, nodes[index].Host.Network().Connectedness(bootstrapper.ID) == network.Connected) } - addrFull := host.InfoFromHost(full.Host) - go func() { - for e := range sub.Out() { - connStatus := e.(event.EvtPeerConnectednessChanged) - if connStatus.Peer == full.Host.ID() && connStatus.Connectedness == network.NotConnected { - ch <- struct{}{} - } + + for { + if ctx.Err() != nil { + t.Fatal(ctx.Err()) + } + if light.Host.Network().Connectedness(host.InfoFromHost(full.Host).ID) == network.Connected { + // LN discovered FN successfully and is now connected + break } - }() - - // ensure that the light node is connected to the full node - assert.True(t, light.Host.Network().Connectedness(addrFull.ID) == network.Connected) - - sw.Disconnect(t, light, full) - require.NoError(t, full.Stop(ctx)) - select { - case <-ctx.Done(): - t.Fatal("peer was not disconnected") - case <-ch: - assert.True(t, light.Host.Network().Connectedness(addrFull.ID) == network.NotConnected) } } /* -Test-Case: Restart full node discovery after one node is disconnected +Test-Case: Full node discovery of disconnected full nodes Steps: 1. Create a Bridge Node(BN) 2. Start a BN -3. Create 2 full nodes with bridge node as bootstrapper peer and start them -4. Check that nodes are connected to each other -5. Create one more node with disabled discovery -6. Disconnect FNs from each other -7. Check that the last FN is connected to one of the nodes +3. Create 2 FNs with bridge node as bootstrapper peer and start them +4. Check that the FNs discover each other +5. Disconnect the FNs +6. Create one more node with discovery process disabled (however advertisement is still enabled) +7. 
Check that the FN with discovery disabled is still found by the other two FNs *NOTE*: this test will take some time because it relies on several cycles of peer discovery */ func TestRestartNodeDiscovery(t *testing.T) { - sw := swamp.NewSwamp(t) - cfg := nodebuilder.DefaultConfig(node.Bridge) - const defaultTimeInterval = time.Second * 2 - const fullNodes = 2 - - setTimeInterval(cfg, defaultTimeInterval) - cfg.Share.Discovery.PeersLimit = fullNodes - bridge := sw.NewNodeWithConfig(node.Bridge, cfg) - ctx, cancel := context.WithTimeout(context.Background(), swamp.DefaultTestTimeout) t.Cleanup(cancel) + const ( + defaultTimeInterval = time.Second * 2 + numFulls = 2 + ) + + sw := swamp.NewSwamp(t) + + // create and start a BN as a bootstrapper + fullCfg := nodebuilder.DefaultConfig(node.Bridge) + setTimeInterval(fullCfg, defaultTimeInterval) + bridge := sw.NewNodeWithConfig(node.Bridge, fullCfg) err := bridge.Start(ctx) require.NoError(t, err) + bridgeAddr := host.InfoFromHost(bridge.Host) - nodes := make([]*nodebuilder.Node, fullNodes) - cfg = nodebuilder.DefaultConfig(node.Full) - setTimeInterval(cfg, defaultTimeInterval) - cfg.Share.Discovery.PeersLimit = fullNodes + fullCfg = nodebuilder.DefaultConfig(node.Full) + setTimeInterval(fullCfg, defaultTimeInterval) nodesConfig := nodebuilder.WithBootstrappers([]peer.AddrInfo{*bridgeAddr}) - for index := 0; index < fullNodes; index++ { - nodes[index] = sw.NewNodeWithConfig(node.Full, cfg, nodesConfig) - } - for index := 0; index < fullNodes; index++ { + // create two FNs and start them, ensuring they are connected to BN as + // bootstrapper + nodes := make([]*nodebuilder.Node, numFulls) + for index := 0; index < numFulls; index++ { + nodes[index] = sw.NewNodeWithConfig(node.Full, fullCfg, nodesConfig) require.NoError(t, nodes[index].Start(ctx)) assert.True(t, nodes[index].Host.Network().Connectedness(bridgeAddr.ID) == network.Connected) } - // ensure full nodes are connected to each other + // ensure FNs are connected to 
each other require.True(t, nodes[0].Host.Network().Connectedness(nodes[1].Host.ID()) == network.Connected) - // create one more node with disabled discovery - cfg = nodebuilder.DefaultConfig(node.Full) - setTimeInterval(cfg, defaultTimeInterval) - cfg.Share.Discovery.PeersLimit = 0 - node := sw.NewNodeWithConfig(node.Full, cfg, nodesConfig) - connectSub, err := nodes[0].Host.EventBus().Subscribe(&event.EvtPeerConnectednessChanged{}) - require.NoError(t, err) - defer connectSub.Close() + // disconnect the FNs sw.Disconnect(t, nodes[0], nodes[1]) - require.NoError(t, node.Start(ctx)) - // ensure that the last node is connected to one of the nodes - require.True(t, nodes[0].Host.Network().Connectedness(node.Host.ID()) == network.Connected) + // create and start one more FN with disabled discovery + fullCfg.Share.Discovery.PeersLimit = 0 + disabledDiscoveryFN := sw.NewNodeWithConfig(node.Full, fullCfg, nodesConfig) + err = disabledDiscoveryFN.Start(ctx) + require.NoError(t, err) + + // ensure that the FN with disabled discovery is discovered by both of the + // running FNs that have discovery enabled + require.True(t, nodes[0].Host.Network().Connectedness(disabledDiscoveryFN.Host.ID()) == network.Connected) + require.True(t, nodes[1].Host.Network().Connectedness(disabledDiscoveryFN.Host.ID()) == network.Connected) } func setTimeInterval(cfg *nodebuilder.Config, interval time.Duration) { diff --git a/nodebuilder/tests/sync_test.go b/nodebuilder/tests/sync_test.go index 234556a3aa..65db1332ff 100644 --- a/nodebuilder/tests/sync_test.go +++ b/nodebuilder/tests/sync_test.go @@ -297,6 +297,7 @@ func TestSyncLightAgainstFull(t *testing.T) { require.NoError(t, err) _, err = full.HeaderServ.WaitForHeight(ctx, bridgeHead.Height()) require.NoError(t, err) + assert.EqualValues(t, h.Commit.BlockID.Hash, sw.GetCoreBlockHashByHeight(ctx, numBlocks)) // reset suite bootstrapper list and set full node as a bootstrapper for // LN to connect to From 
da8244e00fc434139fb1fd21414683660553c239 Mon Sep 17 00:00:00 2001 From: Ryan Date: Tue, 5 Sep 2023 11:06:33 +0200 Subject: [PATCH 18/19] refactor(blob): removing jsonProof (#2652) Closes #2634 . nmt.Proof has json serialization built in now so jsonProof is unnecessary and results in missing fields from the original struct. Only adds additional fields to the JSON response so not breaking --- blob/blob.go | 37 ------------------------------------- blob/service_test.go | 5 +++-- 2 files changed, 3 insertions(+), 39 deletions(-) diff --git a/blob/blob.go b/blob/blob.go index e9ad2b6255..c5fd04c782 100644 --- a/blob/blob.go +++ b/blob/blob.go @@ -12,7 +12,6 @@ import ( "github.com/celestiaorg/nmt" "github.com/celestiaorg/celestia-node/share" - "github.com/celestiaorg/celestia-node/share/ipld" ) // Commitment is a Merkle Root of the subtree built from shares of the Blob. @@ -62,42 +61,6 @@ func (p Proof) equal(input Proof) error { return nil } -type jsonProof struct { - Start int `json:"start"` - End int `json:"end"` - Nodes [][]byte `json:"nodes"` -} - -func (p *Proof) MarshalJSON() ([]byte, error) { - proofs := make([]jsonProof, 0, p.Len()) - for _, pp := range *p { - proofs = append(proofs, jsonProof{ - Start: pp.Start(), - End: pp.End(), - Nodes: pp.Nodes(), - }) - } - - return json.Marshal(proofs) -} - -func (p *Proof) UnmarshalJSON(data []byte) error { - var proofs []jsonProof - err := json.Unmarshal(data, &proofs) - if err != nil { - return err - } - - nmtProofs := make([]*nmt.Proof, len(proofs)) - for i, jProof := range proofs { - nmtProof := nmt.NewInclusionProof(jProof.Start, jProof.End, jProof.Nodes, ipld.NMTIgnoreMaxNamespace) - nmtProofs[i] = &nmtProof - } - - *p = nmtProofs - return nil -} - // Blob represents any application-specific binary data that anyone can submit to Celestia. 
type Blob struct { types.Blob `json:"blob"` diff --git a/blob/service_test.go b/blob/service_test.go index 1dcabe7129..a45fe631af 100644 --- a/blob/service_test.go +++ b/blob/service_test.go @@ -4,6 +4,7 @@ import ( "bytes" "context" "crypto/sha256" + "encoding/json" "testing" "time" @@ -272,14 +273,14 @@ func TestBlobService_Get(t *testing.T) { doFn: func() (interface{}, error) { proof, err := service.GetProof(ctx, 1, blobs0[1].Namespace(), blobs0[1].Commitment) require.NoError(t, err) - return proof.MarshalJSON() + return json.Marshal(proof) }, expectedResult: func(i interface{}, err error) { require.NoError(t, err) jsonData, ok := i.([]byte) require.True(t, ok) var proof Proof - require.NoError(t, proof.UnmarshalJSON(jsonData)) + require.NoError(t, json.Unmarshal(jsonData, &proof)) newProof, err := service.GetProof(ctx, 1, blobs0[1].Namespace(), blobs0[1].Commitment) require.NoError(t, err) From 456d169582dc02aafb7d5df891da5973d54a20b7 Mon Sep 17 00:00:00 2001 From: Ryan Date: Tue, 5 Sep 2023 11:43:52 +0200 Subject: [PATCH 19/19] feat(eds/store): remove corrupted blocks from store (#2625) Related to #2335 If an OpShardFail is found or corruption is detected from GetSharesByNamespace, the shard is removed --- nodebuilder/share/constructors.go | 18 +++++++ nodebuilder/share/module.go | 79 ++++++++++++++++++++----------- share/eds/accessor_cache.go | 8 ++++ share/eds/metrics.go | 20 ++++++++ share/eds/store.go | 50 ++++++++++++++----- share/eds/store_test.go | 38 +++++++++++++++ share/getters/getter_test.go | 29 ++++++++++++ share/getters/shrex_test.go | 3 +- share/getters/store.go | 12 +++++ share/p2p/peers/manager.go | 28 ++++++++--- share/p2p/peers/manager_test.go | 5 +- share/p2p/peers/options.go | 17 +++++++ share/share.go | 14 ++++++ 13 files changed, 269 insertions(+), 52 deletions(-) diff --git a/nodebuilder/share/constructors.go b/nodebuilder/share/constructors.go index a1b7e39713..b962038d17 100644 --- a/nodebuilder/share/constructors.go +++ 
b/nodebuilder/share/constructors.go @@ -87,6 +87,24 @@ func lightGetter( return getters.NewCascadeGetter(cascade) } +// ShrexGetter is added to bridge nodes for the case that a shard is removed +// after detected shard corruption. This ensures the block is fetched and stored +// by shrex the next time the data is retrieved (meaning shard recovery is +// manual after corruption is detected). +func bridgeGetter( + store *eds.Store, + storeGetter *getters.StoreGetter, + shrexGetter *getters.ShrexGetter, + cfg Config, +) share.Getter { + var cascade []share.Getter + cascade = append(cascade, storeGetter) + if cfg.UseShareExchange { + cascade = append(cascade, getters.NewTeeGetter(shrexGetter, store)) + } + return getters.NewCascadeGetter(cascade) +} + func fullGetter( store *eds.Store, storeGetter *getters.StoreGetter, diff --git a/nodebuilder/share/module.go b/nodebuilder/share/module.go index b924cf8167..f7a84bf526 100644 --- a/nodebuilder/share/module.go +++ b/nodebuilder/share/module.go @@ -5,9 +5,12 @@ import ( "github.com/ipfs/go-datastore" "github.com/libp2p/go-libp2p/core/host" + "github.com/libp2p/go-libp2p/p2p/net/conngater" "go.uber.org/fx" - "github.com/celestiaorg/celestia-node/libs/fxutil" + libhead "github.com/celestiaorg/go-header" + + "github.com/celestiaorg/celestia-node/header" "github.com/celestiaorg/celestia-node/nodebuilder/node" modp2p "github.com/celestiaorg/celestia-node/nodebuilder/p2p" "github.com/celestiaorg/celestia-node/share" @@ -48,6 +51,33 @@ func ConstructModule(tp node.Type, cfg *Config, options ...fx.Option) fx.Option ), ) + shrexGetterComponents := fx.Options( + fx.Provide(func() peers.Parameters { + return cfg.PeerManagerParams + }), + fx.Provide( + func(host host.Host, network modp2p.Network) (*shrexnd.Client, error) { + cfg.ShrExNDParams.WithNetworkID(network.String()) + return shrexnd.NewClient(cfg.ShrExNDParams, host) + }, + ), + fx.Provide( + func(host host.Host, network modp2p.Network) (*shrexeds.Client, error) { + 
cfg.ShrExEDSParams.WithNetworkID(network.String()) + return shrexeds.NewClient(cfg.ShrExEDSParams, host) + }, + ), + fx.Provide(fx.Annotate( + getters.NewShrexGetter, + fx.OnStart(func(ctx context.Context, getter *getters.ShrexGetter) error { + return getter.Start(ctx) + }), + fx.OnStop(func(ctx context.Context, getter *getters.ShrexGetter) error { + return getter.Stop(ctx) + }), + )), + ) + bridgeAndFullComponents := fx.Options( fx.Provide(getters.NewStoreGetter), fx.Invoke(func(edsSrv *shrexeds.Server, ndSrc *shrexnd.Server) {}), @@ -112,32 +142,25 @@ func ConstructModule(tp node.Type, cfg *Config, options ...fx.Option) fx.Option }), ) - shrexGetterComponents := fx.Options( - fx.Provide(func() peers.Parameters { - return cfg.PeerManagerParams - }), - fx.Provide(peers.NewManager), - fx.Provide( - func(host host.Host, network modp2p.Network) (*shrexnd.Client, error) { - cfg.ShrExNDParams.WithNetworkID(network.String()) - return shrexnd.NewClient(cfg.ShrExNDParams, host) - }, - ), + peerManagerWithShrexPools := fx.Options( fx.Provide( - func(host host.Host, network modp2p.Network) (*shrexeds.Client, error) { - cfg.ShrExEDSParams.WithNetworkID(network.String()) - return shrexeds.NewClient(cfg.ShrExEDSParams, host) + func( + params peers.Parameters, + discovery *disc.Discovery, + host host.Host, + connGater *conngater.BasicConnectionGater, + shrexSub *shrexsub.PubSub, + headerSub libhead.Subscriber[*header.ExtendedHeader], + ) (*peers.Manager, error) { + return peers.NewManager( + params, + discovery, + host, + connGater, + peers.WithShrexSubPools(shrexSub, headerSub), + ) }, ), - fx.Provide(fx.Annotate( - getters.NewShrexGetter, - fx.OnStart(func(ctx context.Context, getter *getters.ShrexGetter) error { - return getter.Start(ctx) - }), - fx.OnStop(func(ctx context.Context, getter *getters.ShrexGetter) error { - return getter.Stop(ctx) - }), - )), ) switch tp { @@ -145,10 +168,10 @@ func ConstructModule(tp node.Type, cfg *Config, options ...fx.Option) fx.Option return 
fx.Module( "share", baseComponents, + fx.Provide(peers.NewManager), bridgeAndFullComponents, - fxutil.ProvideAs(func(getter *getters.StoreGetter) share.Getter { - return getter - }), + shrexGetterComponents, + fx.Provide(bridgeGetter), fx.Invoke(func(lc fx.Lifecycle, sub *shrexsub.PubSub) error { lc.Append(fx.Hook{ OnStart: sub.Start, @@ -160,6 +183,7 @@ func ConstructModule(tp node.Type, cfg *Config, options ...fx.Option) fx.Option case node.Full: return fx.Module( "share", + peerManagerWithShrexPools, baseComponents, bridgeAndFullComponents, shrexGetterComponents, @@ -175,6 +199,7 @@ func ConstructModule(tp node.Type, cfg *Config, options ...fx.Option) fx.Option light.WithSampleAmount(cfg.LightAvailability.SampleAmount), } }), + peerManagerWithShrexPools, shrexGetterComponents, fx.Invoke(ensureEmptyEDSInBS), fx.Provide(getters.NewIPLDGetter), diff --git a/share/eds/accessor_cache.go b/share/eds/accessor_cache.go index cd0f0537fa..9f70178be6 100644 --- a/share/eds/accessor_cache.go +++ b/share/eds/accessor_cache.go @@ -69,6 +69,14 @@ func (bc *blockstoreCache) evictFn() func(_ interface{}, val interface{}) { } } +func (bc *blockstoreCache) Remove(key shard.Key) bool { + lk := &bc.stripedLocks[shardKeyToStriped(key)] + lk.Lock() + defer lk.Unlock() + + return bc.cache.Remove(key) +} + // Get retrieves the blockstore for a given shard key from the cache. 
If the blockstore is not in // the cache, it returns an errCacheMiss func (bc *blockstoreCache) Get(shardContainingCid shard.Key) (*accessorWithBlockstore, error) { diff --git a/share/eds/metrics.go b/share/eds/metrics.go index 1f430bf688..9d4b2a53ef 100644 --- a/share/eds/metrics.go +++ b/share/eds/metrics.go @@ -47,6 +47,8 @@ type metrics struct { listTime metric.Float64Histogram getAccessorTime metric.Float64Histogram + shardFailureCount metric.Int64Counter + longOpTime metric.Float64Histogram gcTime metric.Float64Histogram } @@ -106,6 +108,12 @@ func (s *Store) WithMetrics() error { return err } + shardFailureCount, err := meter.Int64Counter("eds_store_shard_failure_counter", + metric.WithDescription("eds store OpShardFail counter")) + if err != nil { + return err + } + longOpTime, err := meter.Float64Histogram("eds_store_long_operation_time_histogram", metric.WithDescription("eds store long operation time histogram(s)")) if err != nil { @@ -153,6 +161,7 @@ func (s *Store) WithMetrics() error { hasTime: hasTime, listTime: listTime, getAccessorTime: getAccessorTime, + shardFailureCount: shardFailureCount, longOpTime: longOpTime, gcTime: gcTime, } @@ -170,6 +179,17 @@ func (m *metrics) observeGCtime(ctx context.Context, dur time.Duration, failed b attribute.Bool(failedKey, failed))) } +func (m *metrics) observeShardFailure(ctx context.Context, shardKey string) { + if m == nil { + return + } + if ctx.Err() != nil { + ctx = context.Background() + } + + m.shardFailureCount.Add(ctx, 1, metric.WithAttributes(attribute.String("shard_key", shardKey))) +} + func (m *metrics) observePut(ctx context.Context, dur time.Duration, result putResult, size uint) { if m == nil { return diff --git a/share/eds/store.go b/share/eds/store.go index 24a96c9fe4..fa9a7f7c7e 100644 --- a/share/eds/store.go +++ b/share/eds/store.go @@ -4,7 +4,6 @@ import ( "bufio" "bytes" "context" - "encoding/hex" "errors" "fmt" "io" @@ -64,6 +63,8 @@ type Store struct { // lastGCResult is only stored on 
the store for testing purposes. lastGCResult atomic.Pointer[dagstore.GCResult] + shardFailures chan dagstore.ShardResult + metrics *metrics } @@ -92,6 +93,8 @@ func NewStore(basepath string, ds datastore.Batching) (*Store, error) { if err != nil { return nil, fmt.Errorf("failed to create index: %w", err) } + + failureChan := make(chan dagstore.ShardResult) dagStore, err := dagstore.NewDAGStore( dagstore.Config{ TransientsDir: basepath + transientsPath, @@ -99,6 +102,7 @@ func NewStore(basepath string, ds datastore.Batching) (*Store, error) { Datastore: ds, MountRegistry: r, TopLevelIndex: invertedIdx, + FailureCh: failureChan, }, ) if err != nil { @@ -111,13 +115,14 @@ func NewStore(basepath string, ds datastore.Batching) (*Store, error) { } store := &Store{ - basepath: basepath, - dgstr: dagStore, - carIdx: fsRepo, - invertedIdx: invertedIdx, - gcInterval: defaultGCInterval, - mounts: r, - cache: cache, + basepath: basepath, + dgstr: dagStore, + carIdx: fsRepo, + invertedIdx: invertedIdx, + gcInterval: defaultGCInterval, + mounts: r, + shardFailures: failureChan, + cache: cache, } store.bs = newBlockstore(store, cache, ds) return store, nil @@ -139,6 +144,8 @@ func (s *Store) Start(ctx context.Context) error { if s.gcInterval != 0 { go s.gc(runCtx) } + + go s.watchForFailures(runCtx) return nil } @@ -172,6 +179,23 @@ func (s *Store) gc(ctx context.Context) { } } +func (s *Store) watchForFailures(ctx context.Context) { + for { + select { + case <-ctx.Done(): + return + case res := <-s.shardFailures: + log.Errorw("removing shard after failure", "key", res.Key, "err", res.Error) + s.metrics.observeShardFailure(ctx, res.Key.String()) + k := share.MustDataHashFromString(res.Key.String()) + err := s.Remove(ctx, k) + if err != nil { + log.Errorw("failed to remove shard after failure", "key", res.Key, "err", err) + } + } + } +} + // Put stores the given data square with DataRoot's hash as a key. 
// // The square is verified on the Exchange level, and Put only stores the square, trusting it. @@ -437,6 +461,11 @@ func (s *Store) Remove(ctx context.Context, root share.DataHash) error { func (s *Store) remove(ctx context.Context, root share.DataHash) (err error) { key := root.String() + + // Remove from accessor cache, so that existing readers are closed and + // DestroyShard can be executed. + s.cache.Remove(shard.KeyFromString(key)) + ch := make(chan dagstore.ShardResult, 1) err = s.dgstr.DestroyShard(ctx, shard.KeyFromString(key), ch, dagstore.DestroyOpts{}) if err != nil { @@ -535,10 +564,7 @@ func (s *Store) list() ([]share.DataHash, error) { shards := s.dgstr.AllShardsInfo() hashes := make([]share.DataHash, 0, len(shards)) for shrd := range shards { - hash, err := hex.DecodeString(shrd.String()) - if err != nil { - return nil, err - } + hash := share.MustDataHashFromString(shrd.String()) hashes = append(hashes, hash) } return hashes, nil diff --git a/share/eds/store_test.go b/share/eds/store_test.go index 4b263e7062..4f1d7f4c8b 100644 --- a/share/eds/store_test.go +++ b/share/eds/store_test.go @@ -122,6 +122,44 @@ func TestEDSStore(t *testing.T) { assert.ErrorContains(t, err, "no such file or directory") }) + t.Run("Remove after OpShardFail", func(t *testing.T) { + eds, dah := randomEDS(t) + + err = edsStore.Put(ctx, dah.Hash(), eds) + require.NoError(t, err) + + // assert that shard now exists + ok, err := edsStore.Has(ctx, dah.Hash()) + assert.NoError(t, err) + assert.True(t, ok) + + // assert that file now exists + path := edsStore.basepath + blocksPath + dah.String() + _, err = os.Stat(path) + assert.NoError(t, err) + + err = os.Remove(path) + assert.NoError(t, err) + + _, err = edsStore.GetCAR(ctx, dah.Hash()) + assert.Error(t, err) + + ticker := time.NewTicker(time.Millisecond * 100) + defer ticker.Stop() + for { + select { + case <-ticker.C: + has, err := edsStore.Has(ctx, dah.Hash()) + if err == nil && !has { + // shard no longer exists after 
OpShardFail was detected from GetCAR call + return + } + case <-ctx.Done(): + t.Fatal("timeout waiting for shard to be removed") + } + } + }) + t.Run("Has", func(t *testing.T) { eds, dah := randomEDS(t) diff --git a/share/getters/getter_test.go b/share/getters/getter_test.go index 02e075459b..d19d0cf174 100644 --- a/share/getters/getter_test.go +++ b/share/getters/getter_test.go @@ -2,6 +2,7 @@ package getters import ( "context" + "os" "testing" "time" @@ -153,6 +154,34 @@ func TestStoreGetter(t *testing.T) { _, err = sg.GetSharesByNamespace(ctx, &root, namespace) require.ErrorIs(t, err, share.ErrNotFound) }) + + t.Run("GetSharesFromNamespace removes corrupted shard", func(t *testing.T) { + randEds, namespace, dah := randomEDSWithDoubledNamespace(t, 4) + err = edsStore.Put(ctx, dah.Hash(), randEds) + require.NoError(t, err) + + // available + shares, err := sg.GetSharesByNamespace(ctx, &dah, namespace) + require.NoError(t, err) + require.NoError(t, shares.Verify(&dah, namespace)) + assert.Len(t, shares.Flatten(), 2) + + // 'corrupt' existing CAR by overwriting with a random EDS + f, err := os.OpenFile(tmpDir+"/blocks/"+dah.String(), os.O_WRONLY, 0644) + require.NoError(t, err) + edsToOverwriteWith, dah := randomEDS(t) + err = eds.WriteEDS(ctx, edsToOverwriteWith, f) + require.NoError(t, err) + + shares, err = sg.GetSharesByNamespace(ctx, &dah, namespace) + require.ErrorIs(t, err, share.ErrNotFound) + require.Nil(t, shares) + + // corruption detected, shard is removed + has, err := edsStore.Has(ctx, dah.Hash()) + require.False(t, has) + require.NoError(t, err) + }) } func TestIPLDGetter(t *testing.T) { diff --git a/share/getters/shrex_test.go b/share/getters/shrex_test.go index 0ca807d0d4..236fae36e1 100644 --- a/share/getters/shrex_test.go +++ b/share/getters/shrex_test.go @@ -227,11 +227,10 @@ func testManager( } manager, err := peers.NewManager( peers.DefaultParameters(), - headerSub, - shrexSub, disc, host, connGater, + peers.WithShrexSubPools(shrexSub, 
headerSub), ) return manager, err } diff --git a/share/getters/store.go b/share/getters/store.go index 989649f795..5eca956faa 100644 --- a/share/getters/store.go +++ b/share/getters/store.go @@ -121,6 +121,18 @@ func (sg *StoreGetter) GetSharesByNamespace( // wrap the read-only CAR blockstore in a getter blockGetter := eds.NewBlockGetter(bs) shares, err = collectSharesByNamespace(ctx, blockGetter, root, namespace) + if errors.Is(err, ipld.ErrNodeNotFound) { + // IPLD node not found after the index pointed to this shard and the CAR blockstore has been + // opened successfully is a strong indicator of corruption. We remove the block on bridges + // and fulls and return share.ErrNotFound to ensure the data is retrieved by the next + // getter. Note that this recovery is manual and will only be restored by an RPC call to + // fetch the same datahash that was removed. + err = sg.store.Remove(ctx, root.Hash()) + if err != nil { + log.Errorf("getter/store: failed to remove CAR after detected corruption: %w", err) + } + err = share.ErrNotFound + } if err != nil { return nil, fmt.Errorf("getter/store: failed to retrieve shares by namespace: %w", err) } diff --git a/share/p2p/peers/manager.go b/share/p2p/peers/manager.go index 87f9361ee2..caef242eec 100644 --- a/share/p2p/peers/manager.go +++ b/share/p2p/peers/manager.go @@ -99,11 +99,10 @@ type syncPool struct { func NewManager( params Parameters, - headerSub libhead.Subscriber[*header.ExtendedHeader], - shrexSub *shrexsub.PubSub, discovery *discovery.Discovery, host host.Host, connGater *conngater.BasicConnectionGater, + options ...Option, ) (*Manager, error) { if err := params.Validate(); err != nil { return nil, err @@ -111,8 +110,6 @@ func NewManager( s := &Manager{ params: params, - headerSub: headerSub, - shrexSub: shrexSub, connGater: connGater, disc: discovery, host: host, @@ -122,6 +119,13 @@ func NewManager( disconnectedPeersDone: make(chan struct{}), } + for _, opt := range options { + err := opt(s) + if err != 
nil { + return nil, err + } + } + s.fullNodes = newPool(s.params.PeerCooldown) discovery.WithOnPeersUpdate( @@ -147,12 +151,17 @@ func (m *Manager) Start(startCtx context.Context) error { ctx, cancel := context.WithCancel(context.Background()) m.cancel = cancel + // pools will only be populated with senders of shrexsub notifications if the WithShrexSubPools + // option is used. + if m.shrexSub == nil && m.headerSub == nil { + return nil + } + validatorFn := m.metrics.validationObserver(m.Validate) err := m.shrexSub.AddValidator(validatorFn) if err != nil { return fmt.Errorf("registering validator: %w", err) } - err = m.shrexSub.Start(startCtx) if err != nil { return fmt.Errorf("starting shrexsub: %w", err) @@ -168,16 +177,21 @@ func (m *Manager) Start(startCtx context.Context) error { return fmt.Errorf("subscribing to libp2p events: %w", err) } - go m.subscribeDisconnectedPeers(ctx, sub) go m.subscribeHeader(ctx, headerSub) + go m.subscribeDisconnectedPeers(ctx, sub) go m.GC(ctx) - return nil } func (m *Manager) Stop(ctx context.Context) error { m.cancel() + // we do not need to wait for headersub and disconnected peers to finish + // here, since they were never started + if m.headerSub == nil && m.shrexSub == nil { + return nil + } + select { case <-m.headerSubDone: case <-ctx.Done(): diff --git a/share/p2p/peers/manager_test.go b/share/p2p/peers/manager_test.go index ad04d2c7bd..c60a737baa 100644 --- a/share/p2p/peers/manager_test.go +++ b/share/p2p/peers/manager_test.go @@ -419,8 +419,6 @@ func TestIntegration(t *testing.T) { require.NoError(t, err) fnPeerManager, err := NewManager( DefaultParameters(), - nil, - nil, fnDisc, nil, connGater, @@ -469,11 +467,10 @@ func testManager(ctx context.Context, headerSub libhead.Subscriber[*header.Exten } manager, err := NewManager( DefaultParameters(), - headerSub, - shrexSub, disc, host, connGater, + WithShrexSubPools(shrexSub, headerSub), ) if err != nil { return nil, err diff --git a/share/p2p/peers/options.go 
b/share/p2p/peers/options.go index cfda906071..97ec30df4a 100644 --- a/share/p2p/peers/options.go +++ b/share/p2p/peers/options.go @@ -3,6 +3,11 @@ package peers import ( "fmt" "time" + + libhead "github.com/celestiaorg/go-header" + + "github.com/celestiaorg/celestia-node/header" + "github.com/celestiaorg/celestia-node/share/p2p/shrexsub" ) type Parameters struct { @@ -21,6 +26,8 @@ type Parameters struct { EnableBlackListing bool } +type Option func(*Manager) error + // Validate validates the values in Parameters func (p *Parameters) Validate() error { if p.PoolValidationTimeout <= 0 { @@ -56,6 +63,16 @@ func DefaultParameters() Parameters { } } +// WithShrexSubPools passes a shrexsub and headersub instance to be used to populate and validate +// pools from shrexsub notifications. +func WithShrexSubPools(shrexSub *shrexsub.PubSub, headerSub libhead.Subscriber[*header.ExtendedHeader]) Option { + return func(m *Manager) error { + m.shrexSub = shrexSub + m.headerSub = headerSub + return nil + } +} + // WithMetrics turns on metric collection in peer manager. func (m *Manager) WithMetrics() error { metrics, err := initMetrics(m) diff --git a/share/share.go b/share/share.go index 02ccd73909..4079028d82 100644 --- a/share/share.go +++ b/share/share.go @@ -2,6 +2,7 @@ package share import ( "bytes" + "encoding/hex" "fmt" "github.com/celestiaorg/celestia-app/pkg/appconsts" @@ -57,3 +58,16 @@ func (dh DataHash) String() string { func (dh DataHash) IsEmptyRoot() bool { return bytes.Equal(EmptyRoot().Hash(), dh) } + +// MustDataHashFromString converts a hex string to a valid datahash. +func MustDataHashFromString(datahash string) DataHash { + dh, err := hex.DecodeString(datahash) + if err != nil { + panic(fmt.Sprintf("datahash conversion: passed string was not valid hex: %s", datahash)) + } + err = DataHash(dh).Validate() + if err != nil { + panic(fmt.Sprintf("datahash validation: passed hex string failed: %s", err)) + } + return dh +}