diff --git a/.github/workflows/ci_release.yml b/.github/workflows/ci_release.yml index 574f425c7b..654c3c0a34 100644 --- a/.github/workflows/ci_release.yml +++ b/.github/workflows/ci_release.yml @@ -24,7 +24,7 @@ on: jobs: # Dockerfile Linting hadolint: - uses: celestiaorg/.github/.github/workflows/reusable_dockerfile_lint.yml@v0.1.1 # yamllint disable-line rule:line-length + uses: celestiaorg/.github/.github/workflows/reusable_dockerfile_lint.yml@v0.2.0 # yamllint disable-line rule:line-length with: dockerfile: Dockerfile @@ -32,7 +32,7 @@ jobs: runs-on: ubuntu-latest steps: - uses: actions/checkout@v3 - - uses: celestiaorg/.github/.github/actions/yamllint@v0.1.1 + - uses: celestiaorg/.github/.github/actions/yamllint@v0.2.0 markdown-lint: name: Markdown Lint @@ -58,7 +58,7 @@ jobs: steps: - uses: actions/checkout@v3 - name: Version Release - uses: celestiaorg/.github/.github/actions/version-release@v0.1.1 + uses: celestiaorg/.github/.github/actions/version-release@v0.2.0 with: github-token: ${{secrets.GITHUB_TOKEN}} version-bump: ${{inputs.version}} diff --git a/.github/workflows/docker-build-publish.yml b/.github/workflows/docker-build-publish.yml index ce629145b9..0ddebbd750 100644 --- a/.github/workflows/docker-build-publish.yml +++ b/.github/workflows/docker-build-publish.yml @@ -17,6 +17,6 @@ jobs: permissions: contents: write packages: write - uses: celestiaorg/.github/.github/workflows/reusable_dockerfile_pipeline.yml@v0.1.1 # yamllint disable-line rule:line-length + uses: celestiaorg/.github/.github/workflows/reusable_dockerfile_pipeline.yml@v0.2.0 # yamllint disable-line rule:line-length with: dockerfile: Dockerfile diff --git a/.github/workflows/go-ci.yml b/.github/workflows/go-ci.yml index 8f4729c7c8..e6c4e5881b 100644 --- a/.github/workflows/go-ci.yml +++ b/.github/workflows/go-ci.yml @@ -23,7 +23,7 @@ jobs: go-version: ${{ env.GO_VERSION }} - name: golangci-lint - uses: golangci/golangci-lint-action@v3.4.0 + uses: golangci/golangci-lint-action@v3.6.0 with: version: v1.52.2 @@ -59,7 +59,7 @@ jobs: run: make test-unit - name: upload coverage - uses: codecov/codecov-action@v3.1.3 + uses: codecov/codecov-action@v3.1.4 with: token: ${{ secrets.CODECOV_TOKEN }} file: ./coverage.txt diff --git a/.github/workflows/labels.yml b/.github/workflows/labels.yml index 314f2fcb14..bed2b3352c 100644 --- a/.github/workflows/labels.yml +++ b/.github/workflows/labels.yml @@ -12,7 +12,7 @@ jobs: label: runs-on: ubuntu-latest steps: - - uses: mheap/github-action-required-labels@v4 + - uses: mheap/github-action-required-labels@v5 with: mode: minimum count: 1 diff --git a/Dockerfile b/Dockerfile index 214003dc08..9346c559d1 100644 --- a/Dockerfile +++ b/Dockerfile @@ -15,7 +15,7 @@ COPY . . RUN make build && make cel-key -FROM docker.io/alpine:3.17.3 +FROM docker.io/alpine:3.18.0 # Read here why UID 10001: https://github.com/hexops/dockerfile/blob/main/README.md#do-not-use-a-uid-below-10000 ARG UID=10001 diff --git a/Makefile b/Makefile index 79c45d6597..8f4d1b42a9 100644 --- a/Makefile +++ b/Makefile @@ -152,7 +152,7 @@ pb-gen: ## openrpc-gen: Generate OpenRPC spec for Celestia-Node's RPC api openrpc-gen: @echo "--> Generating OpenRPC spec" - @go run ./cmd/docgen fraud header state share das p2p node + @go run ./cmd/docgen fraud header state share das p2p node blob .PHONY: openrpc-gen ## lint-imports: Lint only Go imports. 
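The rest of this diff introduces the blob module that the Makefile's openrpc-gen target now includes: the blob package itself, gateway deprecations, and RPC client wiring. Below is a minimal end-to-end sketch of how the new namespace is expected to be used from the Go RPC client — assuming the nodebuilder blob API mirrors the Submit/Get signatures of blob.Service shown later in this diff and that the node listens on the default RPC address; the address, auth token, namespace bytes, and payload are placeholders:

package main

import (
	"context"
	"fmt"

	"github.com/celestiaorg/celestia-node/api/rpc/client"
	"github.com/celestiaorg/celestia-node/blob"
	"github.com/celestiaorg/celestia-node/share"
)

func main() {
	ctx := context.Background()

	// Dial the node's RPC endpoint (placeholder address and token).
	cl, err := client.NewClient(ctx, "http://localhost:26658", "<auth-token>")
	if err != nil {
		panic(err)
	}
	defer cl.Close()

	// Build a blob under a v0 namespace, mirroring the docgen examples below.
	ns, err := share.NewNamespaceV0([]byte{0x1, 0x2, 0x3, 0x4, 0x5})
	if err != nil {
		panic(err)
	}
	b, err := blob.NewBlob(0, ns, []byte("hello celestia"))
	if err != nil {
		panic(err)
	}

	// Submit reports the height at which the blob was included.
	height, err := cl.Blob.Submit(ctx, []*blob.Blob{b})
	if err != nil {
		panic(err)
	}

	// Fetch the blob back by namespace and commitment.
	got, err := cl.Blob.Get(ctx, height, b.Namespace(), b.Commitment)
	if err != nil {
		panic(err)
	}
	fmt.Println(got.Commitment.Equal(b.Commitment))
}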
diff --git a/api/docgen/examples.go b/api/docgen/examples.go index ae65501ba6..80a8c64d93 100644 --- a/api/docgen/examples.go +++ b/api/docgen/examples.go @@ -18,8 +18,10 @@ import ( "golang.org/x/text/language" "github.com/celestiaorg/go-fraud" + "github.com/celestiaorg/nmt" "github.com/celestiaorg/rsmt2d" + "github.com/celestiaorg/celestia-node/blob" "github.com/celestiaorg/celestia-node/das" "github.com/celestiaorg/celestia-node/header" "github.com/celestiaorg/celestia-node/nodebuilder/node" @@ -81,6 +83,8 @@ func init() { } addToExampleValues(valAddr) + addToExampleValues(state.Address{Address: addr}) + var txResponse *state.TxResponse err = json.Unmarshal([]byte(exampleTxResponse), &txResponse) if err != nil { @@ -128,6 +132,22 @@ func init() { Addrs: []multiaddr.Multiaddr{ma}, } addToExampleValues(addrInfo) + + namespace, err := share.NewNamespaceV0([]byte{0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0x10}) + if err != nil { + panic(err) + } + addToExampleValues(namespace) + + generatedBlob, err := blob.NewBlob(0, namespace, []byte("This is an example of some blob data")) + if err != nil { + panic(err) + } + addToExampleValues(generatedBlob) + + proof := nmt.NewInclusionProof(0, 4, [][]byte{[]byte("test")}, true) + blobProof := &blob.Proof{&proof} + addToExampleValues(blobProof) } func addToExampleValues(v interface{}) { diff --git a/api/gateway/availability.go b/api/gateway/availability.go index b35593e24f..e5e53e0dba 100644 --- a/api/gateway/availability.go +++ b/api/gateway/availability.go @@ -2,7 +2,6 @@ package gateway import ( "encoding/json" - "fmt" "net/http" "strconv" @@ -28,20 +27,6 @@ func (h *Handler) handleHeightAvailabilityRequest(w http.ResponseWriter, r *http return } - //TODO: change this to NetworkHead once the adjacency in the store is fixed. - head, err := h.header.LocalHead(r.Context()) - if err != nil { - writeError(w, http.StatusInternalServerError, heightAvailabilityEndpoint, err) - return - } - if headHeight := int(head.Height()); headHeight < height { - err = fmt.Errorf( - "current head local chain head: %d is lower than requested height: %d"+ - " give header sync some time and retry later", headHeight, height) - writeError(w, http.StatusServiceUnavailable, heightAvailabilityEndpoint, err) - return - } - header, err := h.header.GetByHeight(r.Context(), uint64(height)) if err != nil { writeError(w, http.StatusInternalServerError, heightAvailabilityEndpoint, err) diff --git a/api/gateway/das.go b/api/gateway/das.go index 565cbd7460..88dc97927c 100644 --- a/api/gateway/das.go +++ b/api/gateway/das.go @@ -10,6 +10,7 @@ const ( ) func (h *Handler) handleDASStateRequest(w http.ResponseWriter, r *http.Request) { + logDeprecation(dasStateEndpoint, "das.SamplingStats") stats, err := h.das.SamplingStats(r.Context()) if err != nil { writeError(w, http.StatusInternalServerError, dasStateEndpoint, err) diff --git a/api/gateway/endpoints.go b/api/gateway/endpoints.go index dfcb96bd06..0ae93b112c 100644 --- a/api/gateway/endpoints.go +++ b/api/gateway/endpoints.go @@ -5,21 +5,32 @@ import ( "net/http" ) -func (h *Handler) RegisterEndpoints(rpc *Server) { +func (h *Handler) RegisterEndpoints(rpc *Server, deprecatedEndpointsEnabled bool) { + if deprecatedEndpointsEnabled { + log.Warn("Deprecated endpoints will be removed from the gateway in the next release. 
Use the RPC instead.") + // state endpoints + rpc.RegisterHandlerFunc(balanceEndpoint, h.handleBalanceRequest, http.MethodGet) + rpc.RegisterHandlerFunc(submitPFBEndpoint, h.handleSubmitPFB, http.MethodPost) + + // staking queries + rpc.RegisterHandlerFunc(fmt.Sprintf("%s/{%s}", queryDelegationEndpoint, addrKey), h.handleQueryDelegation, + http.MethodGet) + rpc.RegisterHandlerFunc(fmt.Sprintf("%s/{%s}", queryUnbondingEndpoint, addrKey), h.handleQueryUnbonding, + http.MethodGet) + rpc.RegisterHandlerFunc(queryRedelegationsEndpoint, h.handleQueryRedelegations, + http.MethodPost) + + // DASer endpoints + // only register if DASer service is available + if h.das != nil { + rpc.RegisterHandlerFunc(dasStateEndpoint, h.handleDASStateRequest, http.MethodGet) + } + } + // state endpoints - rpc.RegisterHandlerFunc(balanceEndpoint, h.handleBalanceRequest, http.MethodGet) rpc.RegisterHandlerFunc(fmt.Sprintf("%s/{%s}", balanceEndpoint, addrKey), h.handleBalanceRequest, http.MethodGet) rpc.RegisterHandlerFunc(submitTxEndpoint, h.handleSubmitTx, http.MethodPost) - rpc.RegisterHandlerFunc(submitPFBEndpoint, h.handleSubmitPFB, http.MethodPost) - - // staking queries - rpc.RegisterHandlerFunc(fmt.Sprintf("%s/{%s}", queryDelegationEndpoint, addrKey), h.handleQueryDelegation, - http.MethodGet) - rpc.RegisterHandlerFunc(fmt.Sprintf("%s/{%s}", queryUnbondingEndpoint, addrKey), h.handleQueryUnbonding, - http.MethodGet) - rpc.RegisterHandlerFunc(queryRedelegationsEndpoint, h.handleQueryRedelegations, - http.MethodPost) // share endpoints rpc.RegisterHandlerFunc(fmt.Sprintf("%s/{%s}/height/{%s}", namespacedSharesEndpoint, nIDKey, heightKey), @@ -39,10 +50,4 @@ func (h *Handler) RegisterEndpoints(rpc *Server) { rpc.RegisterHandlerFunc(fmt.Sprintf("%s/{%s}", headerByHeightEndpoint, heightKey), h.handleHeaderRequest, http.MethodGet) rpc.RegisterHandlerFunc(headEndpoint, h.handleHeadRequest, http.MethodGet) - - // DASer endpoints - // only register if DASer service is available - if h.das != nil { - rpc.RegisterHandlerFunc(dasStateEndpoint, h.handleDASStateRequest, http.MethodGet) - } } diff --git a/api/gateway/header.go b/api/gateway/header.go index 88ac3b0923..5b8a82351c 100644 --- a/api/gateway/header.go +++ b/api/gateway/header.go @@ -2,7 +2,6 @@ package gateway import ( "encoding/json" - "fmt" "net/http" "strconv" @@ -70,24 +69,12 @@ func (h *Handler) performGetHeaderRequest( writeError(w, http.StatusBadRequest, endpoint, err) return nil, err } - //TODO: change this to NetworkHead once the adjacency in the store is fixed. 
- head, err := h.header.LocalHead(r.Context()) - if err != nil { - writeError(w, http.StatusInternalServerError, heightAvailabilityEndpoint, err) - return nil, err - } - if headHeight := int(head.Height()); headHeight < height { - err = fmt.Errorf( - "current head local chain head: %d is lower than requested height: %d"+ - " give header sync some time and retry later", headHeight, height) - writeError(w, http.StatusServiceUnavailable, endpoint, err) - return nil, err - } - // perform request + header, err := h.header.GetByHeight(r.Context(), uint64(height)) if err != nil { writeError(w, http.StatusInternalServerError, endpoint, err) return nil, err } + return header, nil } diff --git a/api/gateway/share.go b/api/gateway/share.go index 36a9778f1a..db5ed37286 100644 --- a/api/gateway/share.go +++ b/api/gateway/share.go @@ -4,7 +4,6 @@ import ( "context" "encoding/hex" "encoding/json" - "fmt" "net/http" "strconv" @@ -13,7 +12,6 @@ import ( "github.com/celestiaorg/celestia-app/pkg/shares" "github.com/celestiaorg/nmt/namespace" - "github.com/celestiaorg/celestia-node/header" "github.com/celestiaorg/celestia-node/share" ) @@ -44,14 +42,14 @@ func (h *Handler) handleSharesByNamespaceRequest(w http.ResponseWriter, r *http. writeError(w, http.StatusBadRequest, namespacedSharesEndpoint, err) return } - shares, headerHeight, err := h.getShares(r.Context(), height, nID) + shares, err := h.getShares(r.Context(), height, nID) if err != nil { writeError(w, http.StatusInternalServerError, namespacedSharesEndpoint, err) return } resp, err := json.Marshal(&NamespacedSharesResponse{ Shares: shares, - Height: uint64(headerHeight), + Height: height, }) if err != nil { writeError(w, http.StatusInternalServerError, namespacedSharesEndpoint, err) @@ -69,7 +67,7 @@ func (h *Handler) handleDataByNamespaceRequest(w http.ResponseWriter, r *http.Re writeError(w, http.StatusBadRequest, namespacedDataEndpoint, err) return } - shares, headerHeight, err := h.getShares(r.Context(), height, nID) + shares, err := h.getShares(r.Context(), height, nID) if err != nil { writeError(w, http.StatusInternalServerError, namespacedDataEndpoint, err) return @@ -81,7 +79,7 @@ func (h *Handler) handleDataByNamespaceRequest(w http.ResponseWriter, r *http.Re } resp, err := json.Marshal(&NamespacedDataResponse{ Data: data, - Height: uint64(headerHeight), + Height: height, }) if err != nil { writeError(w, http.StatusInternalServerError, namespacedDataEndpoint, err) @@ -93,33 +91,18 @@ func (h *Handler) handleDataByNamespaceRequest(w http.ResponseWriter, r *http.Re } } -func (h *Handler) getShares(ctx context.Context, height uint64, nID namespace.ID) ([]share.Share, int64, error) { - // get header - var ( - err error - header *header.ExtendedHeader - ) - - //TODO: change this to NetworkHead once the adjacency in the store is fixed. 
- header, err = h.header.LocalHead(ctx) +func (h *Handler) getShares(ctx context.Context, height uint64, nID namespace.ID) ([]share.Share, error) { + header, err := h.header.GetByHeight(ctx, height) if err != nil { - return nil, 0, err + return nil, err } - if height > 0 { - if storeHeight := uint64(header.Height()); storeHeight < height { - return nil, 0, fmt.Errorf( - "current head local chain head: %d is lower than requested height: %d"+ - " give header sync some time and retry later", storeHeight, height) - } - header, err = h.header.GetByHeight(ctx, height) - } + shares, err := h.share.GetSharesByNamespace(ctx, header.DAH, nID) if err != nil { - return nil, 0, err + return nil, err } - // perform request - shares, err := h.share.GetSharesByNamespace(ctx, header.DAH, nID) - return shares.Flatten(), header.Height(), err + + return shares.Flatten(), nil } func dataFromShares(input []share.Share) (data [][]byte, err error) { diff --git a/api/gateway/share_test.go b/api/gateway/share_test.go index 423d08682b..16cf606680 100644 --- a/api/gateway/share_test.go +++ b/api/gateway/share_test.go @@ -2,11 +2,8 @@ package gateway import ( _ "embed" - "encoding/base64" - "encoding/json" "testing" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" coretypes "github.com/tendermint/tendermint/types" @@ -48,37 +45,3 @@ func Test_dataFromShares(t *testing.T) { require.Equal(t, testData, parsedSSSShares) } - -// sharesBase64JSON is the base64 encoded share data from Blockspace Race -// block height 559108 and namespace e8e5f679bf7116cb. -// -//go:embed "testdata/sharesBase64.json" -var sharesBase64JSON string - -// Test_dataFromSharesBSR reproduces an error that occurred when parsing shares -// on Blockspace Race block height 559108 namespace e8e5f679bf7116cb. -// -// https://github.com/celestiaorg/celestia-app/issues/1816 -func Test_dataFromSharesBSR(t *testing.T) { - t.Skip("skip until sharesBase64JSON is regenerated with v1 compatibility") - - var sharesBase64 []string - err := json.Unmarshal([]byte(sharesBase64JSON), &sharesBase64) - assert.NoError(t, err) - input := decode(sharesBase64) - - _, err = dataFromShares(input) - assert.NoError(t, err) -} - -// decode returns the raw shares from base64Encoded. 
-func decode(base64Encoded []string) (rawShares [][]byte) { - for _, share := range base64Encoded { - rawShare, err := base64.StdEncoding.DecodeString(share) - if err != nil { - panic(err) - } - rawShares = append(rawShares, rawShare) - } - return rawShares -} diff --git a/api/gateway/state.go b/api/gateway/state.go index 00a318c5ae..b584b00d36 100644 --- a/api/gateway/state.go +++ b/api/gateway/state.go @@ -9,6 +9,9 @@ import ( "github.com/cosmos/cosmos-sdk/types" "github.com/gorilla/mux" + "github.com/celestiaorg/celestia-app/pkg/appconsts" + + "github.com/celestiaorg/celestia-node/blob" "github.com/celestiaorg/celestia-node/state" ) @@ -69,8 +72,9 @@ func (h *Handler) handleBalanceRequest(w http.ResponseWriter, r *http.Request) { } addr = valAddr.Bytes() } - bal, err = h.state.BalanceForAddress(r.Context(), addr) + bal, err = h.state.BalanceForAddress(r.Context(), state.Address{Address: addr}) } else { + logDeprecation(balanceEndpoint, "state.Balance") bal, err = h.state.Balance(r.Context()) } if err != nil { @@ -119,6 +123,7 @@ func (h *Handler) handleSubmitTx(w http.ResponseWriter, r *http.Request) { } func (h *Handler) handleSubmitPFB(w http.ResponseWriter, r *http.Request) { + logDeprecation(submitPFBEndpoint, "blob.Submit or state.SubmitPayForBlob") // decode request var req submitPFBRequest err := json.NewDecoder(r.Body).Decode(&req) @@ -137,24 +142,38 @@ func (h *Handler) handleSubmitPFB(w http.ResponseWriter, r *http.Request) { return } fee := types.NewInt(req.Fee) - // perform request - txResp, err := h.state.SubmitPayForBlob(r.Context(), nID, data, fee, req.GasLimit) + + constructedBlob, err := blob.NewBlob(appconsts.DefaultShareVersion, nID, data) if err != nil { - writeError(w, http.StatusInternalServerError, submitPFBEndpoint, err) + writeError(w, http.StatusBadRequest, submitPFBEndpoint, err) return } - resp, err := json.Marshal(txResp) + + // perform request + txResp, txerr := h.state.SubmitPayForBlob(r.Context(), fee, req.GasLimit, []*blob.Blob{constructedBlob}) + if txerr != nil && txResp == nil { + // no tx data to return + writeError(w, http.StatusInternalServerError, submitPFBEndpoint, err) + } + + bs, err := json.Marshal(&txResp) if err != nil { writeError(w, http.StatusInternalServerError, submitPFBEndpoint, err) return } - _, err = w.Write(resp) + + // if error returned, change status from 200 to 206 + if txerr != nil { + w.WriteHeader(http.StatusPartialContent) + } + _, err = w.Write(bs) if err != nil { log.Errorw("writing response", "endpoint", submitPFBEndpoint, "err", err) } } func (h *Handler) handleQueryDelegation(w http.ResponseWriter, r *http.Request) { + logDeprecation(queryDelegationEndpoint, "state.QueryDelegation") // read and parse request vars := mux.Vars(r) addrStr, exists := vars[addrKey] @@ -186,6 +205,7 @@ func (h *Handler) handleQueryDelegation(w http.ResponseWriter, r *http.Request) } func (h *Handler) handleQueryUnbonding(w http.ResponseWriter, r *http.Request) { + logDeprecation(queryUnbondingEndpoint, "state.QueryUnbonding") // read and parse request vars := mux.Vars(r) addrStr, exists := vars[addrKey] @@ -217,6 +237,7 @@ func (h *Handler) handleQueryUnbonding(w http.ResponseWriter, r *http.Request) { } func (h *Handler) handleQueryRedelegations(w http.ResponseWriter, r *http.Request) { + logDeprecation(queryRedelegationsEndpoint, "state.QueryRedelegations") var req queryRedelegationsRequest err := json.NewDecoder(r.Body).Decode(&req) if err != nil { @@ -248,3 +269,8 @@ func (h *Handler) handleQueryRedelegations(w http.ResponseWriter, r 
*http.Reques log.Errorw("writing response", "endpoint", queryRedelegationsEndpoint, "err", err) } } + +func logDeprecation(endpoint string, alternative string) { + log.Warn("The " + endpoint + " endpoint is deprecated and will be removed in the next release. Please " + + "use " + alternative + " from the RPC instead.") +} diff --git a/api/gateway/state_test.go b/api/gateway/state_test.go new file mode 100644 index 0000000000..a613471a04 --- /dev/null +++ b/api/gateway/state_test.go @@ -0,0 +1,57 @@ +package gateway + +import ( + "bytes" + "encoding/hex" + "encoding/json" + "errors" + "net/http" + "net/http/httptest" + "testing" + + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/require" + + stateMock "github.com/celestiaorg/celestia-node/nodebuilder/state/mocks" + "github.com/celestiaorg/celestia-node/share" + "github.com/celestiaorg/celestia-node/state" +) + +func TestHandleSubmitPFB(t *testing.T) { + ctrl := gomock.NewController(t) + mock := stateMock.NewMockModule(ctrl) + handler := NewHandler(mock, nil, nil, nil) + + t.Run("partial response", func(t *testing.T) { + txResponse := state.TxResponse{ + Height: 1, + TxHash: "hash", + Codespace: "codespace", + Code: 1, + } + // simulate core-app err, since it is not exported + timedErr := errors.New("timed out waiting for tx to be included in a block") + mock.EXPECT().SubmitPayForBlob(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()). + Return(&txResponse, timedErr) + + ns, err := share.NewNamespaceV0([]byte("abc")) + require.NoError(t, err) + hexNs := hex.EncodeToString(ns[:]) + + bs, err := json.Marshal(submitPFBRequest{ + NamespaceID: hexNs, + Data: "DEADBEEF", + }) + require.NoError(t, err) + httpreq := httptest.NewRequest("GET", "/", bytes.NewReader(bs)) + respRec := httptest.NewRecorder() + handler.handleSubmitPFB(respRec, httpreq) + + var resp state.TxResponse + err = json.NewDecoder(respRec.Body).Decode(&resp) + require.NoError(t, err) + + require.Equal(t, http.StatusPartialContent, respRec.Code) + require.Equal(t, resp, txResponse) + }) +} diff --git a/api/gateway/testdata/sharesBase64.json b/api/gateway/testdata/sharesBase64.json deleted file mode 100644 index e73d69f420..0000000000 --- a/api/gateway/testdata/sharesBase64.json +++ /dev/null @@ -1,8 +0,0 @@ -[ - "6OX2eb9xFssBAAAEQwBIdZAtC1rH+hFrZ7F6azCBAAAAAAQreNqM02s323cAAGBVRFEjy9ENcVuibWqbtSpxGWWVuESNkhDZEYxlVZdqWd2bLhU1GkFHxzQRl6TVTpW4xQ5FXdJqyTnp1KXuSnpaC0GJbKdn/73/fYHn3dNG1Nj24dbtlBwl5gUX4fwF1xmJg2X25QTU1a3ApPnwUCaSjDxwhTuIScHdY0ipKG1D5JOi66xWtbDcgtinX1PTO1YnWtRkfh+Pt+r+TwvkrrldM1ffDDrrg3sRRhsNFCj98EYXCuMeuapztgzSwTQbSKPKlnK3O3yDjyPuyDoNx7F5kx6jLc18UZZPpBdjJVMLTENBmrYF5WeOjgmentI1pBO5ILuduFCxc0PBIorzxZqvO4Vgmi2kuWlLZc5we9RFzue3XIuteS5tB8iTjcnn43hTXxM0JqLBtCOQFnN5IcY6Ej3Yo+GJx2O5dITSJM8v6/UxV1fmUDH12TqYhoE0UrYG7RAimeQxUfLTwQBLA7b8L/um/ggX+WL/ZlhqhBxMs4M03Z5ShCJ7Zrde/eKZZC/mCL+I8ZX7LEwrqIo8M8DM2QHTvoA0ZIfXStvLhrtI80zlYX3vSyFa5yzYn9ClG5Fqn870iex9XHv42Kg08S75gTytyENSVZFXTh/Zh6SGiHM=", - 
"6OX2eb9xFssA0rF2e0KnD5o9pNk4qqwxTgH1FbeHqPQuwUvJSU27f1b2T519HL3DZ9lSwLTjkPbG7cf37HVVvaXYqd0lMXg5VPSb70CdMLQ1vHV5ulsvDUxzgDS8V3KWimzsUHi5LHBjweGbAiXFdthUl1peSsrSM5sPAtMcIU2yjHIswLYbCPiMj30cWbrnl725tuPTubW3Vpk80nw1mIaDNIXV7KE9dP7bRsuUORVsMeddZT9WFNID68HBK3v5ccZgmjOkoSitGRSj8BsXN/EwuLHcv3dpd9P0lzX+meGPnu9/+h0PTHOFNIyxC83lyjHh7vBUB/nPNPjbLF1CPC9Tf+zvExnNY5/qaHITwuo5KNOh2ldOc32xJu65QQokrMBuWPBtiWfA6dWT7A+aG6SZPM+Z/cPPxqzhAnrVrSO2ITpx8sRDkacyOM5PPllcfBRMOwVpucEoTKlSsHL/Uizd5ipHIduZdbb0Cqh413zwzpy6MQlM84Q0QkztzIj3Lq6FMXCzxQvVa06MFY4+fiJYIiA5NMziHph2+v9Z7cO8X839w++Nm1TTspMyzFYf3I9HM9fLih7JESMGNDAND2nVnEppVJOlSCTVH1yrx+zJtlJ5hlhPfnxVoQqbxnME07whracrjtJM9+3bFs8oFuGq+agEnFNS8kYCCV36mR4=", - "6OX2eb9xFssATGwIpvlCmojMCZnoPswyLpXMPWwValWdC3rzlGDUhOhApyZ8+XsUmEaEtOk6q/c895prbA8li9SZP3WKrTH7ymMgMmJB8oNyTo8Kpp3p/jcAAP//G9WQYQEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=", - "6OX2eb9xFssBAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=", - "6OX2eb9xFssBAAAC7gBH3m9XunkTbdymgmQ9EGUAAAAAAALWeNra4cfww3vB69mdR5Z4p8T9O/XhRMXJL1Y3mXwXyZuY/cyXUVk5vXTt8iYVvQXH1vN+kTk1L/iF77IaW02r+b9ril+9vVWz49KhjQbHPiyob0nJddM7ADFvpvxmVyfvNdsq330QChUvZipu3Dqhskl/Qo+YzFX+9a/2NanoL1jyNeZ8n1tC5bU/3WfbtJqrrW90nmiZ+LvD8Dqn6I+IPa9A5hlAzTPc/3d2j8lDd/7JfpVtnZkHnH9OPuZ8smOP95/c6co3VKcTa54R1Ly6unfXHLac3aclWXv63/LJJ2riWVoLc5YmxSw/WJHQ7bCfWPNMoOZtOGxz7dKGu3sKROMqAjrMK3xnn3t3e3etf47iluULrmsxEGueGdS83vsqTC/4X57I17nRpxCi8X1lF9/Mk4V5txbyBq/R+mG2kVjzLKDm/e81kP/w7+n9uPsf2SKO37f31Gx72SoTFaGzdHF55Ksvi4k1zwpqngV3p2344yMvVj/TeC3lMnHprSdT1YJfGHjyz+LrnZ6q3EqseTZQ88S+W96xfj1F2f2eUU/y16CrHcfnvf3qdOtlkHXH86+pK5ybVAwW/Dr1pGCZzK5QtStHtb6sVFuQsig4ftZq1a46qUdT7m3hTwc=", - "6OX2eb9xFssAmWcHNW+S6eFdYXNeXPrbcmVS4Jo9zG/2vrsme05s1tMfW6ILLH3mEmueA9S8tka+O/zFxSXz6hqnZpm2qt393sGal79490QTjXrxzf8/EmueE9S8yDZJq+P6fi+8XnRW7n/yzPKX4gfLkLrsmOPuDYuKWus1iTXPBWrem89/37l9ejxT/s0cZYe5K+IN6h6qzF377/rp9dnvVBrmXSLWPDeoeclv12ZbWd3OC3QV35SZeZija9qGZcu/bmfJtjc4azH/YS+x5nlAzWO7/o/LxPc+X1DUE8299hUT5bYXWs6c+FBhWuPC6T5sPMuINc/rACAAAP//N89ueAEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=" -] diff --git a/api/rpc/client/client.go b/api/rpc/client/client.go index 783d3f53cd..7ac8a55b3d 100644 --- 
a/api/rpc/client/client.go +++ b/api/rpc/client/client.go @@ -8,6 +8,7 @@ import ( "github.com/filecoin-project/go-jsonrpc" "github.com/celestiaorg/celestia-node/api/rpc/perms" + "github.com/celestiaorg/celestia-node/nodebuilder/blob" "github.com/celestiaorg/celestia-node/nodebuilder/das" "github.com/celestiaorg/celestia-node/nodebuilder/fraud" "github.com/celestiaorg/celestia-node/nodebuilder/header" @@ -17,17 +18,11 @@ import ( "github.com/celestiaorg/celestia-node/nodebuilder/state" ) -// TODO: this duplication of strings many times across the codebase can be avoided with issue #1176 -var client Client -var Modules = map[string]interface{}{ - "share": &client.Share.Internal, - "state": &client.State.Internal, - "header": &client.Header.Internal, - "fraud": &client.Fraud.Internal, - "das": &client.DAS.Internal, - "p2p": &client.P2P.Internal, - "node": &client.Node.Internal, -} +var ( + // staticClient is used for generating the OpenRPC spec. + staticClient Client + Modules = moduleMap(&staticClient) +) type Client struct { Fraud fraud.API @@ -37,6 +32,7 @@ type Client struct { DAS das.API P2P p2p.API Node node.API + Blob blob.API closer multiClientCloser } @@ -58,7 +54,7 @@ func (m *multiClientCloser) closeAll() { } } -// Close closes the connections to all namespaces registered on the client. +// Close closes the connections to all namespaces registered on the staticClient. func (c *Client) Close() { c.closer.closeAll() } @@ -77,7 +73,8 @@ func NewClient(ctx context.Context, addr string, token string) (*Client, error) func newClient(ctx context.Context, addr string, authHeader http.Header) (*Client, error) { var multiCloser multiClientCloser - for name, module := range Modules { + var client Client + for name, module := range moduleMap(&client) { closer, err := jsonrpc.NewClient(ctx, addr, name, module, authHeader) if err != nil { return nil, err @@ -87,3 +84,17 @@ func newClient(ctx context.Context, addr string, authHeader http.Header) (*Clien return &client, nil } + +func moduleMap(client *Client) map[string]interface{} { + // TODO: this duplication of strings many times across the codebase can be avoided with issue #1176 + return map[string]interface{}{ + "share": &client.Share.Internal, + "state": &client.State.Internal, + "header": &client.Header.Internal, + "fraud": &client.Fraud.Internal, + "das": &client.DAS.Internal, + "p2p": &client.P2P.Internal, + "node": &client.Node.Internal, + "blob": &client.Blob.Internal, + } +} diff --git a/api/rpc/server.go b/api/rpc/server.go index a4c8c21ce7..3357140e68 100644 --- a/api/rpc/server.go +++ b/api/rpc/server.go @@ -2,7 +2,6 @@ package rpc import ( "context" - "encoding/json" "net" "net/http" "reflect" @@ -15,6 +14,7 @@ import ( logging "github.com/ipfs/go-log/v2" "github.com/celestiaorg/celestia-node/api/rpc/perms" + "github.com/celestiaorg/celestia-node/libs/authtoken" ) var log = logging.Logger("rpc") @@ -51,17 +51,7 @@ func NewServer(address, port string, secret jwt.Signer) *Server { // reached if a token is provided in the header of the request, otherwise only // methods with `read` permissions are accessible. 
func (s *Server) verifyAuth(_ context.Context, token string) ([]auth.Permission, error) { - tk, err := jwt.ParseAndVerifyString(token, s.auth) - if err != nil { - return nil, err - } - p := new(perms.JWTPayload) - err = json.Unmarshal(tk.RawClaims(), p) - if err != nil { - return nil, err - } - // check permissions - return p.Allow, nil + return authtoken.ExtractSignedPermissions(s.auth, token) } // RegisterService registers a service onto the RPC server. All methods on the service will then be diff --git a/api/rpc_test.go b/api/rpc_test.go index f2e088ff1f..898a307389 100644 --- a/api/rpc_test.go +++ b/api/rpc_test.go @@ -21,6 +21,8 @@ import ( daspkg "github.com/celestiaorg/celestia-node/das" headerpkg "github.com/celestiaorg/celestia-node/header" "github.com/celestiaorg/celestia-node/nodebuilder" + "github.com/celestiaorg/celestia-node/nodebuilder/blob" + blobMock "github.com/celestiaorg/celestia-node/nodebuilder/blob/mocks" "github.com/celestiaorg/celestia-node/nodebuilder/das" dasMock "github.com/celestiaorg/celestia-node/nodebuilder/das/mocks" "github.com/celestiaorg/celestia-node/nodebuilder/fraud" @@ -81,6 +83,7 @@ type api struct { DAS das.Module Node node.Module P2P p2p.Module + Blob blob.Module } func TestModulesImplementFullAPI(t *testing.T) { @@ -336,6 +339,7 @@ func setupNodeWithModifiedRPC(t *testing.T) (*nodebuilder.Node, *mockAPI) { dasMock.NewMockModule(ctrl), p2pMock.NewMockModule(ctrl), nodeMock.NewMockModule(ctrl), + blobMock.NewMockModule(ctrl), } // given the behavior of fx.Invoke, this invoke will be called last as it is added at the root @@ -348,6 +352,7 @@ func setupNodeWithModifiedRPC(t *testing.T) (*nodebuilder.Node, *mockAPI) { srv.RegisterService("das", mockAPI.Das) srv.RegisterService("p2p", mockAPI.P2P) srv.RegisterService("node", mockAPI.Node) + srv.RegisterService("blob", mockAPI.Blob) }) nd := nodebuilder.TestNode(t, node.Full, invokeRPC) // start node @@ -376,6 +381,7 @@ func setupNodeWithAuthedRPC(t *testing.T, auth jwt.Signer) (*nodebuilder.Node, * dasMock.NewMockModule(ctrl), p2pMock.NewMockModule(ctrl), nodeMock.NewMockModule(ctrl), + blobMock.NewMockModule(ctrl), } // given the behavior of fx.Invoke, this invoke will be called last as it is added at the root @@ -388,6 +394,7 @@ func setupNodeWithAuthedRPC(t *testing.T, auth jwt.Signer) (*nodebuilder.Node, * srv.RegisterAuthedService("das", mockAPI.Das, &das.API{}) srv.RegisterAuthedService("p2p", mockAPI.P2P, &p2p.API{}) srv.RegisterAuthedService("node", mockAPI.Node, &node.API{}) + srv.RegisterAuthedService("blob", mockAPI.Blob, &blob.API{}) }) // fx.Replace does not work here, but fx.Decorate does nd := nodebuilder.TestNode(t, node.Full, invokeRPC, fx.Decorate(func() (jwt.Signer, error) { @@ -411,4 +418,5 @@ type mockAPI struct { Das *dasMock.MockModule P2P *p2pMock.MockModule Node *nodeMock.MockModule + Blob *blobMock.MockModule } diff --git a/blob/blob.go b/blob/blob.go new file mode 100644 index 0000000000..9771714cb9 --- /dev/null +++ b/blob/blob.go @@ -0,0 +1,164 @@ +package blob + +import ( + "bytes" + "encoding/json" + "fmt" + + appns "github.com/celestiaorg/celestia-app/pkg/namespace" + "github.com/celestiaorg/celestia-app/x/blob/types" + "github.com/celestiaorg/nmt" + "github.com/celestiaorg/nmt/namespace" + + "github.com/celestiaorg/celestia-node/share/ipld" +) + +// Commitment is a Merkle Root of the subtree built from shares of the Blob. +// It is computed by splitting the blob into shares and building the Merkle subtree to be included +// after Submit. 
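// For illustration (ns and data are placeholder inputs; apptypes aliases
// celestia-app's x/blob/types, as in the tests below):
//
//	b, _ := NewBlob(appconsts.DefaultShareVersion, ns, data)
//	com, _ := apptypes.CreateCommitment(&b.Blob) // com equals b.Commitment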
+type Commitment []byte + +func (com Commitment) String() string { + return string(com) +} + +// Equal ensures that commitments are the same +func (com Commitment) Equal(c Commitment) bool { + return bytes.Equal(com, c) +} + +// Proof is a collection of nmt.Proofs that verifies the inclusion of the data. +type Proof []*nmt.Proof + +func (p Proof) Len() int { return len(p) } + +// equal is a temporary method that compares two proofs. +// should be removed in BlobService V1. +func (p Proof) equal(input Proof) error { + if p.Len() != input.Len() { + return ErrInvalidProof + } + + for i, proof := range p { + pNodes := proof.Nodes() + inputNodes := input[i].Nodes() + for i, node := range pNodes { + if !bytes.Equal(node, inputNodes[i]) { + return ErrInvalidProof + } + } + + if proof.Start() != input[i].Start() || proof.End() != input[i].End() { + return ErrInvalidProof + } + + if !bytes.Equal(proof.LeafHash(), input[i].LeafHash()) { + return ErrInvalidProof + } + + } + return nil +} + +type jsonProof struct { + Start int `json:"start"` + End int `json:"end"` + Nodes [][]byte `json:"nodes"` +} + +func (p *Proof) MarshalJSON() ([]byte, error) { + proofs := make([]jsonProof, 0, p.Len()) + for _, pp := range *p { + proofs = append(proofs, jsonProof{ + Start: pp.Start(), + End: pp.End(), + Nodes: pp.Nodes(), + }) + } + + return json.Marshal(proofs) +} + +func (p *Proof) UnmarshalJSON(data []byte) error { + var proofs []jsonProof + err := json.Unmarshal(data, &proofs) + if err != nil { + return err + } + + nmtProofs := make([]*nmt.Proof, len(proofs)) + for i, jProof := range proofs { + nmtProof := nmt.NewInclusionProof(jProof.Start, jProof.End, jProof.Nodes, ipld.NMTIgnoreMaxNamespace) + nmtProofs[i] = &nmtProof + } + + *p = nmtProofs + return nil +} + +// Blob represents any application-specific binary data that anyone can submit to Celestia. +type Blob struct { + types.Blob `json:"blob"` + + Commitment Commitment `json:"commitment"` +} + +// NewBlob constructs a new blob from the provided namespace.ID and data. +func NewBlob(shareVersion uint8, namespace namespace.ID, data []byte) (*Blob, error) { + if len(namespace) != appns.NamespaceSize { + return nil, fmt.Errorf("invalid size of the namespace id. got:%d, want:%d", len(namespace), appns.NamespaceSize) + } + + ns, err := appns.New(namespace[appns.NamespaceVersionSize-1], namespace[appns.NamespaceVersionSize:]) + if err != nil { + return nil, err + } + + blob, err := types.NewBlob(ns, data, shareVersion) + if err != nil { + return nil, err + } + + com, err := types.CreateCommitment(blob) + if err != nil { + return nil, err + } + return &Blob{Blob: *blob, Commitment: com}, nil +} + +// Namespace returns blob's namespace. +func (b *Blob) Namespace() namespace.ID { + return append([]byte{uint8(b.NamespaceVersion)}, b.NamespaceId...) 
+} + +type jsonBlob struct { + Namespace namespace.ID `json:"namespace"` + Data []byte `json:"data"` + ShareVersion uint32 `json:"share_version"` + Commitment Commitment `json:"commitment"` +} + +func (b *Blob) MarshalJSON() ([]byte, error) { + blob := &jsonBlob{ + Namespace: b.Namespace(), + Data: b.Data, + ShareVersion: b.ShareVersion, + Commitment: b.Commitment, + } + return json.Marshal(blob) +} + +func (b *Blob) UnmarshalJSON(data []byte) error { + var blob jsonBlob + err := json.Unmarshal(data, &blob) + if err != nil { + return err + } + + b.Blob.NamespaceVersion = uint32(blob.Namespace[0]) + b.Blob.NamespaceId = blob.Namespace[1:] + b.Blob.Data = blob.Data + b.Blob.ShareVersion = blob.ShareVersion + b.Commitment = blob.Commitment + return nil +} diff --git a/blob/blob_test.go b/blob/blob_test.go new file mode 100644 index 0000000000..3aabd6559b --- /dev/null +++ b/blob/blob_test.go @@ -0,0 +1,94 @@ +package blob + +import ( + "reflect" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/tendermint/tendermint/types" + + appns "github.com/celestiaorg/celestia-app/pkg/namespace" + apptypes "github.com/celestiaorg/celestia-app/x/blob/types" + + "github.com/celestiaorg/celestia-node/blob/blobtest" +) + +func TestBlob(t *testing.T) { + appBlobs, err := blobtest.GenerateBlobs([]int{1}, false) + require.NoError(t, err) + blob, err := convertBlobs(appBlobs...) + require.NoError(t, err) + + var test = []struct { + name string + expectedRes func(t *testing.T) + }{ + { + name: "new blob", + expectedRes: func(t *testing.T) { + require.NotEmpty(t, blob) + require.NotEmpty(t, blob[0].Namespace()) + require.NotEmpty(t, blob[0].Data) + require.NotEmpty(t, blob[0].Commitment) + }, + }, + { + name: "compare commitments", + expectedRes: func(t *testing.T) { + comm, err := apptypes.CreateCommitment(&blob[0].Blob) + require.NoError(t, err) + assert.Equal(t, blob[0].Commitment, Commitment(comm)) + }, + }, + { + name: "verify nID", + expectedRes: func(t *testing.T) { + ns, err := appns.New( + blob[0].Namespace()[appns.NamespaceVersionSize-1], + blob[0].Namespace()[appns.NamespaceVersionSize:], + ) + require.NoError(t, err) + require.NoError(t, apptypes.ValidateBlobNamespace(ns)) + }, + }, + { + name: "shares to blobs", + expectedRes: func(t *testing.T) { + sh, err := BlobsToShares(blob...) 
+ require.NoError(t, err) + b, err := SharesToBlobs(sh) + require.NoError(t, err) + assert.Equal(t, len(b), 1) + assert.Equal(t, blob[0].Commitment, b[0].Commitment) + }, + }, + { + name: "blob marshaling", + expectedRes: func(t *testing.T) { + data, err := blob[0].MarshalJSON() + require.NoError(t, err) + + newBlob := &Blob{} + require.NoError(t, newBlob.UnmarshalJSON(data)) + require.True(t, reflect.DeepEqual(blob[0], newBlob)) + }, + }, + } + + for _, tt := range test { + t.Run(tt.name, tt.expectedRes) + } +} + +func convertBlobs(appBlobs ...types.Blob) ([]*Blob, error) { + blobs := make([]*Blob, 0, len(appBlobs)) + for _, b := range appBlobs { + blob, err := NewBlob(b.ShareVersion, append([]byte{b.NamespaceVersion}, b.NamespaceID...), b.Data) + if err != nil { + return nil, err + } + blobs = append(blobs, blob) + } + return blobs, nil +} diff --git a/blob/blobtest/testing.go b/blob/blobtest/testing.go new file mode 100644 index 0000000000..395ef4167a --- /dev/null +++ b/blob/blobtest/testing.go @@ -0,0 +1,36 @@ +package blobtest + +import ( + tmrand "github.com/tendermint/tendermint/libs/rand" + "github.com/tendermint/tendermint/types" + + "github.com/celestiaorg/celestia-app/pkg/appconsts" + "github.com/celestiaorg/celestia-app/pkg/shares" + "github.com/celestiaorg/celestia-app/test/util/testfactory" + + "github.com/celestiaorg/celestia-node/share" +) + +func GenerateBlobs(sizes []int, sameNID bool) ([]types.Blob, error) { + blobs := make([]types.Blob, 0, len(sizes)) + + for _, size := range sizes { + size := rawBlobSize(appconsts.FirstSparseShareContentSize * size) + appBlob := testfactory.GenerateRandomBlob(size) + if !sameNID { + nid, err := share.NewNamespaceV0(tmrand.Bytes(7)) + if err != nil { + return nil, err + } + appBlob.NamespaceVersion = nid[0] + appBlob.NamespaceID = nid[1:] + } + + blobs = append(blobs, appBlob) + } + return blobs, nil +} + +func rawBlobSize(totalSize int) int { + return totalSize - shares.DelimLen(uint64(totalSize)) +} diff --git a/blob/helper.go b/blob/helper.go new file mode 100644 index 0000000000..1fef41dc22 --- /dev/null +++ b/blob/helper.go @@ -0,0 +1,109 @@ +package blob + +import ( + "bytes" + "sort" + + "github.com/tendermint/tendermint/types" + + "github.com/celestiaorg/celestia-app/pkg/appconsts" + "github.com/celestiaorg/celestia-app/pkg/shares" + + "github.com/celestiaorg/celestia-node/share" +) + +// SharesToBlobs takes raw shares and converts them to the blobs. +func SharesToBlobs(rawShares []share.Share) ([]*Blob, error) { + if len(rawShares) == 0 { + return nil, ErrBlobNotFound + } + + appShares := make([]shares.Share, 0, len(rawShares)) + for _, sh := range rawShares { + bShare, err := shares.NewShare(sh) + if err != nil { + return nil, err + } + appShares = append(appShares, *bShare) + } + + shareSequences, err := shares.ParseShares(appShares, true) + if err != nil { + return nil, err + } + + blobs := make([]*Blob, len(shareSequences)) + for i, sequence := range shareSequences { + data, err := sequence.RawData() + if err != nil { + return nil, err + } + if len(data) == 0 { + continue + } + + shareVersion, err := sequence.Shares[0].Version() + if err != nil { + return nil, err + } + + blob, err := NewBlob(shareVersion, sequence.Namespace.Bytes(), data) + if err != nil { + return nil, err + } + blobs[i] = blob + } + return blobs, nil +} + +// BlobsToShares accepts blobs and convert them to the Shares. 
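// A round-trip sketch of the helpers in this file (ns and data are placeholder inputs):
//
//	b, _ := NewBlob(appconsts.DefaultShareVersion, ns, data)
//	rawShares, _ := BlobsToShares(b)
//	restored, _ := SharesToBlobs(rawShares)
//	// restored[0].Commitment equals b.Commitment, as the blob_test.go round-trip case checks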
+func BlobsToShares(blobs ...*Blob) ([]share.Share, error) { + b := make([]types.Blob, len(blobs)) + for i, blob := range blobs { + namespace := blob.Namespace() + b[i] = types.Blob{ + NamespaceVersion: namespace[0], + NamespaceID: namespace[1:], + Data: blob.Data, + ShareVersion: uint8(blob.ShareVersion), + } + } + + sort.Slice(b, func(i, j int) bool { + val := bytes.Compare(b[i].NamespaceID, b[j].NamespaceID) + return val <= 0 + }) + + rawShares, err := shares.SplitBlobs(0, nil, b, false) + if err != nil { + return nil, err + } + return shares.ToBytes(rawShares), nil +} + +const ( + perByteGasTolerance = 2 + pfbGasFixedCost = 80000 +) + +// estimateGas estimates the gas required to pay for a set of blobs in a PFB. +func estimateGas(blobs ...*Blob) uint64 { + totalByteCount := 0 + for _, blob := range blobs { + totalByteCount += len(blob.Data) + appconsts.NamespaceSize + } + variableGasAmount := (appconsts.DefaultGasPerBlobByte + perByteGasTolerance) * totalByteCount + + return uint64(variableGasAmount + pfbGasFixedCost) +} + +// constructAndVerifyBlob reconstruct a Blob from the passed shares and compares commitments. +func constructAndVerifyBlob(sh []share.Share, commitment Commitment) (*Blob, bool, error) { + blob, err := SharesToBlobs(sh) + if err != nil { + return nil, false, err + } + + equal := blob[0].Commitment.Equal(commitment) + return blob[0], equal, nil +} diff --git a/blob/service.go b/blob/service.go new file mode 100644 index 0000000000..0b43493e27 --- /dev/null +++ b/blob/service.go @@ -0,0 +1,290 @@ +package blob + +import ( + "context" + "errors" + "fmt" + "sync" + + "cosmossdk.io/math" + "github.com/cosmos/cosmos-sdk/types" + logging "github.com/ipfs/go-log/v2" + + "github.com/celestiaorg/celestia-app/pkg/appconsts" + "github.com/celestiaorg/celestia-app/pkg/shares" + "github.com/celestiaorg/nmt/namespace" + + "github.com/celestiaorg/celestia-node/header" + "github.com/celestiaorg/celestia-node/share" +) + +var ( + ErrBlobNotFound = errors.New("blob: not found") + ErrInvalidProof = errors.New("blob: invalid proof") + + log = logging.Logger("blob") +) + +// Submitter is an interface that allows submitting blobs to the celestia-core. It is used to +// avoid a circular dependency between the blob and the state package, since the state package needs +// the blob.Blob type for this signature. +type Submitter interface { + SubmitPayForBlob(ctx context.Context, fee math.Int, gasLim uint64, blobs []*Blob) (*types.TxResponse, error) +} + +type Service struct { + // accessor dials the given celestia-core endpoint to submit blobs. + blobSumitter Submitter + // shareGetter retrieves the EDS to fetch all shares from the requested header. + shareGetter share.Getter + // headerGetter fetches header by the provided height + headerGetter func(context.Context, uint64) (*header.ExtendedHeader, error) +} + +func NewService( + submitter Submitter, + getter share.Getter, + headerGetter func(context.Context, uint64) (*header.ExtendedHeader, error), +) *Service { + return &Service{ + blobSumitter: submitter, + shareGetter: getter, + headerGetter: headerGetter, + } +} + +// Submit sends PFB transaction and reports the height in which it was included. +// Allows sending multiple Blobs atomically synchronously. +// Uses default wallet registered on the Node. +func (s *Service) Submit(ctx context.Context, blobs []*Blob) (uint64, error) { + log.Debugw("submitting blobs", "amount", len(blobs)) + + var ( + gasLimit = estimateGas(blobs...) 
+ fee = int64(appconsts.DefaultMinGasPrice * float64(gasLimit)) + ) + + resp, err := s.blobSumitter.SubmitPayForBlob(ctx, types.NewInt(fee), gasLimit, blobs) + if err != nil { + return 0, err + } + return uint64(resp.Height), nil +} + +// Get retrieves all the blobs for given namespaces at the given height by commitment. +func (s *Service) Get(ctx context.Context, height uint64, nID namespace.ID, commitment Commitment) (*Blob, error) { + blob, _, err := s.getByCommitment(ctx, height, nID, commitment) + if err != nil { + return nil, err + } + return blob, nil +} + +// GetProof retrieves all blobs in the given namespaces at the given height by commitment +// and returns their Proof. +func (s *Service) GetProof( + ctx context.Context, + height uint64, + nID namespace.ID, + commitment Commitment, +) (*Proof, error) { + _, proof, err := s.getByCommitment(ctx, height, nID, commitment) + if err != nil { + return nil, err + } + return proof, nil +} + +// GetAll returns all blobs under the given namespaces at the given height. +// GetAll can return blobs and an error in case if some requests failed. +func (s *Service) GetAll(ctx context.Context, height uint64, nIDs []namespace.ID) ([]*Blob, error) { + header, err := s.headerGetter(ctx, height) + if err != nil { + return nil, err + } + + var ( + resultBlobs = make([][]*Blob, len(nIDs)) + resultErr = make([]error, len(nIDs)) + ) + + wg := sync.WaitGroup{} + for i, nID := range nIDs { + wg.Add(1) + go func(i int, nID namespace.ID) { + defer wg.Done() + blobs, err := s.getBlobs(ctx, nID, header.DAH) + if err != nil { + resultErr[i] = fmt.Errorf("getting blobs for nID(%s): %s", nID.String(), err) + return + } + resultBlobs[i] = blobs + }(i, nID) + } + wg.Wait() + + blobs := make([]*Blob, 0) + for _, resBlobs := range resultBlobs { + if len(resBlobs) > 0 { + blobs = append(blobs, resBlobs...) + } + } + + if len(blobs) == 0 { + resultErr = append(resultErr, ErrBlobNotFound) + } + return blobs, errors.Join(resultErr...) +} + +// Included verifies that the blob was included in a specific height. +// To ensure that blob was included in a specific height, we need: +// 1. verify the provided commitment by recomputing it; +// 2. verify the provided Proof against subtree roots that were used in 1.; +func (s *Service) Included( + ctx context.Context, + height uint64, + nID namespace.ID, + proof *Proof, + com Commitment, +) (bool, error) { + // In the current implementation, LNs will have to download all shares to recompute the commitment. + // To achieve 1. we need to modify Proof structure and to store all subtree roots, that were + // involved in commitment creation and then call `merkle.HashFromByteSlices`(tendermint package). + // nmt.Proof is verifying share inclusion by recomputing row roots, so, theoretically, we can do + // the same but using subtree roots. For this case, we need an extra method in nmt.Proof + // that will perform all reconstructions, + // but we have to guarantee that all our stored subtree roots will be on the same height(e.g. one + // level above shares). + // TODO(@vgonkivs): rework the implementation to perform all verification without network requests. + _, resProof, err := s.getByCommitment(ctx, height, nID, com) + switch err { + case nil: + case ErrBlobNotFound: + return false, nil + default: + return false, err + } + return true, resProof.equal(*proof) +} + +// getByCommitment retrieves the DAH row by row, fetching shares and constructing blobs in order to +// compare Commitments. 
Retrieving is stopped once the requested blob/proof is found. +func (s *Service) getByCommitment( + ctx context.Context, + height uint64, + nID namespace.ID, + commitment Commitment, +) (*Blob, *Proof, error) { + log.Infow("requesting blob", + "height", height, + "nID", nID.String()) + + header, err := s.headerGetter(ctx, height) + if err != nil { + return nil, nil, err + } + + var ( + rawShares = make([]share.Share, 0) + proofs = make(Proof, 0) + amount int + blobShare *shares.Share + ) + + namespacedShares, err := s.shareGetter.GetSharesByNamespace(ctx, header.DAH, nID) + if err != nil { + if errors.Is(err, share.ErrNamespaceNotFound) || + errors.Is(err, share.ErrNotFound) { + err = ErrBlobNotFound + } + return nil, nil, err + } + for _, row := range namespacedShares { + if len(row.Shares) == 0 { + break + } + + rawShares = append(rawShares, row.Shares...) + proofs = append(proofs, row.Proof) + + // reconstruct the `blobShare` from the first rawShare in range + // in order to get blob's length(first share will contain this info) + if blobShare == nil { + for i, sh := range rawShares { + bShare, err := shares.NewShare(sh) + if err != nil { + return nil, nil, err + } + + // ensure that the first share is not a NamespacePaddingShare + // these shares are used to satisfy the non-interactive default rules + // and are not the part of the blob, so should be removed. + isPadding, err := bShare.IsPadding() + if err != nil { + return nil, nil, err + } + if isPadding { + continue + } + + blobShare = bShare + // save the length. + length, err := blobShare.SequenceLen() + if err != nil { + return nil, nil, err + } + amount = shares.SparseSharesNeeded(length) + rawShares = rawShares[i:] + break + } + } + + // move to the next row if the blob is incomplete. + if amount > len(rawShares) { + continue + } + + blob, same, err := constructAndVerifyBlob(rawShares[:amount], commitment) + if err != nil { + return nil, nil, err + } + if same { + return blob, &proofs, nil + } + + // drop info of the checked blob + rawShares = rawShares[amount:] + if len(rawShares) > 0 { + // save proof for the last row in case we have rawShares + proofs = proofs[len(proofs)-1:] + } else { + // otherwise clear proofs + proofs = nil + } + blobShare = nil + } + + if len(rawShares) == 0 { + return nil, nil, ErrBlobNotFound + } + + blob, same, err := constructAndVerifyBlob(rawShares, commitment) + if err != nil { + return nil, nil, err + } + if same { + return blob, &proofs, nil + } + + return nil, nil, ErrBlobNotFound +} + +// getBlobs retrieves the DAH and fetches all shares from the requested namespace.ID and converts +// them to Blobs. 
+func (s *Service) getBlobs(ctx context.Context, nID namespace.ID, root *share.Root) ([]*Blob, error) { + namespacedShares, err := s.shareGetter.GetSharesByNamespace(ctx, root, nID) + if err != nil { + return nil, err + } + return SharesToBlobs(namespacedShares.Flatten()) +} diff --git a/blob/service_test.go b/blob/service_test.go new file mode 100644 index 0000000000..ee6e982fe8 --- /dev/null +++ b/blob/service_test.go @@ -0,0 +1,431 @@ +package blob + +import ( + "bytes" + "context" + "crypto/sha256" + "testing" + "time" + + ds "github.com/ipfs/go-datastore" + ds_sync "github.com/ipfs/go-datastore/sync" + mdutils "github.com/ipfs/go-merkledag/test" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + tmrand "github.com/tendermint/tendermint/libs/rand" + + "github.com/celestiaorg/celestia-app/pkg/appconsts" + appns "github.com/celestiaorg/celestia-app/pkg/namespace" + "github.com/celestiaorg/celestia-app/pkg/shares" + "github.com/celestiaorg/go-header/store" + "github.com/celestiaorg/nmt/namespace" + + "github.com/celestiaorg/celestia-node/blob/blobtest" + "github.com/celestiaorg/celestia-node/header" + "github.com/celestiaorg/celestia-node/header/headertest" + "github.com/celestiaorg/celestia-node/share" + "github.com/celestiaorg/celestia-node/share/getters" +) + +func TestBlobService_Get(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), time.Second*5) + t.Cleanup(cancel) + var ( + blobSize0 = 18 + blobSize1 = 14 + blobSize2 = 20 + blobSize3 = 12 + ) + + appBlobs, err := blobtest.GenerateBlobs([]int{blobSize0, blobSize1}, false) + require.NoError(t, err) + blobs0, err := convertBlobs(appBlobs...) + require.NoError(t, err) + + appBlobs, err = blobtest.GenerateBlobs([]int{blobSize2, blobSize3}, true) + require.NoError(t, err) + blobs1, err := convertBlobs(appBlobs...) 
+ require.NoError(t, err) + + service := createService(ctx, t, append(blobs0, blobs1...)) + var test = []struct { + name string + doFn func() (interface{}, error) + expectedResult func(interface{}, error) + }{ + { + name: "get single blob", + doFn: func() (interface{}, error) { + b, err := service.Get(ctx, 1, blobs0[0].Namespace(), blobs0[0].Commitment) + return []*Blob{b}, err + }, + expectedResult: func(res interface{}, err error) { + require.NoError(t, err) + assert.NotEmpty(t, res) + + blobs, ok := res.([]*Blob) + assert.True(t, ok) + assert.Len(t, blobs, 1) + + assert.Equal(t, blobs0[0].Commitment, blobs[0].Commitment) + }, + }, + { + name: "get all with the same nID", + doFn: func() (interface{}, error) { + b, err := service.GetAll(ctx, 1, []namespace.ID{blobs1[0].Namespace()}) + return b, err + }, + expectedResult: func(res interface{}, err error) { + require.NoError(t, err) + + blobs, ok := res.([]*Blob) + assert.True(t, ok) + assert.NotEmpty(t, blobs) + + assert.Len(t, blobs, 2) + + for i := range blobs1 { + bytes.Equal(blobs1[i].Commitment, blobs[i].Commitment) + } + }, + }, + { + name: "get all with different nIDs", + doFn: func() (interface{}, error) { + b, err := service.GetAll(ctx, 1, []namespace.ID{blobs0[0].Namespace(), blobs0[1].Namespace()}) + return b, err + }, + expectedResult: func(res interface{}, err error) { + require.NoError(t, err) + + blobs, ok := res.([]*Blob) + assert.True(t, ok) + assert.NotEmpty(t, blobs) + + assert.Len(t, blobs, 2) + // check the order + require.True(t, bytes.Equal(blobs[0].Namespace(), blobs0[0].Namespace())) + require.True(t, bytes.Equal(blobs[1].Namespace(), blobs0[1].Namespace())) + }, + }, + { + name: "get blob with incorrect commitment", + doFn: func() (interface{}, error) { + b, err := service.Get(ctx, 1, blobs0[0].Namespace(), blobs0[1].Commitment) + return []*Blob{b}, err + }, + expectedResult: func(res interface{}, err error) { + require.Error(t, err) + + blobs, ok := res.([]*Blob) + assert.True(t, ok) + assert.Empty(t, blobs[0]) + }, + }, + { + name: "get invalid blob", + doFn: func() (interface{}, error) { + appBlob, err := blobtest.GenerateBlobs([]int{10}, false) + require.NoError(t, err) + blob, err := convertBlobs(appBlob...) 
+ require.NoError(t, err) + + b, err := service.Get(ctx, 1, blob[0].Namespace(), blob[0].Commitment) + return []*Blob{b}, err + }, + expectedResult: func(res interface{}, err error) { + require.Error(t, err) + + blobs, ok := res.([]*Blob) + assert.True(t, ok) + assert.Empty(t, blobs[0]) + }, + }, + { + name: "get proof", + doFn: func() (interface{}, error) { + proof, err := service.GetProof(ctx, 1, blobs0[1].Namespace(), blobs0[1].Commitment) + return proof, err + }, + expectedResult: func(res interface{}, err error) { + require.NoError(t, err) + + header, err := service.headerGetter(ctx, 1) + require.NoError(t, err) + + proof, ok := res.(*Proof) + assert.True(t, ok) + + verifyFn := func(t *testing.T, rawShares [][]byte, proof *Proof, nID namespace.ID) { + for _, row := range header.DAH.RowRoots { + to := 0 + for _, p := range *proof { + from := to + to = p.End() - p.Start() + from + eq := p.VerifyInclusion(sha256.New(), nID, rawShares[from:to], row) + if eq == true { + return + } + } + } + t.Fatal("could not prove the shares") + } + + rawShares, err := BlobsToShares(blobs0[1]) + require.NoError(t, err) + verifyFn(t, rawShares, proof, blobs0[1].Namespace()) + }, + }, + { + name: "verify inclusion", + doFn: func() (interface{}, error) { + proof, err := service.GetProof(ctx, 1, blobs0[0].Namespace(), blobs0[0].Commitment) + require.NoError(t, err) + return service.Included(ctx, 1, blobs0[0].Namespace(), proof, blobs0[0].Commitment) + }, + expectedResult: func(res interface{}, err error) { + require.NoError(t, err) + included, ok := res.(bool) + require.True(t, ok) + require.True(t, included) + }, + }, + { + name: "verify inclusion fails with different proof", + doFn: func() (interface{}, error) { + proof, err := service.GetProof(ctx, 1, blobs0[1].Namespace(), blobs0[1].Commitment) + require.NoError(t, err) + return service.Included(ctx, 1, blobs0[0].Namespace(), proof, blobs0[0].Commitment) + }, + expectedResult: func(res interface{}, err error) { + require.Error(t, err) + require.ErrorIs(t, err, ErrInvalidProof) + included, ok := res.(bool) + require.True(t, ok) + require.True(t, included) + }, + }, + { + name: "not included", + doFn: func() (interface{}, error) { + appBlob, err := blobtest.GenerateBlobs([]int{10}, false) + require.NoError(t, err) + blob, err := convertBlobs(appBlob...) 
+ require.NoError(t, err) + + proof, err := service.GetProof(ctx, 1, blobs0[1].Namespace(), blobs0[1].Commitment) + require.NoError(t, err) + return service.Included(ctx, 1, blob[0].Namespace(), proof, blob[0].Commitment) + }, + expectedResult: func(res interface{}, err error) { + require.NoError(t, err) + included, ok := res.(bool) + require.True(t, ok) + require.False(t, included) + }, + }, + { + name: "count proofs for the blob", + doFn: func() (interface{}, error) { + proof0, err := service.GetProof(ctx, 1, blobs0[0].Namespace(), blobs0[0].Commitment) + if err != nil { + return nil, err + } + proof1, err := service.GetProof(ctx, 1, blobs0[1].Namespace(), blobs0[1].Commitment) + if err != nil { + return nil, err + } + return []*Proof{proof0, proof1}, nil + }, + expectedResult: func(i interface{}, err error) { + require.NoError(t, err) + proofs, ok := i.([]*Proof) + require.True(t, ok) + + h, err := service.headerGetter(ctx, 1) + require.NoError(t, err) + + originalDataWidth := len(h.DAH.RowRoots) / 2 + sizes := []int{blobSize0, blobSize1} + for i, proof := range proofs { + require.True(t, sizes[i]/originalDataWidth+1 == proof.Len()) + } + }, + }, + { + name: "get all not found", + doFn: func() (interface{}, error) { + nID := tmrand.Bytes(appconsts.NamespaceSize) + return service.GetAll(ctx, 1, []namespace.ID{nID}) + }, + expectedResult: func(i interface{}, err error) { + blobs, ok := i.([]*Blob) + require.True(t, ok) + assert.Empty(t, blobs) + require.Error(t, err) + require.ErrorIs(t, err, ErrBlobNotFound) + + }, + }, + { + name: "marshal proof", + doFn: func() (interface{}, error) { + proof, err := service.GetProof(ctx, 1, blobs0[1].Namespace(), blobs0[1].Commitment) + require.NoError(t, err) + return proof.MarshalJSON() + }, + expectedResult: func(i interface{}, err error) { + require.NoError(t, err) + jsonData, ok := i.([]byte) + require.True(t, ok) + var proof Proof + require.NoError(t, proof.UnmarshalJSON(jsonData)) + + newProof, err := service.GetProof(ctx, 1, blobs0[1].Namespace(), blobs0[1].Commitment) + require.NoError(t, err) + require.NoError(t, proof.equal(*newProof)) + }, + }, + } + + for _, tt := range test { + t.Run(tt.name, func(t *testing.T) { + blobs, err := tt.doFn() + tt.expectedResult(blobs, err) + }) + } +} + +// TestService_GetSingleBlobWithoutPadding creates two blobs with the same nID +// But to satisfy the rule of eds creating, padding namespace share is placed between +// blobs. Test ensures that blob service will skip padding share and return the correct blob. +func TestService_GetSingleBlobWithoutPadding(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), time.Second*5) + t.Cleanup(cancel) + + appBlob, err := blobtest.GenerateBlobs([]int{9, 5}, true) + require.NoError(t, err) + blobs, err := convertBlobs(appBlob...) + require.NoError(t, err) + + ns1, err := appns.New(blobs[0].Namespace()[0], blobs[0].Namespace()[appns.NamespaceVersionSize:]) + require.NoError(t, err) + + ns2, err := appns.New(blobs[1].Namespace()[0], blobs[1].Namespace()[appns.NamespaceVersionSize:]) + require.NoError(t, err) + + padding0, err := shares.NamespacePaddingShare(ns1) + require.NoError(t, err) + padding1, err := shares.NamespacePaddingShare(ns2) + require.NoError(t, err) + rawShares0, err := BlobsToShares(blobs[0]) + require.NoError(t, err) + rawShares1, err := BlobsToShares(blobs[1]) + require.NoError(t, err) + + rawShares := make([][]byte, 0) + rawShares = append(rawShares, append(rawShares0, padding0.ToBytes())...) 
+ rawShares = append(rawShares, append(rawShares1, padding1.ToBytes())...) + + bs := mdutils.Bserv() + batching := ds_sync.MutexWrap(ds.NewMapDatastore()) + headerStore, err := store.NewStore[*header.ExtendedHeader](batching) + require.NoError(t, err) + eds, err := share.AddShares(ctx, rawShares, bs) + require.NoError(t, err) + + h := headertest.ExtendedHeaderFromEDS(t, 1, eds) + err = headerStore.Init(ctx, h) + require.NoError(t, err) + + fn := func(ctx context.Context, height uint64) (*header.ExtendedHeader, error) { + return headerStore.GetByHeight(ctx, height) + } + service := NewService(nil, getters.NewIPLDGetter(bs), fn) + + newBlob, err := service.Get(ctx, 1, blobs[1].Namespace(), blobs[1].Commitment) + require.NoError(t, err) + assert.Equal(t, newBlob.Commitment, blobs[1].Commitment) +} + +func TestService_GetAllWithoutPadding(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), time.Second*5) + t.Cleanup(cancel) + + appBlob, err := blobtest.GenerateBlobs([]int{9, 5}, true) + require.NoError(t, err) + blobs, err := convertBlobs(appBlob...) + require.NoError(t, err) + + ns1, err := appns.New( + blobs[0].Namespace()[appns.NamespaceVersionSize-1], + blobs[0].Namespace()[appns.NamespaceVersionSize:], + ) + require.NoError(t, err) + + ns2, err := appns.New( + blobs[1].Namespace()[appns.NamespaceVersionSize-1], + blobs[1].Namespace()[appns.NamespaceVersionSize:], + ) + require.NoError(t, err) + + padding0, err := shares.NamespacePaddingShare(ns1) + require.NoError(t, err) + padding1, err := shares.NamespacePaddingShare(ns2) + require.NoError(t, err) + rawShares0, err := BlobsToShares(blobs[0]) + require.NoError(t, err) + rawShares1, err := BlobsToShares(blobs[1]) + require.NoError(t, err) + rawShares := make([][]byte, 0) + + // create shares in correct order with padding shares + if bytes.Compare(blobs[0].Namespace(), blobs[1].Namespace()) <= 0 { + rawShares = append(rawShares, append(rawShares0, padding0.ToBytes())...) + rawShares = append(rawShares, append(rawShares1, padding1.ToBytes())...) + } else { + rawShares = append(rawShares, append(rawShares1, padding1.ToBytes())...) + rawShares = append(rawShares, append(rawShares0, padding0.ToBytes())...) + } + + bs := mdutils.Bserv() + batching := ds_sync.MutexWrap(ds.NewMapDatastore()) + headerStore, err := store.NewStore[*header.ExtendedHeader](batching) + require.NoError(t, err) + eds, err := share.AddShares(ctx, rawShares, bs) + require.NoError(t, err) + + h := headertest.ExtendedHeaderFromEDS(t, 1, eds) + err = headerStore.Init(ctx, h) + require.NoError(t, err) + + fn := func(ctx context.Context, height uint64) (*header.ExtendedHeader, error) { + return headerStore.GetByHeight(ctx, height) + } + + service := NewService(nil, getters.NewIPLDGetter(bs), fn) + + _, err = service.GetAll(ctx, 1, []namespace.ID{blobs[0].Namespace(), blobs[1].Namespace()}) + require.NoError(t, err) +} + +func createService(ctx context.Context, t *testing.T, blobs []*Blob) *Service { + bs := mdutils.Bserv() + batching := ds_sync.MutexWrap(ds.NewMapDatastore()) + headerStore, err := store.NewStore[*header.ExtendedHeader](batching) + require.NoError(t, err) + rawShares, err := BlobsToShares(blobs...) 
+ require.NoError(t, err) + eds, err := share.AddShares(ctx, rawShares, bs) + require.NoError(t, err) + + h := headertest.ExtendedHeaderFromEDS(t, 1, eds) + err = headerStore.Init(ctx, h) + require.NoError(t, err) + + fn := func(ctx context.Context, height uint64) (*header.ExtendedHeader, error) { + return headerStore.GetByHeight(ctx, height) + } + return NewService(nil, getters.NewIPLDGetter(bs), fn) +} diff --git a/cmd/auth.go b/cmd/auth.go index c4eb057a29..eb2000675e 100644 --- a/cmd/auth.go +++ b/cmd/auth.go @@ -14,6 +14,7 @@ import ( flag "github.com/spf13/pflag" "github.com/celestiaorg/celestia-node/api/rpc/perms" + "github.com/celestiaorg/celestia-node/libs/authtoken" "github.com/celestiaorg/celestia-node/libs/keystore" nodemod "github.com/celestiaorg/celestia-node/nodebuilder/node" ) @@ -70,14 +71,12 @@ func newToken(cmd *cobra.Command, args []string) error { return err } - token, err := jwt.NewTokenBuilder(signer).Build(&perms.JWTPayload{ - Allow: permissions, - }) + token, err := authtoken.NewSignedJWT(signer, permissions) if err != nil { return err } - fmt.Printf("%s", token.InsecureString()) + fmt.Printf("%s", token) return nil } diff --git a/cmd/celestia/rpc.go b/cmd/celestia/rpc.go index afbe13c3f8..767fca872c 100644 --- a/cmd/celestia/rpc.go +++ b/cmd/celestia/rpc.go @@ -5,7 +5,6 @@ import ( "encoding/base64" "encoding/hex" "encoding/json" - "errors" "fmt" "io" "log" @@ -15,10 +14,13 @@ import ( "strconv" "strings" - "github.com/cosmos/cosmos-sdk/types" "github.com/spf13/cobra" + "github.com/celestiaorg/nmt/namespace" + "github.com/celestiaorg/celestia-node/api/rpc/client" + "github.com/celestiaorg/celestia-node/blob" + "github.com/celestiaorg/celestia-node/share" "github.com/celestiaorg/celestia-node/state" ) @@ -28,6 +30,7 @@ const ( var requestURL string var authTokenFlag string +var printRequest bool type jsonRPCRequest struct { ID int64 `json:"id"` @@ -36,6 +39,11 @@ type jsonRPCRequest struct { Params []interface{} `json:"params"` } +type outputWithRequest struct { + Request jsonRPCRequest + Response json.RawMessage +} + func init() { rpcCmd.PersistentFlags().StringVar( &requestURL, @@ -49,6 +57,12 @@ func init() { "", "Authorization token (if not provided, the "+authEnvKey+" environment variable will be used)", ) + rpcCmd.PersistentFlags().BoolVar( + &printRequest, + "print-request", + false, + "Print JSON-RPC request along with the response", + ) rootCmd.AddCommand(rpcCmd) } @@ -98,52 +112,120 @@ func parseParams(method string, params []string) []interface{} { } parsedParams[0] = root // 2. NamespaceID - if strings.HasPrefix(params[1], "0x") { - decoded, err := hex.DecodeString(params[1][2:]) - if err != nil { - panic("Error decoding namespace ID: hex string could not be decoded.") - } - parsedParams[1] = decoded - } else { - // otherwise, it's just a base64 string - parsedParams[1] = params[1] + nID, err := parseNamespace(params[1]) + if err != nil { + panic(fmt.Sprintf("Error parsing namespace: %v", err)) } - return parsedParams - case "SubmitPayForBlob": + parsedParams[1] = nID + case "Submit": // 1. NamespaceID - if strings.HasPrefix(params[0], "0x") { - decoded, err := hex.DecodeString(params[0][2:]) - if err != nil { - panic("Error decoding namespace ID: hex string could not be decoded.") - } - parsedParams[0] = decoded - } else { - // otherwise, it's just a base64 string - parsedParams[0] = params[0] + var err error + nID, err := parseNamespace(params[0]) + if err != nil { + panic(fmt.Sprintf("Error parsing namespace: %v", err)) } - // 2. Blob + // 2. 
Blob data + var blobData []byte switch { case strings.HasPrefix(params[1], "0x"): decoded, err := hex.DecodeString(params[1][2:]) if err != nil { panic("Error decoding blob: hex string could not be decoded.") } - parsedParams[0] = decoded + blobData = decoded case strings.HasPrefix(params[1], "\""): // user input an utf string that needs to be encoded to base64 - parsedParams[1] = base64.StdEncoding.EncodeToString([]byte(params[1])) + src := []byte(params[1]) + blobData = make([]byte, base64.StdEncoding.EncodedLen(len(src))) + base64.StdEncoding.Encode(blobData, []byte(params[1])) default: // otherwise, we assume the user has already encoded their input to base64 - parsedParams[1] = params[1] + blobData, err = base64.StdEncoding.DecodeString(params[1]) + if err != nil { + panic("Error decoding blob data: base64 string could not be decoded.") + } } - // 3. Fee (state.Int is a string) - parsedParams[2] = params[2] - // 4. GasLimit (uint64) - num, err := strconv.ParseUint(params[3], 10, 64) + parsedBlob, err := blob.NewBlob(0, nID, blobData) + if err != nil { + panic(fmt.Sprintf("Error creating blob: %v", err)) + } + parsedParams[0] = []*blob.Blob{parsedBlob} + // param count doesn't match input length, so cut off nil values + return parsedParams[:1] + case "SubmitPayForBlob": + // 1. Fee (state.Int is a string) + parsedParams[0] = params[0] + // 2. GasLimit (uint64) + num, err := strconv.ParseUint(params[1], 10, 64) if err != nil { panic("Error parsing gas limit: uint64 could not be parsed.") } - parsedParams[3] = num + parsedParams[1] = num + // 3. NamespaceID + nID, err := parseNamespace(params[2]) + if err != nil { + panic(fmt.Sprintf("Error parsing namespace: %v", err)) + } + // 4. Blob data + var blobData []byte + switch { + case strings.HasPrefix(params[3], "0x"): + decoded, err := hex.DecodeString(params[3][2:]) + if err != nil { + panic("Error decoding blob: hex string could not be decoded.") + } + blobData = decoded + case strings.HasPrefix(params[3], "\""): + // user input an utf string that needs to be encoded to base64 + src := []byte(params[3]) + blobData = make([]byte, base64.StdEncoding.EncodedLen(len(src))) + base64.StdEncoding.Encode(blobData, []byte(params[3])) + default: + // otherwise, we assume the user has already encoded their input to base64 + blobData, err = base64.StdEncoding.DecodeString(params[3]) + if err != nil { + panic("Error decoding blob: base64 string could not be decoded.") + } + } + parsedBlob, err := blob.NewBlob(0, nID, blobData) + if err != nil { + panic(fmt.Sprintf("Error creating blob: %v", err)) + } + parsedParams[2] = []*blob.Blob{parsedBlob} + return parsedParams[:3] + case "Get": + // 1. Height + num, err := strconv.ParseUint(params[0], 10, 64) + if err != nil { + panic("Error parsing height: uint64 could not be parsed.") + } + parsedParams[0] = num + // 2. NamespaceID + nID, err := parseNamespace(params[1]) + if err != nil { + panic(fmt.Sprintf("Error parsing namespace: %v", err)) + } + parsedParams[1] = nID + // 3. Commitment + commitment, err := base64.StdEncoding.DecodeString(params[2]) + if err != nil { + panic("Error decoding commitment: base64 string could not be decoded.") + } + parsedParams[2] = commitment + return parsedParams + case "GetAll": // NOTE: Over the cli, you can only pass one namespace + // 1. Height + num, err := strconv.ParseUint(params[0], 10, 64) + if err != nil { + panic("Error parsing height: uint64 could not be parsed.") + } + parsedParams[0] = num + // 2. 
NamespaceID + nID, err := parseNamespace(params[1]) + if err != nil { + panic(fmt.Sprintf("Error parsing namespace: %v", err)) + } + parsedParams[1] = []namespace.ID{nID} return parsedParams case "QueryDelegation", "QueryUnbonding", "BalanceForAddress": var err error @@ -284,22 +366,36 @@ func sendJSONRPCRequest(namespace, method string, params []interface{}) { log.Fatalf("Error reading response body: %v", err) //nolint:gocritic } - fmt.Println(string(responseBody)) -} - -func parseAddressFromString(addrStr string) (state.Address, error) { - var addr state.AccAddress - addr, err := types.AccAddressFromBech32(addrStr) + rawResponseJSON, err := parseJSON(string(responseBody)) if err != nil { - // first check if it is a validator address and can be converted - valAddr, err := types.ValAddressFromBech32(addrStr) + panic(err) + } + if printRequest { + output, err := json.MarshalIndent(outputWithRequest{ + Request: request, + Response: rawResponseJSON, + }, "", " ") if err != nil { - return nil, errors.New("address must be a valid account or validator address ") + panic(fmt.Sprintf("Error marshaling JSON-RPC response: %v", err)) } - return valAddr, nil + fmt.Println(string(output)) + return } - return addr, nil + output, err := json.MarshalIndent(rawResponseJSON, "", " ") + if err != nil { + panic(fmt.Sprintf("Error marshaling JSON-RPC response: %v", err)) + } + fmt.Println(string(output)) +} + +func parseAddressFromString(addrStr string) (state.Address, error) { + var address state.Address + err := address.UnmarshalJSON([]byte(addrStr)) + if err != nil { + return address, err + } + return address, nil } func parseSignatureForHelpstring(methodSig reflect.StructField) string { @@ -322,6 +418,32 @@ func parseSignatureForHelpstring(methodSig reflect.StructField) string { return simplifiedSignature } +func parseNamespace(param string) (namespace.ID, error) { + var nID []byte + var err error + if strings.HasPrefix(param, "0x") { + decoded, err := hex.DecodeString(param[2:]) + if err != nil { + return nil, fmt.Errorf("error decoding namespace ID: %w", err) + } + nID = decoded + } else { + // otherwise, it's just a base64 string + nID, err = base64.StdEncoding.DecodeString(param) + if err != nil { + return nil, fmt.Errorf("error decoding namespace ID: %w", err) + } + } + // if the namespace ID is 8 bytes, add v0 share + namespace prefix and zero pad + if len(nID) == 8 { + nID, err = share.NewNamespaceV0(nID) + if err != nil { + return nil, err + } + } + return nID, nil +} + func parseJSON(param string) (json.RawMessage, error) { var raw json.RawMessage err := json.Unmarshal([]byte(param), &raw) diff --git a/core/eds.go b/core/eds.go index c435f0e649..dc9b2b4a9e 100644 --- a/core/eds.go +++ b/core/eds.go @@ -24,7 +24,7 @@ func extendBlock(data types.Data) (*rsmt2d.ExtendedDataSquare, error) { return nil, nil } - sqr, err := square.Construct(data.Txs.ToSliceOfBytes(), appconsts.MaxSquareSize) + sqr, err := square.Construct(data.Txs.ToSliceOfBytes(), appconsts.LatestVersion, share.MaxSquareSize) if err != nil { return nil, err } diff --git a/das/checkpoint.go b/das/checkpoint.go index a38eca828c..bb023a19da 100644 --- a/das/checkpoint.go +++ b/das/checkpoint.go @@ -23,15 +23,15 @@ type workerCheckpoint struct { func newCheckpoint(stats SamplingStats) checkpoint { workers := make([]workerCheckpoint, 0, len(stats.Workers)) for _, w := range stats.Workers { - // no need to store retry jobs, since they will resume from failed heights map - if w.JobType == retryJob { - continue + // no need to resume recent 
jobs after restart. On the other hand, retry jobs will resume from + // failed heights map. it leaves only catchup jobs to be stored and resumed + if w.JobType == catchupJob { + workers = append(workers, workerCheckpoint{ + From: w.Curr, + To: w.To, + JobType: w.JobType, + }) } - workers = append(workers, workerCheckpoint{ - From: w.Curr, - To: w.To, - JobType: w.JobType, - }) } return checkpoint{ SampleFrom: stats.CatchupHead + 1, diff --git a/das/coordinator.go b/das/coordinator.go index 2184dce2c8..852a40d24d 100644 --- a/das/coordinator.go +++ b/das/coordinator.go @@ -81,7 +81,9 @@ func (sc *samplingCoordinator) run(ctx context.Context, cp checkpoint) { select { case head := <-sc.updHeadCh: if sc.state.isNewHead(head.Height()) { - sc.runWorker(ctx, sc.state.recentJob(head)) + if !sc.recentJobsLimitReached() { + sc.runWorker(ctx, sc.state.recentJob(head)) + } sc.state.updateHead(head.Height()) // run worker without concurrency limit restrictions to reduced delay sc.metrics.observeNewHead(ctx) @@ -146,3 +148,8 @@ func (sc *samplingCoordinator) getCheckpoint(ctx context.Context) (checkpoint, e func (sc *samplingCoordinator) concurrencyLimitReached() bool { return len(sc.state.inProgress) >= sc.concurrencyLimit } + +// recentJobsLimitReached indicates whether concurrency limit for recent jobs has been reached +func (sc *samplingCoordinator) recentJobsLimitReached() bool { + return len(sc.state.inProgress) >= 2*sc.concurrencyLimit +} diff --git a/das/stats.go b/das/stats.go index 5799370f91..dda6be6cc0 100644 --- a/das/stats.go +++ b/das/stats.go @@ -34,7 +34,10 @@ type WorkerStats struct { func (s SamplingStats) totalSampled() uint64 { var inProgress uint64 for _, w := range s.Workers { - inProgress += w.To - w.Curr + 1 + // don't count recent jobs, since heights they are working on are after catchup head + if w.JobType != recentJob { + inProgress += w.To - w.Curr + 1 + } } return s.CatchupHead - inProgress - uint64(len(s.Failed)) } diff --git a/go.mod b/go.mod index abdc7b6afc..5f5fe104a0 100644 --- a/go.mod +++ b/go.mod @@ -5,11 +5,12 @@ go 1.20 replace github.com/ipfs/go-verifcid => github.com/celestiaorg/go-verifcid v0.0.1-lazypatch require ( + cosmossdk.io/errors v1.0.0-beta.7 cosmossdk.io/math v1.0.0-beta.3 - github.com/BurntSushi/toml v1.2.1 + github.com/BurntSushi/toml v1.3.0 github.com/alecthomas/jsonschema v0.0.0-20200530073317-71f438968921 - github.com/benbjohnson/clock v1.3.0 - github.com/celestiaorg/celestia-app v1.0.0-rc0 + github.com/benbjohnson/clock v1.3.5 + github.com/celestiaorg/celestia-app v1.0.0-rc2 github.com/celestiaorg/go-fraud v0.1.0 github.com/celestiaorg/go-header v0.2.7 github.com/celestiaorg/go-libp2p-messenger v0.2.0 @@ -21,16 +22,16 @@ require ( github.com/dgraph-io/badger/v2 v2.2007.4 github.com/etclabscore/go-openrpc-reflect v0.0.37 github.com/filecoin-project/dagstore v0.5.6 - github.com/filecoin-project/go-jsonrpc v0.1.9 + github.com/filecoin-project/go-jsonrpc v0.3.1 github.com/gammazero/workerpool v1.1.3 github.com/gogo/protobuf v1.3.3 github.com/golang/mock v1.6.0 github.com/gorilla/mux v1.8.0 github.com/hashicorp/go-retryablehttp v0.7.2 github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d - github.com/imdario/mergo v0.3.15 + github.com/imdario/mergo v0.3.16 github.com/ipfs/go-blockservice v0.5.0 - github.com/ipfs/go-cid v0.3.2 + github.com/ipfs/go-cid v0.4.1 github.com/ipfs/go-datastore v0.6.0 github.com/ipfs/go-ds-badger2 v0.1.3 github.com/ipfs/go-ipfs-blockstore v1.2.0 @@ -43,24 +44,24 @@ require ( github.com/ipfs/go-log/v2 v2.5.1 
github.com/ipfs/go-merkledag v0.10.0 github.com/ipld/go-car v0.6.0 - github.com/libp2p/go-libp2p v0.26.3 - github.com/libp2p/go-libp2p-kad-dht v0.21.0 + github.com/libp2p/go-libp2p v0.28.0 + github.com/libp2p/go-libp2p-kad-dht v0.21.1 github.com/libp2p/go-libp2p-pubsub v0.9.3 github.com/libp2p/go-libp2p-record v0.2.0 github.com/libp2p/go-libp2p-routing-helpers v0.6.1 - github.com/minio/sha256-simd v1.0.0 + github.com/minio/sha256-simd v1.0.1 github.com/mitchellh/go-homedir v1.1.0 github.com/multiformats/go-base32 v0.1.0 - github.com/multiformats/go-multiaddr v0.8.0 + github.com/multiformats/go-multiaddr v0.9.0 github.com/multiformats/go-multiaddr-dns v0.3.1 - github.com/multiformats/go-multihash v0.2.2-0.20221030163302-608669da49b6 + github.com/multiformats/go-multihash v0.2.2 github.com/open-rpc/meta-schema v0.0.0-20201029221707-1b72ef2ea333 github.com/prometheus/client_golang v1.14.0 - github.com/pyroscope-io/client v0.7.0 + github.com/pyroscope-io/client v0.7.1 github.com/pyroscope-io/otel-profiling-go v0.4.0 github.com/spf13/cobra v1.6.1 github.com/spf13/pflag v1.0.5 - github.com/stretchr/testify v1.8.3 + github.com/stretchr/testify v1.8.4 github.com/tendermint/tendermint v0.34.24 go.opentelemetry.io/otel v1.13.0 go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v0.34.0 @@ -70,13 +71,14 @@ require ( go.opentelemetry.io/otel/sdk/metric v0.34.0 go.opentelemetry.io/otel/trace v1.13.0 go.opentelemetry.io/proto/otlp v0.19.0 - go.uber.org/fx v1.19.2 + go.uber.org/fx v1.19.3 go.uber.org/zap v1.24.0 - golang.org/x/crypto v0.7.0 - golang.org/x/sync v0.1.0 - golang.org/x/text v0.8.0 + golang.org/x/crypto v0.9.0 + golang.org/x/exp v0.0.0-20230321023759-10a507213a29 + golang.org/x/sync v0.2.0 + golang.org/x/text v0.9.0 google.golang.org/grpc v1.53.0 - google.golang.org/protobuf v1.28.2-0.20220831092852-f930b1dc76e8 + google.golang.org/protobuf v1.30.0 ) require ( @@ -85,7 +87,6 @@ require ( cloud.google.com/go/compute/metadata v0.2.3 // indirect cloud.google.com/go/iam v0.8.0 // indirect cloud.google.com/go/storage v1.27.0 // indirect - cosmossdk.io/errors v1.0.0-beta.7 // indirect filippo.io/edwards25519 v1.0.0-rc.1 // indirect github.com/99designs/go-keychain v0.0.0-20191008050251-8e49817e8af4 // indirect github.com/99designs/keyring v1.2.1 // indirect @@ -105,12 +106,12 @@ require ( github.com/cenkalti/backoff/v4 v4.2.0 // indirect github.com/cespare/xxhash v1.1.0 // indirect github.com/cespare/xxhash/v2 v2.2.0 // indirect - github.com/chzyer/readline v1.5.0 // indirect + github.com/chzyer/readline v1.5.1 // indirect github.com/cockroachdb/apd/v2 v2.0.2 // indirect github.com/coinbase/rosetta-sdk-go v0.7.9 // indirect github.com/cometbft/cometbft-db v0.7.0 // indirect github.com/confio/ics23/go v0.9.0 // indirect - github.com/containerd/cgroups v1.0.4 // indirect + github.com/containerd/cgroups v1.1.0 // indirect github.com/coreos/go-systemd/v22 v22.5.0 // indirect github.com/cosmos/btcutil v1.0.5 // indirect github.com/cosmos/cosmos-proto v1.0.0-alpha8 // indirect @@ -118,7 +119,7 @@ require ( github.com/cosmos/gogoproto v1.4.2 // indirect github.com/cosmos/gorocksdb v1.2.0 // indirect github.com/cosmos/iavl v0.19.5 // indirect - github.com/cosmos/ibc-go/v6 v6.1.0 // indirect + github.com/cosmos/ibc-go/v6 v6.1.1 // indirect github.com/cosmos/ledger-cosmos-go v0.12.2 // indirect github.com/creachadair/taskgroup v0.3.2 // indirect github.com/cskr/pubsub v1.0.2 // indirect @@ -126,7 +127,7 @@ require ( github.com/davecgh/go-spew v1.1.1 // indirect github.com/davidlazar/go-crypto 
v0.0.0-20200604182044-b73af7476f6c // indirect github.com/deckarep/golang-set/v2 v2.1.0 // indirect - github.com/decred/dcrd/dcrec/secp256k1/v4 v4.1.0 // indirect + github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0 // indirect github.com/deepmap/oapi-codegen v1.8.2 // indirect github.com/desertbit/timer v0.0.0-20180107155436-c41aec40b27f // indirect github.com/dgraph-io/ristretto v0.1.0 // indirect @@ -136,7 +137,7 @@ require ( github.com/dvsekhvalnov/jose2go v1.5.0 // indirect github.com/elastic/gosigar v0.14.2 // indirect github.com/etclabscore/go-jsonschema-walk v0.0.6 // indirect - github.com/ethereum/go-ethereum v1.11.6 // indirect + github.com/ethereum/go-ethereum v1.12.0 // indirect github.com/felixge/httpsnoop v1.0.1 // indirect github.com/flynn/noise v1.0.0 // indirect github.com/francoispqt/gojay v1.2.13 // indirect @@ -145,7 +146,7 @@ require ( github.com/go-kit/kit v0.12.0 // indirect github.com/go-kit/log v0.2.1 // indirect github.com/go-logfmt/logfmt v0.5.1 // indirect - github.com/go-logr/logr v1.2.3 // indirect + github.com/go-logr/logr v1.2.4 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/go-ole/go-ole v1.2.6 // indirect github.com/go-openapi/jsonpointer v0.19.5 // indirect @@ -153,19 +154,19 @@ require ( github.com/go-openapi/spec v0.19.11 // indirect github.com/go-openapi/swag v0.19.11 // indirect github.com/go-stack/stack v1.8.1 // indirect - github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0 // indirect + github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 // indirect github.com/godbus/dbus v0.0.0-20190726142602-4481cbc300e2 // indirect github.com/godbus/dbus/v5 v5.1.0 // indirect github.com/gogo/gateway v1.1.0 // indirect github.com/golang/glog v1.0.0 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect - github.com/golang/protobuf v1.5.2 // indirect + github.com/golang/protobuf v1.5.3 // indirect github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb // indirect github.com/google/btree v1.1.2 // indirect github.com/google/go-cmp v0.5.9 // indirect github.com/google/gopacket v1.1.19 // indirect github.com/google/orderedcode v0.0.1 // indirect - github.com/google/pprof v0.0.0-20221203041831-ce31453925ec // indirect + github.com/google/pprof v0.0.0-20230602150820-91b7bce49751 // indirect github.com/google/uuid v1.3.0 // indirect github.com/googleapis/enterprise-certificate-proxy v0.2.0 // indirect github.com/googleapis/gax-go/v2 v2.7.0 // indirect @@ -184,11 +185,11 @@ require ( github.com/hashicorp/go-multierror v1.1.1 // indirect github.com/hashicorp/go-safetemp v1.0.0 // indirect github.com/hashicorp/go-version v1.6.0 // indirect - github.com/hashicorp/golang-lru/v2 v2.0.1 // indirect + github.com/hashicorp/golang-lru/v2 v2.0.2 // indirect github.com/hashicorp/hcl v1.0.0 // indirect github.com/hdevalence/ed25519consensus v0.0.0-20220222234857-c00d1f31bab3 // indirect github.com/holiman/uint256 v1.2.2-0.20230321075855-87b91420868c // indirect - github.com/huin/goupnp v1.0.3 // indirect + github.com/huin/goupnp v1.2.0 // indirect github.com/iancoleman/orderedmap v0.1.0 // indirect github.com/improbable-eng/grpc-web v0.15.0 // indirect github.com/inconshreveable/mousetrap v1.0.1 // indirect @@ -216,29 +217,29 @@ require ( github.com/jmespath/go-jmespath v0.4.0 // indirect github.com/jmhodges/levigo v1.0.0 // indirect github.com/josharian/intern v1.0.0 // indirect - github.com/klauspost/compress v1.15.15 // indirect - github.com/klauspost/cpuid/v2 v2.2.3 // indirect + 
github.com/klauspost/compress v1.16.5 // indirect + github.com/klauspost/cpuid/v2 v2.2.5 // indirect github.com/klauspost/reedsolomon v1.11.1 // indirect - github.com/koron/go-ssdp v0.0.3 // indirect + github.com/koron/go-ssdp v0.0.4 // indirect github.com/lib/pq v1.10.6 // indirect github.com/libp2p/go-buffer-pool v0.1.0 // indirect github.com/libp2p/go-cidranger v1.1.0 // indirect github.com/libp2p/go-flow-metrics v0.1.0 // indirect - github.com/libp2p/go-libp2p-asn-util v0.2.0 // indirect + github.com/libp2p/go-libp2p-asn-util v0.3.0 // indirect github.com/libp2p/go-libp2p-kbucket v0.5.0 // indirect github.com/libp2p/go-msgio v0.3.0 // indirect - github.com/libp2p/go-nat v0.1.0 // indirect + github.com/libp2p/go-nat v0.2.0 // indirect github.com/libp2p/go-netroute v0.2.1 // indirect - github.com/libp2p/go-reuseport v0.2.0 // indirect + github.com/libp2p/go-reuseport v0.3.0 // indirect github.com/libp2p/go-yamux/v4 v4.0.0 // indirect github.com/magiconair/properties v1.8.6 // indirect github.com/mailru/easyjson v0.7.6 // indirect github.com/manifoldco/promptui v0.9.0 // indirect github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd // indirect github.com/mattn/go-colorable v0.1.13 // indirect - github.com/mattn/go-isatty v0.0.17 // indirect + github.com/mattn/go-isatty v0.0.19 // indirect github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect - github.com/miekg/dns v1.1.50 // indirect + github.com/miekg/dns v1.1.54 // indirect github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b // indirect github.com/mikioh/tcpopt v0.0.0-20190314235656-172688c1accc // indirect github.com/mimoo/StrobeGo v0.0.0-20210601165009-122bf33a46e0 // indirect @@ -249,11 +250,11 @@ require ( github.com/mtibben/percent v0.2.1 // indirect github.com/multiformats/go-base36 v0.2.0 // indirect github.com/multiformats/go-multiaddr-fmt v0.1.0 // indirect - github.com/multiformats/go-multibase v0.1.1 // indirect - github.com/multiformats/go-multicodec v0.8.0 // indirect + github.com/multiformats/go-multibase v0.2.0 // indirect + github.com/multiformats/go-multicodec v0.9.0 // indirect github.com/multiformats/go-multistream v0.4.1 // indirect github.com/multiformats/go-varint v0.0.7 // indirect - github.com/onsi/ginkgo/v2 v2.5.1 // indirect + github.com/onsi/ginkgo/v2 v2.9.7 // indirect github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417 // indirect github.com/opentracing/opentracing-go v1.2.0 // indirect github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 // indirect @@ -264,15 +265,15 @@ require ( github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect github.com/polydawn/refmt v0.89.0 // indirect - github.com/prometheus/client_model v0.3.0 // indirect + github.com/prometheus/client_model v0.4.0 // indirect github.com/prometheus/common v0.39.0 // indirect github.com/prometheus/procfs v0.9.0 // indirect github.com/pyroscope-io/godeltaprof v0.1.0 // indirect github.com/quic-go/qpack v0.4.0 // indirect - github.com/quic-go/qtls-go1-19 v0.2.1 // indirect - github.com/quic-go/qtls-go1-20 v0.1.1 // indirect + github.com/quic-go/qtls-go1-19 v0.3.2 // indirect + github.com/quic-go/qtls-go1-20 v0.2.2 // indirect github.com/quic-go/quic-go v0.33.0 // indirect - github.com/quic-go/webtransport-go v0.5.2 // indirect + github.com/quic-go/webtransport-go v0.5.3 // indirect github.com/rakyll/statik v0.1.7 // indirect github.com/raulk/go-watchdog v1.3.0 // indirect github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 // indirect 
@@ -306,16 +307,15 @@ require ( go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.11.2 // indirect go.opentelemetry.io/otel/exporters/otlp/otlpmetric v0.34.0 // indirect go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.11.2 // indirect - go.uber.org/atomic v1.10.0 // indirect - go.uber.org/dig v1.16.1 // indirect + go.uber.org/atomic v1.11.0 // indirect + go.uber.org/dig v1.17.0 // indirect go.uber.org/multierr v1.11.0 // indirect - golang.org/x/exp v0.0.0-20230206171751-46f607a40771 // indirect - golang.org/x/mod v0.9.0 // indirect - golang.org/x/net v0.8.0 // indirect + golang.org/x/mod v0.10.0 // indirect + golang.org/x/net v0.10.0 // indirect golang.org/x/oauth2 v0.4.0 // indirect - golang.org/x/sys v0.6.0 // indirect - golang.org/x/term v0.6.0 // indirect - golang.org/x/tools v0.7.0 // indirect + golang.org/x/sys v0.8.0 // indirect + golang.org/x/term v0.8.0 // indirect + golang.org/x/tools v0.9.1 // indirect golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 // indirect google.golang.org/api v0.103.0 // indirect google.golang.org/appengine v1.6.7 // indirect @@ -324,7 +324,7 @@ require ( gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect - lukechampine.com/blake3 v1.1.7 // indirect + lukechampine.com/blake3 v1.2.1 // indirect nhooyr.io/websocket v1.8.7 // indirect sigs.k8s.io/yaml v1.3.0 // indirect ) @@ -333,5 +333,5 @@ replace ( github.com/cosmos/cosmos-sdk => github.com/celestiaorg/cosmos-sdk v1.13.0-sdk-v0.46.11 github.com/filecoin-project/dagstore => github.com/celestiaorg/dagstore v0.0.0-20230413141458-735ab09a15d6 github.com/gogo/protobuf => github.com/regen-network/protobuf v1.3.3-alpha.regen.1 - github.com/tendermint/tendermint => github.com/celestiaorg/celestia-core v1.21.0-tm-v0.34.27 + github.com/tendermint/tendermint => github.com/celestiaorg/celestia-core v1.21.2-tm-v0.34.27 ) diff --git a/go.sum b/go.sum index 9eca643fef..e548acff6a 100644 --- a/go.sum +++ b/go.sum @@ -220,8 +220,8 @@ github.com/Azure/azure-sdk-for-go/sdk/internal v0.8.3/go.mod h1:KLF4gFr6DcKFZwSu github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v0.3.0/go.mod h1:tPaiy8S5bQ+S5sOiDlINkp7+Ef339+Nz5L5XO+cnOHo= github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 h1:UQHMgLO+TxOElx5B5HZ4hJQsoJ/PvUvKRhJHDQXO8P8= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/BurntSushi/toml v1.2.1 h1:9F2/+DoOYIOksmaJFPw1tGFy1eDnIJXg+UHjuD8lTak= -github.com/BurntSushi/toml v1.2.1/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= +github.com/BurntSushi/toml v1.3.0 h1:Ws8e5YmnrGEHzZEzg0YvK/7COGYtTC5PbaH9oSSbgfA= +github.com/BurntSushi/toml v1.3.0/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/ChainSafe/go-schnorrkel v0.0.0-20200405005733-88cbf1b4c40d h1:nalkkPQcITbvhmL4+C4cKA87NW0tfm3Kl9VXRoPywFg= github.com/ChainSafe/go-schnorrkel v0.0.0-20200405005733-88cbf1b4c40d/go.mod h1:URdX5+vg25ts3aCh8H5IFZybJYKWhJHYMTnf+ULtoC4= @@ -295,8 +295,9 @@ github.com/aws/aws-sdk-go-v2/service/sso v1.1.1/go.mod h1:SuZJxklHxLAXgLTc1iFXbE github.com/aws/aws-sdk-go-v2/service/sts v1.1.1/go.mod h1:Wi0EBZwiz/K44YliU0EKxqTCJGUfYTWXrrBwkq736bM= github.com/aws/smithy-go v1.1.0/go.mod h1:EzMw8dbp/YJL4A5/sbhGddag+NPT7q084agLbB9LgIw= github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= 
-github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A= github.com/benbjohnson/clock v1.3.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= +github.com/benbjohnson/clock v1.3.5 h1:VvXlSJBzZpA/zum6Sj74hxwYI2DIxRWuNIoXAzHZz5o= +github.com/benbjohnson/clock v1.3.5/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= @@ -341,10 +342,10 @@ github.com/buger/jsonparser v0.0.0-20181115193947-bf1c66bbce23/go.mod h1:bbYlZJ7 github.com/bwesterb/go-ristretto v1.2.0/go.mod h1:fUIoIZaG73pV5biE2Blr2xEzDoMj7NFEuV9ekS419A0= github.com/c-bata/go-prompt v0.2.2/go.mod h1:VzqtzE2ksDBcdln8G7mk2RX9QyGjH+OVqOCSiVIqS34= github.com/casbin/casbin/v2 v2.1.2/go.mod h1:YcPU1XXisHhLzuxH9coDNf2FbKpjGlbCg3n9yuLkIJQ= -github.com/celestiaorg/celestia-app v1.0.0-rc0 h1:wpuP5fTIEbLCP+U5pGwKfSzXUTE/bE8oqKECFN5yoO0= -github.com/celestiaorg/celestia-app v1.0.0-rc0/go.mod h1:C8pNwFQWBLYIGpdrFesO1uezthrKjv0H5meecYQc1ek= -github.com/celestiaorg/celestia-core v1.21.0-tm-v0.34.27 h1:EdkqFRBypVEq/nX2ZE7KQ6dTlN8j3rEYe+WGahWuSUk= -github.com/celestiaorg/celestia-core v1.21.0-tm-v0.34.27/go.mod h1:GVo91Wifg9KL/nFx9nPkpl0UIFdvvs4fhnly9GhGxZU= +github.com/celestiaorg/celestia-app v1.0.0-rc2 h1:/u7eespYtBpQtBSz3P8/rKfz9rW7QOxkH8ebh8T4VxI= +github.com/celestiaorg/celestia-app v1.0.0-rc2/go.mod h1:uiTWKTtRpVwvSiFDl2zausrU1ZBHBWgk7z52pfzJqJU= +github.com/celestiaorg/celestia-core v1.21.2-tm-v0.34.27 h1:nmr9O5BflgNR1aWehs1ZFw4obA//M/+g+SrSMK9sOBA= +github.com/celestiaorg/celestia-core v1.21.2-tm-v0.34.27/go.mod h1:GVo91Wifg9KL/nFx9nPkpl0UIFdvvs4fhnly9GhGxZU= github.com/celestiaorg/cosmos-sdk v1.13.0-sdk-v0.46.11 h1:Rd5EvJx1nG3KurBspVN51RVmvif0Lp2UVURbG2ad3Cs= github.com/celestiaorg/cosmos-sdk v1.13.0-sdk-v0.46.11/go.mod h1:xCG6OUkJy5KUMEg20Zk010lra9XjkmKS3+bk0wp7bd8= github.com/celestiaorg/dagstore v0.0.0-20230413141458-735ab09a15d6 h1:/yCwMCoOPcYCiG18u8/1pv5eXF04xczoQO3sR0bKsgM= @@ -382,14 +383,14 @@ github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XL github.com/cheekybits/genny v1.0.0/go.mod h1:+tQajlRqAUrPI7DOSpB0XAqZYtQakVtB7wXkRAgjxjQ= github.com/cheggaaa/pb v1.0.27/go.mod h1:pQciLPpbU0oxA0h+VJYYLxO+XeDQb5pZijXscXHm81s= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= -github.com/chzyer/logex v1.2.0 h1:+eqR0HfOetur4tgnC8ftU5imRnhi4te+BadWS95c5AM= -github.com/chzyer/logex v1.2.0/go.mod h1:9+9sk7u7pGNWYMkh0hdiL++6OeibzJccyQU4p4MedaY= +github.com/chzyer/logex v1.2.1 h1:XHDu3E6q+gdHgsdTPH6ImJMIp436vR6MPtH8gP05QzM= +github.com/chzyer/logex v1.2.1/go.mod h1:JLbx6lG2kDbNRFnfkgvh4eRJRPX1QCoOIWomwysCBrQ= github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= -github.com/chzyer/readline v1.5.0 h1:lSwwFrbNviGePhkewF1az4oLmcwqCZijQ2/Wi3BGHAI= -github.com/chzyer/readline v1.5.0/go.mod h1:x22KAscuvRqlLoK9CsoYsmxoXZMMFVyOl86cAH8qUic= +github.com/chzyer/readline v1.5.1 h1:upd/6fQk4src78LMRzh5vItIt361/o4uq553V8B5sGI= +github.com/chzyer/readline v1.5.1/go.mod h1:Eh+b79XXUwfKfcPLepksvw2tcLE/Ct21YObkaSkeBlk= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= -github.com/chzyer/test v0.0.0-20210722231415-061457976a23 
h1:dZ0/VyGgQdVGAss6Ju0dt5P0QltE0SFY5Woh6hbIfiQ= -github.com/chzyer/test v0.0.0-20210722231415-061457976a23/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= +github.com/chzyer/test v1.0.0 h1:p3BQDXSxOhOG0P9z6/hGnII4LGiEPOYBhs8asl/fC04= +github.com/chzyer/test v1.0.0/go.mod h1:2JlltgoNkt4TW/z9V/IzDdFaMTM2JPIi26O1pF38GC8= github.com/cilium/ebpf v0.2.0/go.mod h1:To2CFviqOWL/M0gIMsvSMlqe7em/l1ALkX1PyjrX2Qs= github.com/cilium/ebpf v0.4.0/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs= github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag= @@ -428,8 +429,8 @@ github.com/consensys/gnark-crypto v0.4.1-0.20210426202927-39ac3d4b3f1f/go.mod h1 github.com/consensys/gnark-crypto v0.5.3/go.mod h1:hOdPlWQV1gDLp7faZVeg8Y0iEPFaOUnCc4XeCCk96p0= github.com/containerd/cgroups v0.0.0-20201119153540-4cbc285b3327/go.mod h1:ZJeTFisyysqgcCdecO57Dj79RfL0LNeGiFUqLYQRYLE= github.com/containerd/cgroups v1.0.3/go.mod h1:/ofk34relqNjSGyqPrmEULrO4Sc8LJhvJmWbUCUKqj8= -github.com/containerd/cgroups v1.0.4 h1:jN/mbWBEaz+T1pi5OFtnkQ+8qnmEbAr1Oo1FRm5B0dA= -github.com/containerd/cgroups v1.0.4/go.mod h1:nLNQtsF7Sl2HxNebu77i1R0oDlhiTG+kO4JTrUzo6IA= +github.com/containerd/cgroups v1.1.0 h1:v8rEWFl6EoqHB+swVNjVoCJE8o3jX7e8nqBGPLaDFBM= +github.com/containerd/cgroups v1.1.0/go.mod h1:6ppBcbh/NOOUU+dMKrykgaBnK9lCIBxHqJDGwsa1mIw= github.com/containerd/continuity v0.3.0 h1:nisirsYROK15TAMVukJOUyGJjz4BNQJBVsNvAXZJ/eg= github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= @@ -460,8 +461,8 @@ github.com/cosmos/gorocksdb v1.2.0 h1:d0l3jJG8M4hBouIZq0mDUHZ+zjOx044J3nGRskwTb4 github.com/cosmos/gorocksdb v1.2.0/go.mod h1:aaKvKItm514hKfNJpUJXnnOWeBnk2GL4+Qw9NHizILw= github.com/cosmos/iavl v0.19.5 h1:rGA3hOrgNxgRM5wYcSCxgQBap7fW82WZgY78V9po/iY= github.com/cosmos/iavl v0.19.5/go.mod h1:X9PKD3J0iFxdmgNLa7b2LYWdsGd90ToV5cAONApkEPw= -github.com/cosmos/ibc-go/v6 v6.1.0 h1:o7oXws2vKkKfOFzJI+oNylRn44PCNt5wzHd/zKQKbvQ= -github.com/cosmos/ibc-go/v6 v6.1.0/go.mod h1:CY3zh2HLfetRiW8LY6kVHMATe90Wj/UOoY8T6cuB0is= +github.com/cosmos/ibc-go/v6 v6.1.1 h1:oqqMNyjj6SLQF8rvgCaDGwfdITEIsbhs8F77/8xvRIo= +github.com/cosmos/ibc-go/v6 v6.1.1/go.mod h1:NL17FpFAaWjRFVb1T7LUKuOoMSsATPpu+Icc4zL5/Ik= github.com/cosmos/ledger-cosmos-go v0.12.2 h1:/XYaBlE2BJxtvpkHiBm97gFGSGmYGKunKyF3nNqAXZA= github.com/cosmos/ledger-cosmos-go v0.12.2/go.mod h1:ZcqYgnfNJ6lAXe4HPtWgarNEY+B74i+2/8MhZw4ziiI= github.com/cpuguy83/go-md2man v1.0.10 h1:BSKMNlYxDvnunlTymqtgONjNnaRV1sTpcovwwjF22jk= @@ -492,11 +493,11 @@ github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c/go.mod h1:6Uh github.com/deckarep/golang-set v1.8.0/go.mod h1:5nI87KwE7wgsBU1F4GKAw2Qod7p5kyS383rP6+o6qqo= github.com/deckarep/golang-set/v2 v2.1.0 h1:g47V4Or+DUdzbs8FxCCmgb6VYd+ptPAngjM6dtGktsI= github.com/deckarep/golang-set/v2 v2.1.0/go.mod h1:VAky9rY/yGXJOLEDv3OMci+7wtDpOF4IN+y82NBOac4= -github.com/decred/dcrd/crypto/blake256 v1.0.0 h1:/8DMNYp9SGi5f0w7uCm6d6M4OU2rGFK09Y2A4Xv7EE0= github.com/decred/dcrd/crypto/blake256 v1.0.0/go.mod h1:sQl2p6Y26YV+ZOcSTP6thNdn47hh8kt6rqSlvmrXFAc= +github.com/decred/dcrd/crypto/blake256 v1.0.1 h1:7PltbUIQB7u/FfZ39+DGa/ShuMyJ5ilcvdfma9wOH6Y= github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.1/go.mod h1:hyedUtir6IdtD/7lIxGeCxkaw7y45JueMRL4DIyJDKs= -github.com/decred/dcrd/dcrec/secp256k1/v4 v4.1.0 h1:HbphB4TFFXpv7MNrT52FGrrgVXF1owhMVTHFZIlnvd4= 
-github.com/decred/dcrd/dcrec/secp256k1/v4 v4.1.0/go.mod h1:DZGJHZMqrU4JJqFAWUS2UO1+lbSKsdiOoYi9Zzey7Fc= +github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0 h1:8UrgZ3GkP4i/CLijOJx79Yu+etlyjdBU4sfcs2WYQMs= +github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0/go.mod h1:v57UDF4pDQJcEfFUCRop3lJL149eHGSe9Jvczhzjo/0= github.com/decred/dcrd/lru v1.0.0/go.mod h1:mxKOwFd7lFjN2GZYsiz/ecgqR6kkYAl+0pz0tEMk218= github.com/deepmap/oapi-codegen v1.6.0/go.mod h1:ryDa9AgbELGeB+YEXE1dR53yAjHwFvE9iAUlWl9Al3M= github.com/deepmap/oapi-codegen v1.8.2 h1:SegyeYGcdi0jLLrpbCMoJxnUUn8GBXHsvr4rbzjuhfU= @@ -543,7 +544,6 @@ github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5m github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU= github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I= github.com/eclipse/paho.mqtt.golang v1.2.0/go.mod h1:H9keYFcgq3Qr5OUJm/JZI/i6U7joQ8SYLhZwfeOo6Ts= -github.com/edsrzf/mmap-go v1.0.0 h1:CEBF7HpRnUCSJgGUb5h1Gm7e3VkmVDrR8lvWVLtrOFw= github.com/edsrzf/mmap-go v1.0.0/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M= github.com/elastic/gosigar v0.12.0/go.mod h1:iXRIGg2tLnu7LBdpqzyQfGDEidKCfWcCMS0WKyPWoMs= github.com/elastic/gosigar v0.14.2 h1:Dg80n8cr90OZ7x+bAax/QjoW/XqTI11RmA79ZwIm9/4= @@ -564,8 +564,8 @@ github.com/etclabscore/go-jsonschema-walk v0.0.6/go.mod h1:VdfDY72AFAiUhy0ZXEaWS github.com/etclabscore/go-openrpc-reflect v0.0.37 h1:IH0e7JqIvR9OhbbFWi/BHIkXrqbR3Zyia3RJ733eT6c= github.com/etclabscore/go-openrpc-reflect v0.0.37/go.mod h1:0404Ky3igAasAOpyj1eESjstTyneBAIk5PgJFbK4s5E= github.com/ethereum/go-ethereum v1.10.17/go.mod h1:Lt5WzjM07XlXc95YzrhosmR4J9Ahd6X2wyEV2SvGhk0= -github.com/ethereum/go-ethereum v1.11.6 h1:2VF8Mf7XiSUfmoNOy3D+ocfl9Qu8baQBrCNbo2CXQ8E= -github.com/ethereum/go-ethereum v1.11.6/go.mod h1:+a8pUj1tOyJ2RinsNQD4326YS+leSoKGiG/uVVb0x6Y= +github.com/ethereum/go-ethereum v1.12.0 h1:bdnhLPtqETd4m3mS8BGMNvBTf36bO5bx/hxE2zljOa0= +github.com/ethereum/go-ethereum v1.12.0/go.mod h1:/oo2X/dZLJjf2mJ6YT9wcWxa4nNJDBKDBU6sFIpx1Gs= github.com/facebookgo/ensure v0.0.0-20200202191622-63f1cf65ac4c h1:8ISkoahWXwZR41ois5lSJBSVw4D0OV19Ht/JSTzvSv0= github.com/facebookgo/stack v0.0.0-20160209184415-751773369052 h1:JWuenKqqX8nojtoVVWjGfOF9635RETekkoH6Cc9SX0A= github.com/facebookgo/subset v0.0.0-20200203212716-c811ad88dec4 h1:7HZCaLC5+BZpmbhCOZJ293Lz68O7PYrF2EzeiFMwCLk= @@ -574,8 +574,8 @@ github.com/fatih/color v1.13.0 h1:8LOYc1KYPPmyKMuN8QV2DNRWNbLo6LZ0iLs8+mlH53w= github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= github.com/felixge/httpsnoop v1.0.1 h1:lvB5Jl89CsZtGIWuTcDM1E/vkVs49/Ml7JJe07l8SPQ= github.com/felixge/httpsnoop v1.0.1/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= -github.com/filecoin-project/go-jsonrpc v0.1.9 h1:HRWLxo7HAWzI3xZGeFG4LZJoYpms+Q+8kwmMTLnyS3A= -github.com/filecoin-project/go-jsonrpc v0.1.9/go.mod h1:XBBpuKIMaXIIzeqzO1iucq4GvbF8CxmXRFoezRh+Cx4= +github.com/filecoin-project/go-jsonrpc v0.3.1 h1:qwvAUc5VwAkooquKJmfz9R2+F8znhiqcNHYjEp/NM10= +github.com/filecoin-project/go-jsonrpc v0.3.1/go.mod h1:jBSvPTl8V1N7gSTuCR4bis8wnQnIjHbRPpROol6iQKM= github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5 h1:FtmdgXiUlNeRsoNMFlKLDt+S+6hbjVMEW6RGQ7aUf7c= github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5/go.mod h1:VvhXpOYNQvB+uIk2RvXzuaQtkQJzzIx6lSBe1xv7hi0= github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc= @@ -634,8 +634,8 @@ 
github.com/go-logfmt/logfmt v0.5.1 h1:otpy5pqBCBZ1ng9RQ0dPu4PN7ba75Y/aA+UpowDyNV github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= github.com/go-logr/logr v0.4.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0= -github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.2.4 h1:g01GSCwiDw2xSZfjJ2/T9M+S6pFdcNtFYsp+Y43HYDQ= +github.com/go-logr/logr v1.2.4/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/go-ole/go-ole v1.2.1/go.mod h1:7FAglXiTm7HKlQRDeOQ6ZNUHidzCWXuZWq/1dTyBNF8= @@ -671,8 +671,9 @@ github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/go-stack/stack v1.8.1 h1:ntEHSVwIt7PNXNpgPmVfMrNhLtgjlmnZha2kOpuRiDw= github.com/go-stack/stack v1.8.1/go.mod h1:dcoOX6HbPZSZptuspn9bctJ+N/CnF5gGygcUP3XYfe4= -github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0 h1:p104kn46Q8WdvHunIJ9dAyjPVtrBPhSr3KT2yUst43I= github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= +github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI= +github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls= github.com/go-yaml/yaml v2.1.0+incompatible/go.mod h1:w2MrLa16VYP0jy6N7M5kHaCkaLENm+P+Tv+MfurjSw0= github.com/gobwas/httphead v0.0.0-20180130184737-2c6c146eadee h1:s+21KNqlpePfkah2I+gwHF8xmJWRjooY+5248k6m4A0= github.com/gobwas/httphead v0.0.0-20180130184737-2c6c146eadee/go.mod h1:L0fX3K22YWvt/FAX9NnzrNzcI4wNYi9Yku4O0LKYflo= @@ -733,8 +734,9 @@ github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM= -github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= +github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= @@ -795,8 +797,8 @@ github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLe github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof 
v0.0.0-20221203041831-ce31453925ec h1:fR20TYVVwhK4O7r7y+McjRYyaTH6/vjwJOajE+XhlzM= -github.com/google/pprof v0.0.0-20221203041831-ce31453925ec/go.mod h1:dDKJzRmX4S37WGHujM7tX//fmj1uioxKzKxz3lo4HJo= +github.com/google/pprof v0.0.0-20230602150820-91b7bce49751 h1:hR7/MlvK23p6+lIw9SN1TigNLn9ZnF3W4SYRKq2gAHs= +github.com/google/pprof v0.0.0-20230602150820-91b7bce49751/go.mod h1:Jh3hGz2jkYak8qXPD19ryItVnUgpgeqzdkY/D0EaeuA= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= @@ -830,7 +832,6 @@ github.com/gorilla/handlers v1.5.1 h1:9lRY6j8DEeeBT10CvO9hGW0gmky0BprnvDI5vfhUHH github.com/gorilla/handlers v1.5.1/go.mod h1:t8XrUpc4KVXb7HGyJ4/cEnwQiaxrX/hz1Zv/4g96P1Q= github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= -github.com/gorilla/mux v1.7.4/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= @@ -903,8 +904,8 @@ github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d h1:dg1dEPuWpEqDnvIw251EVy4zlP8gWbsGj4BsUKCRpYs= github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= -github.com/hashicorp/golang-lru/v2 v2.0.1 h1:5pv5N1lT1fjLg2VQ5KWc7kmucp2x/kvFOnxuVTqZ6x4= -github.com/hashicorp/golang-lru/v2 v2.0.1/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM= +github.com/hashicorp/golang-lru/v2 v2.0.2 h1:Dwmkdr5Nc/oBiXgJS3CDHNhJtIHkuZ3DZF5twqnfBdU= +github.com/hashicorp/golang-lru/v2 v2.0.2/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM= github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= @@ -922,8 +923,9 @@ github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpO github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmKTg= github.com/huin/goupnp v1.0.0/go.mod h1:n9v9KO1tAxYH82qOn+UTIFQDmx5n1Zxd/ClZDMX7Bnc= github.com/huin/goupnp v1.0.3-0.20220313090229-ca81a64b4204/go.mod h1:ZxNlw5WqJj6wSsRK5+YfflQGXYfccj5VgQsMNixHM7Y= -github.com/huin/goupnp v1.0.3 h1:N8No57ls+MnjlB+JPiCVSOyy/ot7MJTqlo7rn+NYSqQ= github.com/huin/goupnp v1.0.3/go.mod h1:ZxNlw5WqJj6wSsRK5+YfflQGXYfccj5VgQsMNixHM7Y= +github.com/huin/goupnp v1.2.0 h1:uOKW26NG1hsSSbXIZ1IR7XP9Gjd1U8pnLaCMgntmkmY= +github.com/huin/goupnp v1.2.0/go.mod h1:gnGPsThkYa7bFi/KWmEysQRf48l2dvR5bxr2OFckNX8= github.com/huin/goutil v0.0.0-20170803182201-1ca381bf3150/go.mod h1:PpLOETDnJ0o3iZrZfqZzyLl6l7F3c6L1oWn7OICBi6o= github.com/iancoleman/orderedmap v0.0.0-20190318233801-ac98e3ecb4b0/go.mod h1:N0Wam8K1arqPXNWjMo21EXnBPOPp36vB07FNRdD2geA= github.com/iancoleman/orderedmap v0.1.0 h1:2orAxZBJsvimgEBmMWfXaFlzSG2fbQil5qzP3F6cCkg= @@ -931,8 +933,8 @@ 
github.com/iancoleman/orderedmap v0.1.0/go.mod h1:N0Wam8K1arqPXNWjMo21EXnBPOPp36 github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/icrowley/fake v0.0.0-20180203215853-4178557ae428/go.mod h1:uhpZMVGznybq1itEKXj6RYw9I71qK4kH+OGMjRC4KEo= -github.com/imdario/mergo v0.3.15 h1:M8XP7IuFNsqUx6VPK2P9OSmsYsI/YFaGil0uD21V3dM= -github.com/imdario/mergo v0.3.15/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY= +github.com/imdario/mergo v0.3.16 h1:wwQJbIsHYGMUyLSPrEq1CT16AhnhNJQ51+4fdHUnCl4= +github.com/imdario/mergo v0.3.16/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY= github.com/improbable-eng/grpc-web v0.15.0 h1:BN+7z6uNXZ1tQGcNAuaU1YjsLTApzkjt2tzCixLaUPQ= github.com/improbable-eng/grpc-web v0.15.0/go.mod h1:1sy9HKV4Jt9aEs9JSnkWlRJPuPtwNr0l57L4f878wP8= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= @@ -974,8 +976,8 @@ github.com/ipfs/go-cid v0.0.5/go.mod h1:plgt+Y5MnOey4vO4UlUazGqdbEXuFYitED67Fexh github.com/ipfs/go-cid v0.0.6/go.mod h1:6Ux9z5e+HpkQdckYoX1PG/6xqKspzlEIR5SDmgqgC/I= github.com/ipfs/go-cid v0.0.7/go.mod h1:6Ux9z5e+HpkQdckYoX1PG/6xqKspzlEIR5SDmgqgC/I= github.com/ipfs/go-cid v0.1.0/go.mod h1:rH5/Xv83Rfy8Rw6xG+id3DYAMUVmem1MowoKwdXmN2o= -github.com/ipfs/go-cid v0.3.2 h1:OGgOd+JCFM+y1DjWPmVH+2/4POtpDzwcr7VgnB7mZXc= -github.com/ipfs/go-cid v0.3.2/go.mod h1:gQ8pKqT/sUxGY+tIwy1RPpAojYu7jAyCp5Tz1svoupw= +github.com/ipfs/go-cid v0.4.1 h1:A/T3qGvxi4kpKWWcPC/PgbvDA2bjVLO7n4UeVwnbs/s= +github.com/ipfs/go-cid v0.4.1/go.mod h1:uQHwDeX4c6CtyrFwdqyhpNcxVewur1M7l7fNU7LKwZk= github.com/ipfs/go-datastore v0.0.1/go.mod h1:d4KVXhMt913cLBEI/PXAy6ko+W7e9AhyAKBGh803qeE= github.com/ipfs/go-datastore v0.1.1/go.mod h1:w38XXW9kVFNp57Zj5knbKWM2T+KOZCGDRVNdgPHtbHw= github.com/ipfs/go-datastore v0.4.0/go.mod h1:SX/xMIKoCszPqp+z9JhPYCmoOoXTvaa13XEbGtsFUhA= @@ -1052,7 +1054,6 @@ github.com/ipfs/go-log v1.0.5/go.mod h1:j0b8ZoR+7+R99LD9jZ6+AJsrzkPbSXbZfGakb5JP github.com/ipfs/go-log/v2 v2.0.2/go.mod h1:O7P1lJt27vWHhOwQmcFEvlmo49ry2VY2+JfBWFaa9+0= github.com/ipfs/go-log/v2 v2.0.3/go.mod h1:O7P1lJt27vWHhOwQmcFEvlmo49ry2VY2+JfBWFaa9+0= github.com/ipfs/go-log/v2 v2.0.5/go.mod h1:eZs4Xt4ZUJQFM3DlanGhy7TkwwawCZcSByscwkWG+dw= -github.com/ipfs/go-log/v2 v2.0.8/go.mod h1:eZs4Xt4ZUJQFM3DlanGhy7TkwwawCZcSByscwkWG+dw= github.com/ipfs/go-log/v2 v2.1.1/go.mod h1:2v2nsGfZsvvAJz13SyFzf9ObaqwHiHxsPLEHntrv9KM= github.com/ipfs/go-log/v2 v2.1.3/go.mod h1:/8d0SH3Su5Ooc31QlL1WysJhvyOTDCjcCZ9Axpmri6g= github.com/ipfs/go-log/v2 v2.3.0/go.mod h1:QqGoj30OTpnKaG/LKTGTxoP2mmQtjVMEnK72gynbe/g= @@ -1143,15 +1144,15 @@ github.com/klauspost/compress v1.12.3/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8 github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= github.com/klauspost/compress v1.15.1/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= github.com/klauspost/compress v1.15.11/go.mod h1:QPwzmACJjUTFsnSHH934V6woptycfrDDJnH7hvFVbGM= -github.com/klauspost/compress v1.15.15 h1:EF27CXIuDsYJ6mmvtBRlEuB2UVOqHG1tAXgZ7yIO+lw= -github.com/klauspost/compress v1.15.15/go.mod h1:ZcK2JAFqKOpnBlxcLsJzYfrS9X1akm9fHZNnD9+Vo/4= +github.com/klauspost/compress v1.16.5 h1:IFV2oUNUzZaz+XyusxpLzpzS8Pt5rh0Z16For/djlyI= +github.com/klauspost/compress v1.16.5/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE= github.com/klauspost/cpuid 
v0.0.0-20170728055534-ae7887de9fa5/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= github.com/klauspost/cpuid/v2 v2.0.4/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= github.com/klauspost/cpuid/v2 v2.0.6/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= github.com/klauspost/cpuid/v2 v2.0.12/go.mod h1:g2LTdtYhdyuGPqyWyv7qRAmj1WBqxuObKfj5c0PQa7c= -github.com/klauspost/cpuid/v2 v2.2.3 h1:sxCkb+qR91z4vsqw4vGGZlDgPz3G7gjaLyK3V8y70BU= -github.com/klauspost/cpuid/v2 v2.2.3/go.mod h1:RVVoqg1df56z8g3pUjL/3lE5UfnlrJX8tyFgg4nqhuY= +github.com/klauspost/cpuid/v2 v2.2.5 h1:0E5MSMDEoAulmXNFquVs//DdoomxaoTY1kUhbc/qbZg= +github.com/klauspost/cpuid/v2 v2.2.5/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws= github.com/klauspost/crc32 v0.0.0-20161016154125-cb6bfca970f6/go.mod h1:+ZoRqAPRLkC4NPOvfYeR5KNOrY6TD+/sAC3HXPZgDYg= github.com/klauspost/pgzip v1.0.2-0.20170402124221-0bf5dcad4ada/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs= github.com/klauspost/reedsolomon v1.11.1 h1:0gCWQXOB8pVe1Y5SGozDA5t2qoVxX3prsV+qHgI/Fik= @@ -1160,8 +1161,8 @@ github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxv github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/koron/go-ssdp v0.0.0-20191105050749-2e1c40ed0b5d/go.mod h1:5Ky9EC2xfoUKUor0Hjgi2BJhCSXJfMOFlmyYrVKGQMk= github.com/koron/go-ssdp v0.0.2/go.mod h1:XoLfkAiA2KeZsYh4DbHxD7h3nR2AZNqVQOa+LJuqPYs= -github.com/koron/go-ssdp v0.0.3 h1:JivLMY45N76b4p/vsWGOKewBQu6uf39y8l+AQ7sDKx8= -github.com/koron/go-ssdp v0.0.3/go.mod h1:b2MxI6yh02pKrsyNoQUsk4+YNikaGhe4894J+Q5lDvA= +github.com/koron/go-ssdp v0.0.4 h1:1IDwrghSKYM7yLf7XCzbByg2sJ/JcNOZRXS2jczTwz0= +github.com/koron/go-ssdp v0.0.4/go.mod h1:oDXq+E5IL5q0U8uSBcoAXzTzInwy5lEgC91HoKtbmZk= github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= @@ -1209,11 +1210,11 @@ github.com/libp2p/go-libp2p v0.7.4/go.mod h1:oXsBlTLF1q7pxr+9w6lqzS1ILpyHsaBPniV github.com/libp2p/go-libp2p v0.8.1/go.mod h1:QRNH9pwdbEBpx5DTJYg+qxcVaDMAz3Ee/qDKwXujH5o= github.com/libp2p/go-libp2p v0.14.3/go.mod h1:d12V4PdKbpL0T1/gsUNN8DfgMuRPDX8bS2QxCZlwRH0= github.com/libp2p/go-libp2p v0.19.0/go.mod h1:Ki9jJXLO2YqrTIFxofV7Twyd3INWPT97+r8hGt7XPjI= -github.com/libp2p/go-libp2p v0.26.3 h1:6g/psubqwdaBqNNoidbRKSTBEYgaOuKBhHl8Q5tO+PM= -github.com/libp2p/go-libp2p v0.26.3/go.mod h1:x75BN32YbwuY0Awm2Uix4d4KOz+/4piInkp4Wr3yOo8= +github.com/libp2p/go-libp2p v0.28.0 h1:zO8cY98nJiPzZpFv5w5gqqb8aVzt4ukQ0nVOSaaKhJ8= +github.com/libp2p/go-libp2p v0.28.0/go.mod h1:s3Xabc9LSwOcnv9UD4nORnXKTsWkPMkIMB/JIGXVnzk= github.com/libp2p/go-libp2p-asn-util v0.1.0/go.mod h1:wu+AnM9Ii2KgO5jMmS1rz9dvzTdj8BXqsPR9HR0XB7I= -github.com/libp2p/go-libp2p-asn-util v0.2.0 h1:rg3+Os8jbnO5DxkC7K/Utdi+DkY3q/d1/1q+8WeNAsw= -github.com/libp2p/go-libp2p-asn-util v0.2.0/go.mod h1:WoaWxbHKBymSN41hWSq/lGKJEca7TNm58+gGJi2WsLI= +github.com/libp2p/go-libp2p-asn-util v0.3.0 h1:gMDcMyYiZKkocGXDQ5nsUQyquC9+H+iLEQHwOCZ7s8s= +github.com/libp2p/go-libp2p-asn-util v0.3.0/go.mod h1:B1mcOrKUE35Xq/ASTmQ4tN3LNzVVaMNmq2NACuqyB9w= github.com/libp2p/go-libp2p-autonat v0.1.1/go.mod h1:OXqkeGOY2xJVWKAGV2inNF5aKN/djNA3fdpCWloIudE= github.com/libp2p/go-libp2p-autonat v0.2.0/go.mod 
h1:DX+9teU4pEEoZUqR1PiMlqliONQdNbfzE1C718tcViI= github.com/libp2p/go-libp2p-autonat v0.2.1/go.mod h1:MWtAhV5Ko1l6QBsHQNSuM6b1sRkXrpk0/LqCr+vCVxI= @@ -1258,8 +1259,8 @@ github.com/libp2p/go-libp2p-crypto v0.1.0/go.mod h1:sPUokVISZiy+nNuTTH/TY+leRSxn github.com/libp2p/go-libp2p-discovery v0.2.0/go.mod h1:s4VGaxYMbw4+4+tsoQTqh7wfxg97AEdo4GYBt6BadWg= github.com/libp2p/go-libp2p-discovery v0.3.0/go.mod h1:o03drFnz9BVAZdzC/QUQ+NeQOu38Fu7LJGEOK2gQltw= github.com/libp2p/go-libp2p-discovery v0.5.0/go.mod h1:+srtPIU9gDaBNu//UHvcdliKBIcr4SfDcm0/PfPJLug= -github.com/libp2p/go-libp2p-kad-dht v0.21.0 h1:J0Yd22VA+sk0CJRGMgtfHvLVIkZDyJ3AJGiljywIw5U= -github.com/libp2p/go-libp2p-kad-dht v0.21.0/go.mod h1:Bhm9diAFmc6qcWAr084bHNL159srVZRKADdp96Qqd1I= +github.com/libp2p/go-libp2p-kad-dht v0.21.1 h1:xpfp8/t9+X2ip1l8Umap1/UGNnJ3RHJgKGAEsnRAlTo= +github.com/libp2p/go-libp2p-kad-dht v0.21.1/go.mod h1:Oy8wvbdjpB70eS5AaFaI68tOtrdo3KylTvXDjikxqFo= github.com/libp2p/go-libp2p-kbucket v0.5.0 h1:g/7tVm8ACHDxH29BGrpsQlnNeu+6OF1A9bno/4/U1oA= github.com/libp2p/go-libp2p-kbucket v0.5.0/go.mod h1:zGzGCpQd78b5BNTDGHNDLaTt9aDK/A02xeZp9QeFC4U= github.com/libp2p/go-libp2p-loggables v0.1.0/go.mod h1:EyumB2Y6PrYjr55Q3/tiJ/o3xoDasoRYM7nOzEpoa90= @@ -1364,8 +1365,9 @@ github.com/libp2p/go-msgio v0.3.0 h1:mf3Z8B1xcFN314sWX+2vOTShIE0Mmn2TXn3YCUQGNj0 github.com/libp2p/go-msgio v0.3.0/go.mod h1:nyRM819GmVaF9LX3l03RMh10QdOroF++NBbxAb0mmDM= github.com/libp2p/go-nat v0.0.4/go.mod h1:Nmw50VAvKuk38jUBcmNh6p9lUJLoODbJRvYAa/+KSDo= github.com/libp2p/go-nat v0.0.5/go.mod h1:B7NxsVNPZmRLvMOwiEO1scOSyjA56zxYAGv1yQgRkEU= -github.com/libp2p/go-nat v0.1.0 h1:MfVsH6DLcpa04Xr+p8hmVRG4juse0s3J8HyNWYHffXg= github.com/libp2p/go-nat v0.1.0/go.mod h1:X7teVkwRHNInVNWQiO/tAiAVRwSr5zoRz4YSTC3uRBM= +github.com/libp2p/go-nat v0.2.0 h1:Tyz+bUFAYqGyJ/ppPPymMGbIgNRH+WqC5QrT5fKrrGk= +github.com/libp2p/go-nat v0.2.0/go.mod h1:3MJr+GRpRkyT65EpVPBstXLvOlAPzUVlG6Pwg9ohLJk= github.com/libp2p/go-netroute v0.1.2/go.mod h1:jZLDV+1PE8y5XxBySEBgbuVAXbhtuHSdmLPL2n9MKbk= github.com/libp2p/go-netroute v0.1.3/go.mod h1:jZLDV+1PE8y5XxBySEBgbuVAXbhtuHSdmLPL2n9MKbk= github.com/libp2p/go-netroute v0.1.5/go.mod h1:V1SR3AaECRkEQCoFFzYwVYWvYIEtlxx89+O3qcpCl4A= @@ -1381,8 +1383,8 @@ github.com/libp2p/go-openssl v0.0.7/go.mod h1:unDrJpgy3oFr+rqXsarWifmJuNnJR4chtO github.com/libp2p/go-reuseport v0.0.1/go.mod h1:jn6RmB1ufnQwl0Q1f+YxAj8isJgDCQzaaxIFYDhcYEA= github.com/libp2p/go-reuseport v0.0.2/go.mod h1:SPD+5RwGC7rcnzngoYC86GjPzjSywuQyMVAheVBD9nQ= github.com/libp2p/go-reuseport v0.1.0/go.mod h1:bQVn9hmfcTaoo0c9v5pBhOarsU1eNOBZdaAd2hzXRKU= -github.com/libp2p/go-reuseport v0.2.0 h1:18PRvIMlpY6ZK85nIAicSBuXXvrYoSw3dsBAR7zc560= -github.com/libp2p/go-reuseport v0.2.0/go.mod h1:bvVho6eLMm6Bz5hmU0LYN3ixd3nPPvtIlaURZZgOY4k= +github.com/libp2p/go-reuseport v0.3.0 h1:iiZslO5byUYZEg9iCwJGf5h+sf1Agmqx2V2FDjPyvUw= +github.com/libp2p/go-reuseport v0.3.0/go.mod h1:laea40AimhtfEqysZ71UpYj4S+R9VpH8PgqLo7L+SwI= github.com/libp2p/go-reuseport-transport v0.0.2/go.mod h1:YkbSDrvjUVDL6b8XqriyA20obEtsW9BLkuOUyQAOCbs= github.com/libp2p/go-reuseport-transport v0.0.3/go.mod h1:Spv+MPft1exxARzP2Sruj2Wb5JSyHNncjf1Oi2dEbzM= github.com/libp2p/go-reuseport-transport v0.0.4/go.mod h1:trPa7r/7TJK/d+0hdBLOCGvpQQVOU74OXbNCIMkufGw= @@ -1471,8 +1473,8 @@ github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Ky github.com/mattn/go-isatty v0.0.13/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= 
github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= -github.com/mattn/go-isatty v0.0.17 h1:BTarxUcIeDqL27Mc+vyvdWYSL28zpIhv3RoTdsLMPng= -github.com/mattn/go-isatty v0.0.17/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= +github.com/mattn/go-isatty v0.0.19 h1:JITubQf0MOLdlGRuRq+jtsDlekdYPia9ZFsB8h/APPA= +github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= github.com/mattn/go-runewidth v0.0.3/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= github.com/mattn/go-runewidth v0.0.4/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= @@ -1491,8 +1493,8 @@ github.com/miekg/dns v1.1.28/go.mod h1:KNUDUusw/aVsxyTYZM1oqvCicbwhgbNgztCETuNZ7 github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI= github.com/miekg/dns v1.1.43/go.mod h1:+evo5L0630/F6ca/Z9+GAqzhjGyn8/c+TBaOyfEl0V4= github.com/miekg/dns v1.1.48/go.mod h1:e3IlAVfNqAllflbibAZEWOXOQ+Ynzk/dDozDxY7XnME= -github.com/miekg/dns v1.1.50 h1:DQUfb9uc6smULcREF09Uc+/Gd46YWqJd5DbpPE9xkcA= -github.com/miekg/dns v1.1.50/go.mod h1:e3IlAVfNqAllflbibAZEWOXOQ+Ynzk/dDozDxY7XnME= +github.com/miekg/dns v1.1.54 h1:5jon9mWcb0sFJGpnI99tOMhCPyJ+RPVz5b63MQG0VWI= +github.com/miekg/dns v1.1.54/go.mod h1:uInx36IzPl7FYnDcMeVWxj9byh7DutNykX4G9Sj60FY= github.com/mikioh/tcp v0.0.0-20190314235350-803a9b46060c h1:bzE/A84HN25pxAuk9Eej1Kz9OUelF97nAc82bDquQI8= github.com/mikioh/tcp v0.0.0-20190314235350-803a9b46060c/go.mod h1:0SQS9kMwD2VsyFEB++InYyBJroV/FRmBgcydeSUcJms= github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b h1:z78hV3sbSMAUoyUMM0I83AUIT6Hu17AWfgjzIbtrYFc= @@ -1510,8 +1512,9 @@ github.com/minio/sha256-simd v0.0.0-20190328051042-05b4dd3047e5/go.mod h1:2FMWW+ github.com/minio/sha256-simd v0.1.0/go.mod h1:2FMWW+8GMoPweT6+pI63m9YE3Lmw4J71hV56Chs1E/U= github.com/minio/sha256-simd v0.1.1-0.20190913151208-6de447530771/go.mod h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl0J58iy0KM= github.com/minio/sha256-simd v0.1.1/go.mod h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl0J58iy0KM= -github.com/minio/sha256-simd v1.0.0 h1:v1ta+49hkWZyvaKwrQB8elexRqm6Y0aMLjCNsrYxo6g= github.com/minio/sha256-simd v1.0.0/go.mod h1:OuYzVNI5vcoYIAmbIvHPl3N3jUzVedXbKy5RFepssQM= +github.com/minio/sha256-simd v1.0.1 h1:6kaan5IFmwTNynnKKpDHe6FWHohJOHhCPchzK49dzMM= +github.com/minio/sha256-simd v1.0.1/go.mod h1:Pz6AKMiUdngCLpeTL/RJY1M9rUuPMYujV5xJjtbRSN8= github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= @@ -1567,8 +1570,8 @@ github.com/multiformats/go-multiaddr v0.3.3/go.mod h1:lCKNGP1EQ1eZ35Za2wlqnabm9x github.com/multiformats/go-multiaddr v0.4.0/go.mod h1:YcpyLH8ZPudLxQlemYBPhSm0/oCXAT8Z4mzFpyoPyRc= github.com/multiformats/go-multiaddr v0.4.1/go.mod h1:3afI9HfVW8csiF8UZqtpYRiDyew8pRX7qLIGHu9FLuM= github.com/multiformats/go-multiaddr v0.5.0/go.mod h1:3KAxNkUqLTJ20AAwN4XVX4kZar+bR+gh4zgbfr3SNug= -github.com/multiformats/go-multiaddr v0.8.0 h1:aqjksEcqK+iD/Foe1RRFsGZh8+XFiGo7FgUCZlpv3LU= -github.com/multiformats/go-multiaddr v0.8.0/go.mod h1:Fs50eBDWvZu+l3/9S6xAE7ZYj6yhxlvaVZjakWN7xRs= +github.com/multiformats/go-multiaddr v0.9.0 h1:3h4V1LHIk5w4hJHekMKWALPXErDfz/sggzwC/NcqbDQ= +github.com/multiformats/go-multiaddr v0.9.0/go.mod h1:mI67Lb1EeTOYb8GQfL/7wpIZwc46ElrvzhYnoJOmTT0= 
github.com/multiformats/go-multiaddr-dns v0.0.1/go.mod h1:9kWcqw/Pj6FwxAwW38n/9403szc57zJPs45fmnznu3Q= github.com/multiformats/go-multiaddr-dns v0.0.2/go.mod h1:9kWcqw/Pj6FwxAwW38n/9403szc57zJPs45fmnznu3Q= github.com/multiformats/go-multiaddr-dns v0.2.0/go.mod h1:TJ5pr5bBO7Y1B18djPuRsVkduhQH2YqYSbxWJzYGdK0= @@ -1587,13 +1590,13 @@ github.com/multiformats/go-multiaddr-net v0.1.5/go.mod h1:ilNnaM9HbmVFqsb/qcNysj github.com/multiformats/go-multiaddr-net v0.2.0/go.mod h1:gGdH3UXny6U3cKKYCvpXI5rnK7YaOIEOPVDI9tsJbEA= github.com/multiformats/go-multibase v0.0.1/go.mod h1:bja2MqRZ3ggyXtZSEDKpl0uO/gviWFaSteVbWT51qgs= github.com/multiformats/go-multibase v0.0.3/go.mod h1:5+1R4eQrT3PkYZ24C3W2Ue2tPwIdYQD509ZjSb5y9Oc= -github.com/multiformats/go-multibase v0.1.1 h1:3ASCDsuLX8+j4kx58qnJ4YFq/JWTJpCyDW27ztsVTOI= -github.com/multiformats/go-multibase v0.1.1/go.mod h1:ZEjHE+IsUrgp5mhlEAYjMtZwK1k4haNkcaPg9aoe1a8= +github.com/multiformats/go-multibase v0.2.0 h1:isdYCVLvksgWlMW9OZRYJEa9pZETFivncJHmHnnd87g= +github.com/multiformats/go-multibase v0.2.0/go.mod h1:bFBZX4lKCA/2lyOFSAoKH5SS6oPyjtnzK/XTFDPkNuk= github.com/multiformats/go-multicodec v0.3.0/go.mod h1:qGGaQmioCDh+TeFOnxrbU0DaIPw8yFgAZgFG0V7p1qQ= github.com/multiformats/go-multicodec v0.3.1-0.20210902112759-1539a079fd61/go.mod h1:1Hj/eHRaVWSXiSNNfcEPcwZleTmdNP81xlxDLnWU9GQ= github.com/multiformats/go-multicodec v0.4.1/go.mod h1:1Hj/eHRaVWSXiSNNfcEPcwZleTmdNP81xlxDLnWU9GQ= -github.com/multiformats/go-multicodec v0.8.0 h1:evBmgkbSQux+Ds2IgfhkO38Dl2GDtRW8/Rp6YiSHX/Q= -github.com/multiformats/go-multicodec v0.8.0/go.mod h1:GUC8upxSBE4oG+q3kWZRw/+6yC1BqO550bjhWsJbZlw= +github.com/multiformats/go-multicodec v0.9.0 h1:pb/dlPnzee/Sxv/j4PmkDRxCOi3hXTz3IbPKOXWJkmg= +github.com/multiformats/go-multicodec v0.9.0/go.mod h1:L3QTQvMIaVBkXOXXtVmYE+LI16i14xuaojr/H7Ai54k= github.com/multiformats/go-multihash v0.0.1/go.mod h1:w/5tugSrLEbWqlcgJabL3oHFKTwfvkofsjW2Qa1ct4U= github.com/multiformats/go-multihash v0.0.5/go.mod h1:lt/HCbqlQwlPBz7lv0sQCdtfcMtlJvakRUn/0Ual8po= github.com/multiformats/go-multihash v0.0.8/go.mod h1:YSLudS+Pi8NHE7o6tb3D8vrpKa63epEDmG8nTduyAew= @@ -1602,8 +1605,8 @@ github.com/multiformats/go-multihash v0.0.13/go.mod h1:VdAWLKTwram9oKAatUcLxBNUj github.com/multiformats/go-multihash v0.0.14/go.mod h1:VdAWLKTwram9oKAatUcLxBNUjdtcVwxObEQBtRfuyjc= github.com/multiformats/go-multihash v0.0.15/go.mod h1:D6aZrWNLFTV/ynMpKsNtB40mJzmCl4jb1alC0OvHiHg= github.com/multiformats/go-multihash v0.1.0/go.mod h1:RJlXsxt6vHGaia+S8We0ErjhojtKzPP2AH4+kYM7k84= -github.com/multiformats/go-multihash v0.2.2-0.20221030163302-608669da49b6 h1:qLF997Rz0X1WvdcZ2r5CUkLZ2rvdiXwG1JRSrJZEAuE= -github.com/multiformats/go-multihash v0.2.2-0.20221030163302-608669da49b6/go.mod h1:kaHxr8TfO1cxIR/tYxgZ7e59HraJq8arEQQR8E/YNvI= +github.com/multiformats/go-multihash v0.2.2 h1:Uu7LWs/PmWby1gkj1S1DXx3zyd3aVabA4FiMKn/2tAc= +github.com/multiformats/go-multihash v0.2.2/go.mod h1:dXgKXCXjBzdscBLk9JkjINiEsCKRVch90MdaGiKsvSM= github.com/multiformats/go-multistream v0.1.0/go.mod h1:fJTiDfXJVmItycydCnNx4+wSzZ5NwG2FEVAI30fiovg= github.com/multiformats/go-multistream v0.1.1/go.mod h1:KmHZ40hzVxiaiwlj3MEbYgK9JFk2/9UktWZAF54Du38= github.com/multiformats/go-multistream v0.2.1/go.mod h1:5GZPQZbkWOLOn3J2y4Y99vVW7vOfsAflxARk3x14o6k= @@ -1655,8 +1658,8 @@ github.com/onsi/ginkgo v1.16.2/go.mod h1:CObGmKUOKaSC0RjmoAK7tKyn4Azo5P2IWuoMnvw github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0= github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= 
github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= -github.com/onsi/ginkgo/v2 v2.5.1 h1:auzK7OI497k6x4OvWq+TKAcpcSAlod0doAH72oIN0Jw= -github.com/onsi/ginkgo/v2 v2.5.1/go.mod h1:63DOGlLAH8+REH8jUGdL3YpCpu7JODesutUjdENfUAc= +github.com/onsi/ginkgo/v2 v2.9.7 h1:06xGQy5www2oN160RtEZoTvnP2sPhEfePYmCDc2szss= +github.com/onsi/ginkgo/v2 v2.9.7/go.mod h1:cxrmXWykAwTwhQsJOPfdIDiJ+l2RYq7U8hFU+M/1uw0= github.com/onsi/gomega v1.4.1/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= @@ -1664,7 +1667,7 @@ github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7J github.com/onsi/gomega v1.9.0/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoTdcA= github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= github.com/onsi/gomega v1.13.0/go.mod h1:lRk9szgn8TxENtWd0Tp4c3wjlRfMTMH27I+3Je41yGY= -github.com/onsi/gomega v1.24.0 h1:+0glovB9Jd6z3VR+ScSwQqXVTIfJcGA9UBM8yzQxhqg= +github.com/onsi/gomega v1.27.7 h1:fVih9JD6ogIiHUN6ePK7HJidyEDpWGVB5mzM7cWNXoU= github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk= github.com/open-rpc/meta-schema v0.0.0-20201029221707-1b72ef2ea333 h1:CznVS40zms0Dj5he4ERo+fRPtO0qxUk8lA8Xu3ddet0= github.com/open-rpc/meta-schema v0.0.0-20201029221707-1b72ef2ea333/go.mod h1:Ag6rSXkHIckQmjFBCweJEEt1mrTPBv8b9W4aU/NQWfI= @@ -1745,8 +1748,8 @@ github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1: github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.1.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.3.0 h1:UBgGFHqYdG/TPFD1B1ogZywDqEkwp3fBMvqdiQ7Xew4= -github.com/prometheus/client_model v0.3.0/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w= +github.com/prometheus/client_model v0.4.0 h1:5lQXD3cAg1OXBf4Wq03gTrXHeaV0TQvGfUooCfx1yqY= +github.com/prometheus/client_model v0.4.0/go.mod h1:oMQmHW1/JoDwqLtg57MGgP/Fb1CJEYF2imWWhWtMkYU= github.com/prometheus/common v0.0.0-20180801064454-c7de2306084e/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= @@ -1775,22 +1778,22 @@ github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1 github.com/prometheus/procfs v0.9.0 h1:wzCHvIvM5SxWqYvwgVL7yJY8Lz3PKn49KQtpgMYJfhI= github.com/prometheus/procfs v0.9.0/go.mod h1:+pB4zwohETzFnmlpe6yd2lSc+0/46IYZRB/chUwxUZY= github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= -github.com/pyroscope-io/client v0.7.0 h1:LWuuqPQ1oa6x7BnmUOuo/aGwdX85QGhWZUBYWWW3zdk= -github.com/pyroscope-io/client v0.7.0/go.mod h1:4h21iOU4pUOq0prKyDlvYRL+SCKsBc5wKiEtV+rJGqU= +github.com/pyroscope-io/client v0.7.1 h1:yFRhj3vbgjBxehvxQmedmUWJQ4CAfCHhn+itPsuWsHw= +github.com/pyroscope-io/client v0.7.1/go.mod h1:4h21iOU4pUOq0prKyDlvYRL+SCKsBc5wKiEtV+rJGqU= github.com/pyroscope-io/godeltaprof v0.1.0 h1:UBqtjt0yZi4jTxqZmLAs34XG6ycS3vUTlhEUSq4NHLE= github.com/pyroscope-io/godeltaprof 
v0.1.0/go.mod h1:psMITXp90+8pFenXkKIpNhrfmI9saQnPbba27VIaiQE= github.com/pyroscope-io/otel-profiling-go v0.4.0 h1:Hk/rbUqOWoByoWy1tt4r5BX5xoKAvs5drr0511Ki8ic= github.com/pyroscope-io/otel-profiling-go v0.4.0/go.mod h1:MXaofiWU7PgLP7eISUZJYVO4Z8WYMqpkYgeP4XrPLyg= github.com/quic-go/qpack v0.4.0 h1:Cr9BXA1sQS2SmDUWjSofMPNKmvF6IiIfDRmgU0w1ZCo= github.com/quic-go/qpack v0.4.0/go.mod h1:UZVnYIfi5GRk+zI9UMaCPsmZ2xKJP7XBUvVyT1Knj9A= -github.com/quic-go/qtls-go1-19 v0.2.1 h1:aJcKNMkH5ASEJB9FXNeZCyTEIHU1J7MmHyz1Q1TSG1A= -github.com/quic-go/qtls-go1-19 v0.2.1/go.mod h1:ySOI96ew8lnoKPtSqx2BlI5wCpUVPT05RMAlajtnyOI= -github.com/quic-go/qtls-go1-20 v0.1.1 h1:KbChDlg82d3IHqaj2bn6GfKRj84Per2VGf5XV3wSwQk= -github.com/quic-go/qtls-go1-20 v0.1.1/go.mod h1:JKtK6mjbAVcUTN/9jZpvLbGxvdWIKS8uT7EiStoU1SM= +github.com/quic-go/qtls-go1-19 v0.3.2 h1:tFxjCFcTQzK+oMxG6Zcvp4Dq8dx4yD3dDiIiyc86Z5U= +github.com/quic-go/qtls-go1-19 v0.3.2/go.mod h1:ySOI96ew8lnoKPtSqx2BlI5wCpUVPT05RMAlajtnyOI= +github.com/quic-go/qtls-go1-20 v0.2.2 h1:WLOPx6OY/hxtTxKV1Zrq20FtXtDEkeY00CGQm8GEa3E= +github.com/quic-go/qtls-go1-20 v0.2.2/go.mod h1:JKtK6mjbAVcUTN/9jZpvLbGxvdWIKS8uT7EiStoU1SM= github.com/quic-go/quic-go v0.33.0 h1:ItNoTDN/Fm/zBlq769lLJc8ECe9gYaW40veHCCco7y0= github.com/quic-go/quic-go v0.33.0/go.mod h1:YMuhaAV9/jIu0XclDXwZPAsP/2Kgr5yMYhe9oxhhOFA= -github.com/quic-go/webtransport-go v0.5.2 h1:GA6Bl6oZY+g/flt00Pnu0XtivSD8vukOu3lYhJjnGEk= -github.com/quic-go/webtransport-go v0.5.2/go.mod h1:OhmmgJIzTTqXK5xvtuX0oBpLV2GkLWNDA+UeTGJXErU= +github.com/quic-go/webtransport-go v0.5.3 h1:5XMlzemqB4qmOlgIus5zB45AcZ2kCgCy2EptUrfOPWU= +github.com/quic-go/webtransport-go v0.5.3/go.mod h1:OhmmgJIzTTqXK5xvtuX0oBpLV2GkLWNDA+UeTGJXErU= github.com/rakyll/statik v0.1.7 h1:OF3QCZUuyPxuGEP7B4ypUa7sB/iHtqOTDYZXGM8KOdQ= github.com/rakyll/statik v0.1.7/go.mod h1:AlZONWzMtEnMs7W4e/1LURLiI49pIMmp6V9Unghqrcc= github.com/raulk/clock v1.1.0/go.mod h1:3MpVxdZ/ODBQDxbN+kzshf5OSZwPjtMDx6BBXBmOeY0= @@ -1926,8 +1929,8 @@ github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -github.com/stretchr/testify v1.8.3 h1:RP3t2pwF7cMEbC1dqtB6poj3niw/9gnV4Cjg5oW5gtY= -github.com/stretchr/testify v1.8.3/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= +github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= +github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/subosito/gotenv v1.4.1 h1:jyEFiXpy21Wm81FBN71l9VoMMV8H8jG+qIK3GCpY6Qs= github.com/subosito/gotenv v1.4.1/go.mod h1:ayKnFf/c6rvx/2iiLrJUk1e6plDbT3edrFNGqEflhK0= github.com/syndtr/goleveldb v1.0.0/go.mod h1:ZVVdQEZoIme9iO1Ch2Jdy24qqXrMMOU6lpPAyBWyWuQ= @@ -2082,12 +2085,12 @@ go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= -go.uber.org/atomic v1.10.0 h1:9qC72Qh0+3MqyJbAn8YU5xVq1frD8bn3JtD2oXtafVQ= -go.uber.org/atomic v1.10.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= -go.uber.org/dig v1.16.1 h1:+alNIBsl0qfY0j6epRubp/9obgtrObRAc5aD+6jbWY8= -go.uber.org/dig v1.16.1/go.mod 
h1:557JTAUZT5bUK0SvCwikmLPPtdQhfvLYtO5tJgQSbnk= -go.uber.org/fx v1.19.2 h1:SyFgYQFr1Wl0AYstE8vyYIzP4bFz2URrScjwC4cwUvY= -go.uber.org/fx v1.19.2/go.mod h1:43G1VcqSzbIv77y00p1DRAsyZS8WdzuYdhZXmEUkMyQ= +go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE= +go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= +go.uber.org/dig v1.17.0 h1:5Chju+tUvcC+N7N6EV08BJz41UZuO3BmHcN4A287ZLI= +go.uber.org/dig v1.17.0/go.mod h1:rTxpf7l5I0eBTlE6/9RL+lDybC7WFwY2QH55ZSjy1mU= +go.uber.org/fx v1.19.3 h1:YqMRE4+2IepTYCMOvXqQpRa+QAVdiSTnsHU4XNWBceA= +go.uber.org/fx v1.19.3/go.mod h1:w2HrQg26ql9fLK7hlBiZ6JsRUKV+Lj/atT1KCjT8YhM= go.uber.org/goleak v1.0.0/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= go.uber.org/goleak v1.1.11-0.20210813005559-691160354723/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= @@ -2152,8 +2155,8 @@ golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5y golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20211108221036-ceb1ce70b4fa/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20220411220226-7b82a4e95df4/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/crypto v0.7.0 h1:AvwMYaRytfdeVt3u6mLaxYtErKYjxA2OXjJ1HHq6t3A= -golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU= +golang.org/x/crypto v0.9.0 h1:LF6fAI+IutBocDJ2OT0Q1g8plpYljMZ4+lty+dsqw3g= +golang.org/x/crypto v0.9.0/go.mod h1:yrmDGqONDYtNj3tH8X9dzUun2m2lzPa9ngI6/RUPGR0= golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= @@ -2171,8 +2174,8 @@ golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMk golang.org/x/exp v0.0.0-20200331195152-e8c3332aa8e5/go.mod h1:4M0jN8W1tt0AVLNr8HDosyJCDCDuyL9N9+3m7wDWgKw= golang.org/x/exp v0.0.0-20210615023648-acb5c1269671/go.mod h1:DVyR6MI7P4kEQgvZJSj1fQGrWIi2RzIrfYWycwheUAc= golang.org/x/exp v0.0.0-20210714144626-1041f73d31d8/go.mod h1:DVyR6MI7P4kEQgvZJSj1fQGrWIi2RzIrfYWycwheUAc= -golang.org/x/exp v0.0.0-20230206171751-46f607a40771 h1:xP7rWLUr1e1n2xkK5YB4LI0hPEy3LJC6Wk+D4pGlOJg= -golang.org/x/exp v0.0.0-20230206171751-46f607a40771/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc= +golang.org/x/exp v0.0.0-20230321023759-10a507213a29 h1:ooxPy7fPvB4kwsA2h+iBNHkAbp/4JxTSwCmvdjEYmug= +golang.org/x/exp v0.0.0-20230321023759-10a507213a29/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc= golang.org/x/image v0.0.0-20180708004352-c73c2afc3b81/go.mod h1:ux5Hcp/YLpHSI86hEcLt0YII63i6oz57MZXIpbrjZUs= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= @@ -2204,8 +2207,8 @@ golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod 
h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= -golang.org/x/mod v0.9.0 h1:KENHtAZL2y3NLMYZeHY9DW8HW8V+kQyJsY/V9JlKvCs= -golang.org/x/mod v0.9.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.10.0 h1:lFO9qtOdlre5W1jxS3r/4szv2/6iXxScdzjoBMXNhYk= +golang.org/x/mod v0.10.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/net v0.0.0-20180719180050-a680a1efc54d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -2286,8 +2289,8 @@ golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug golang.org/x/net v0.0.0-20220909164309-bea034e7d591/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= golang.org/x/net v0.0.0-20221014081412-f15817d10f9b/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= -golang.org/x/net v0.8.0 h1:Zrh2ngAOFYneWTAIAPethzeaQLuHwhuBkuV6ZiRnUaQ= -golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= +golang.org/x/net v0.10.0 h1:X2//UzNDwYmtCLn7To6G58Wr6f5ahEAQgKNzv9Y951M= +golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20181017192945-9dcd33a902f4/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= @@ -2332,8 +2335,8 @@ golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220929204114-8fcdb60fdcc0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o= -golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.2.0 h1:PUR+T4wwASmuSTYdKjYHI5TD22Wy5ogLU5qZCOLxBrI= +golang.org/x/sync v0.2.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180810173357-98c5dad5d1a0/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -2456,20 +2459,21 @@ golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220610221304-9f5ed59c137d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220615213510-4f61da869c0c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220624220833-87e55d714810/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220704084225-05e143d24a9e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys 
v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.6.0 h1:MVltZSvRTcU2ljQOhs94SXPftV6DCNnZViHeQps87pQ= +golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.8.0 h1:EBmGv8NaZBZTWvrbjNoL6HVt+IVy3QDQpJs7VRIw3tU= +golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.6.0 h1:clScbb1cHjoCkyRbWwBEUZ5H/tIFu5TAXIqaZD0Gcjw= -golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U= +golang.org/x/term v0.8.0 h1:n5xxQn2i3PC0yLAbjTpNT85q/Kgzcr2gIoX9OrJUols= +golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -2480,8 +2484,8 @@ golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.8.0 h1:57P1ETyNKtuIjB4SRd15iJxuhj8Gc416Y78H3qgMh68= -golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= +golang.org/x/text v0.9.0 h1:2sjJmO8cDvYveuX97RDLsxlyUxLl+GHoLxBiRdHllBE= +golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -2561,8 +2565,8 @@ golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.6-0.20210726203631-07bc1bf47fb2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.10/go.mod h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= -golang.org/x/tools v0.7.0 h1:W4OVu8VVOaIO0yzWMNdepAulS7YfoS3Zabrm8DOXXU4= -golang.org/x/tools v0.7.0/go.mod h1:4pg6aUX35JBAogB10C9AtvVL+qowtN4pT3CGSQex14s= +golang.org/x/tools v0.9.1 h1:8WMNJAz3zrtPmnYC7ISf5dEn3MT0gY7jBJfw27yrrLo= +golang.org/x/tools v0.9.1/go.mod h1:owI94Op576fPu3cIGQeHs3joujW/2Oc6MtlxbF5dfNc= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod 
h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -2821,8 +2825,8 @@ google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQ google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= -google.golang.org/protobuf v1.28.2-0.20220831092852-f930b1dc76e8 h1:KR8+MyP7/qOlV+8Af01LtjL04bu7on42eVsxT4EyBQk= -google.golang.org/protobuf v1.28.2-0.20220831092852-f930b1dc76e8/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.30.0 h1:kPPoIgf3TsEvrm0PFe15JQ+570QVxYzEvvHqChK+cng= +google.golang.org/protobuf v1.30.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= @@ -2875,8 +2879,9 @@ honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9 honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.1.3/go.mod h1:NgwopIslSNH47DimFoV78dnkksY2EFtX0ajyb3K/las= lukechampine.com/blake3 v1.1.6/go.mod h1:tkKEOtDkNtklkXtLNEOGNq5tcV90tJiA1vAA12R78LA= -lukechampine.com/blake3 v1.1.7 h1:GgRMhmdsuK8+ii6UZFDL8Nb+VyMwadAgcJyfYHxG6n0= lukechampine.com/blake3 v1.1.7/go.mod h1:tkKEOtDkNtklkXtLNEOGNq5tcV90tJiA1vAA12R78LA= +lukechampine.com/blake3 v1.2.1 h1:YuqqRuaqsGV71BV/nm9xlI0MKUv4QC54jQnBChWbGnI= +lukechampine.com/blake3 v1.2.1/go.mod h1:0OFRp7fBtAylGVCO40o87sbupkyIGgbpv1+M1k1LM6k= nhooyr.io/websocket v1.8.6/go.mod h1:B70DZP8IakI65RVQ51MsWP/8jndNma26DVA/nFSCgW0= nhooyr.io/websocket v1.8.7 h1:usjR2uOr/zjjkVMy0lW+PPohFok7PCow5sDjLgX4P4g= nhooyr.io/websocket v1.8.7/go.mod h1:B70DZP8IakI65RVQ51MsWP/8jndNma26DVA/nFSCgW0= diff --git a/header/headertest/testing.go b/header/headertest/testing.go index 76bda50db5..3e0da71d69 100644 --- a/header/headertest/testing.go +++ b/header/headertest/testing.go @@ -321,6 +321,31 @@ func FraudMaker(t *testing.T, faultHeight int64, bServ blockservice.BlockService } } +func ExtendedHeaderFromEDS(t *testing.T, height uint64, eds *rsmt2d.ExtendedDataSquare) *header.ExtendedHeader { + valSet, vals := RandValidatorSet(10, 10) + gen := RandRawHeader(t) + dah := da.NewDataAvailabilityHeader(eds) + + gen.DataHash = dah.Hash() + gen.ValidatorsHash = valSet.Hash() + gen.NextValidatorsHash = valSet.Hash() + gen.Height = int64(height) + blockID := RandBlockID(t) + blockID.Hash = gen.Hash() + voteSet := types.NewVoteSet(gen.ChainID, gen.Height, 0, tmproto.PrecommitType, valSet) + commit, err := MakeCommit(blockID, gen.Height, 0, voteSet, vals, time.Now()) + require.NoError(t, err) + + eh := &header.ExtendedHeader{ + RawHeader: *gen, + Commit: commit, + ValidatorSet: valSet, + DAH: &dah, + } + require.NoError(t, eh.Validate()) + return eh +} + func CreateFraudExtHeader( t *testing.T, eh *header.ExtendedHeader, diff --git a/libs/authtoken/authtoken.go b/libs/authtoken/authtoken.go new file mode 100644 index 0000000000..3d6645c972 --- /dev/null +++ b/libs/authtoken/authtoken.go @@ -0,0 +1,36 @@ +package authtoken + +import ( + "encoding/json" + + 
"github.com/cristalhq/jwt" + "github.com/filecoin-project/go-jsonrpc/auth" + + "github.com/celestiaorg/celestia-node/api/rpc/perms" +) + +// ExtractSignedPermissions returns the permissions granted to the token by the passed signer. +// If the token isn't signed by the signer, it will not pass verification. +func ExtractSignedPermissions(signer jwt.Signer, token string) ([]auth.Permission, error) { + tk, err := jwt.ParseAndVerifyString(token, signer) + if err != nil { + return nil, err + } + p := new(perms.JWTPayload) + err = json.Unmarshal(tk.RawClaims(), p) + if err != nil { + return nil, err + } + return p.Allow, nil +} + +// NewSignedJWT returns a signed JWT token with the passed permissions and signer. +func NewSignedJWT(signer jwt.Signer, permissions []auth.Permission) (string, error) { + token, err := jwt.NewTokenBuilder(signer).Build(&perms.JWTPayload{ + Allow: permissions, + }) + if err != nil { + return "", err + } + return token.InsecureString(), nil +} diff --git a/libs/utils/address.go b/libs/utils/address.go index 5971bf5a98..a8170e44b9 100644 --- a/libs/utils/address.go +++ b/libs/utils/address.go @@ -30,11 +30,14 @@ func ValidateAddr(addr string) (string, error) { } if ip := net.ParseIP(addr); ip == nil { - _, err = net.LookupHost(addr) + addrs, err := net.LookupHost(addr) if err != nil { - return addr, fmt.Errorf("could not resolve hostname or ip: %w", err) + return addr, fmt.Errorf("could not resolve %v: %w", addr, err) } + if len(addrs) == 0 { + return addr, fmt.Errorf("no IP addresses found for DNS record: %v", addr) + } + addr = addrs[0] } - return addr, nil } diff --git a/libs/utils/address_test.go b/libs/utils/address_test.go index c914a7d853..15452f4d1b 100644 --- a/libs/utils/address_test.go +++ b/libs/utils/address_test.go @@ -1,6 +1,7 @@ package utils import ( + "net" "testing" "github.com/stretchr/testify/require" @@ -13,8 +14,12 @@ func TestSanitizeAddr(t *testing.T) { }{ // Testcase: trims protocol prefix {addr: "http://celestia.org", want: "celestia.org"}, + // Testcase: protocol prefix trimmed already + {addr: "celestia.org", want: "celestia.org"}, // Testcase: trims protocol prefix, and trims port and trailing slash suffix {addr: "tcp://192.168.42.42:5050/", want: "192.168.42.42"}, + // Testcase: invariant ip + {addr: "192.168.42.42", want: "192.168.42.42"}, } for _, tt := range tests { @@ -27,23 +32,41 @@ func TestSanitizeAddr(t *testing.T) { } func TestValidateAddr(t *testing.T) { + type want struct { + addr string + unresolved bool + } var tests = []struct { addr string - want string + want want }{ // Testcase: ip is valid - {addr: "192.168.42.42:5050", want: "192.168.42.42"}, - // Testcase: hostname is valid - {addr: "https://celestia.org", want: "celestia.org"}, + {addr: "192.168.42.42:5050", want: want{addr: "192.168.42.42"}}, + // Testcase: ip is valid, no port + {addr: "192.168.42.42", want: want{addr: "192.168.42.42"}}, // Testcase: resolves localhost - {addr: "http://localhost:8080/", want: "localhost"}, + {addr: "http://localhost:8080/", want: want{unresolved: true}}, + // Testcase: hostname is valid + {addr: "https://celestia.org", want: want{unresolved: true}}, + // Testcase: hostname is valid, but no schema + {addr: "celestia.org", want: want{unresolved: true}}, } for _, tt := range tests { t.Run(tt.addr, func(t *testing.T) { got, err := ValidateAddr(tt.addr) require.NoError(t, err) - require.Equal(t, tt.want, got) + + // validate that returned value is ip + if ip := net.ParseIP(got); ip == nil { + t.Fatalf("empty ip") + } + + if 
tt.want.unresolved { + // unresolved addr has no addr to compare with + return + } + require.Equal(t, tt.want.addr, got) }) } } diff --git a/logs/logs.go b/logs/logs.go index 4ae1cddd3f..15c26888c5 100644 --- a/logs/logs.go +++ b/logs/logs.go @@ -21,6 +21,8 @@ func SetAllLoggers(level logging.LogLevel) { _ = logging.SetLogLevel("net/identify", "ERROR") _ = logging.SetLogLevel("shrex/nd", "WARN") _ = logging.SetLogLevel("shrex/eds", "WARN") + _ = logging.SetLogLevel("dagstore", "WARN") + _ = logging.SetLogLevel("dagstore/upgrader", "WARN") _ = logging.SetLogLevel("fx", "FATAL") } diff --git a/nodebuilder/blob/blob.go b/nodebuilder/blob/blob.go new file mode 100644 index 0000000000..7cbe312856 --- /dev/null +++ b/nodebuilder/blob/blob.go @@ -0,0 +1,76 @@ +package blob + +import ( + "context" + + "github.com/celestiaorg/nmt/namespace" + + "github.com/celestiaorg/celestia-node/blob" +) + +var _ Module = (*API)(nil) + +// Module defines the API related to interacting with the blobs +// +//go:generate mockgen -destination=mocks/api.go -package=mocks . Module +type Module interface { + // Submit sends Blobs and reports the height in which they were included. + // Allows sending multiple Blobs atomically synchronously. + // Uses default wallet registered on the Node. + Submit(_ context.Context, _ []*blob.Blob) (height uint64, _ error) + // Get retrieves the blob by commitment under the given namespace and height. + Get(_ context.Context, height uint64, _ namespace.ID, _ blob.Commitment) (*blob.Blob, error) + // GetAll returns all blobs under the given namespaces and height. + GetAll(_ context.Context, height uint64, _ []namespace.ID) ([]*blob.Blob, error) + // GetProof retrieves proofs in the given namespaces at the given height by commitment. + GetProof(_ context.Context, height uint64, _ namespace.ID, _ blob.Commitment) (*blob.Proof, error) + // Included checks whether a blob's given commitment(Merkle subtree root) is included at + // given height and under the namespace. 
+ Included(_ context.Context, height uint64, _ namespace.ID, _ *blob.Proof, _ blob.Commitment) (bool, error) +} + +type API struct { + Internal struct { + Submit func(context.Context, []*blob.Blob) (uint64, error) `perm:"write"` + Get func(context.Context, uint64, namespace.ID, blob.Commitment) (*blob.Blob, error) `perm:"read"` + GetAll func(context.Context, uint64, []namespace.ID) ([]*blob.Blob, error) `perm:"read"` + GetProof func(context.Context, uint64, namespace.ID, blob.Commitment) (*blob.Proof, error) `perm:"read"` + Included func(context.Context, uint64, namespace.ID, *blob.Proof, blob.Commitment) (bool, error) `perm:"read"` + } +} + +func (api *API) Submit(ctx context.Context, blobs []*blob.Blob) (uint64, error) { + return api.Internal.Submit(ctx, blobs) +} + +func (api *API) Get( + ctx context.Context, + height uint64, + nID namespace.ID, + commitment blob.Commitment, +) (*blob.Blob, error) { + return api.Internal.Get(ctx, height, nID, commitment) +} + +func (api *API) GetAll(ctx context.Context, height uint64, nIDs []namespace.ID) ([]*blob.Blob, error) { + return api.Internal.GetAll(ctx, height, nIDs) +} + +func (api *API) GetProof( + ctx context.Context, + height uint64, + nID namespace.ID, + commitment blob.Commitment, +) (*blob.Proof, error) { + return api.Internal.GetProof(ctx, height, nID, commitment) +} + +func (api *API) Included( + ctx context.Context, + height uint64, + nID namespace.ID, + proof *blob.Proof, + commitment blob.Commitment, +) (bool, error) { + return api.Internal.Included(ctx, height, nID, proof, commitment) +} diff --git a/nodebuilder/blob/mocks/api.go b/nodebuilder/blob/mocks/api.go new file mode 100644 index 0000000000..f99d1d8168 --- /dev/null +++ b/nodebuilder/blob/mocks/api.go @@ -0,0 +1,112 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: github.com/celestiaorg/celestia-node/nodebuilder/blob (interfaces: Module) + +// Package mocks is a generated GoMock package. +package mocks + +import ( + context "context" + reflect "reflect" + + blob "github.com/celestiaorg/celestia-node/blob" + namespace "github.com/celestiaorg/nmt/namespace" + gomock "github.com/golang/mock/gomock" +) + +// MockModule is a mock of Module interface. +type MockModule struct { + ctrl *gomock.Controller + recorder *MockModuleMockRecorder +} + +// MockModuleMockRecorder is the mock recorder for MockModule. +type MockModuleMockRecorder struct { + mock *MockModule +} + +// NewMockModule creates a new mock instance. +func NewMockModule(ctrl *gomock.Controller) *MockModule { + mock := &MockModule{ctrl: ctrl} + mock.recorder = &MockModuleMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockModule) EXPECT() *MockModuleMockRecorder { + return m.recorder +} + +// Get mocks base method. +func (m *MockModule) Get(arg0 context.Context, arg1 uint64, arg2 namespace.ID, arg3 blob.Commitment) (*blob.Blob, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Get", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].(*blob.Blob) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Get indicates an expected call of Get. +func (mr *MockModuleMockRecorder) Get(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Get", reflect.TypeOf((*MockModule)(nil).Get), arg0, arg1, arg2, arg3) +} + +// GetAll mocks base method. 
+func (m *MockModule) GetAll(arg0 context.Context, arg1 uint64, arg2 []namespace.ID) ([]*blob.Blob, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetAll", arg0, arg1, arg2) + ret0, _ := ret[0].([]*blob.Blob) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetAll indicates an expected call of GetAll. +func (mr *MockModuleMockRecorder) GetAll(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetAll", reflect.TypeOf((*MockModule)(nil).GetAll), arg0, arg1, arg2) +} + +// GetProof mocks base method. +func (m *MockModule) GetProof(arg0 context.Context, arg1 uint64, arg2 namespace.ID, arg3 blob.Commitment) (*blob.Proof, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetProof", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].(*blob.Proof) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetProof indicates an expected call of GetProof. +func (mr *MockModuleMockRecorder) GetProof(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetProof", reflect.TypeOf((*MockModule)(nil).GetProof), arg0, arg1, arg2, arg3) +} + +// Included mocks base method. +func (m *MockModule) Included(arg0 context.Context, arg1 uint64, arg2 namespace.ID, arg3 *blob.Proof, arg4 blob.Commitment) (bool, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Included", arg0, arg1, arg2, arg3, arg4) + ret0, _ := ret[0].(bool) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Included indicates an expected call of Included. +func (mr *MockModuleMockRecorder) Included(arg0, arg1, arg2, arg3, arg4 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Included", reflect.TypeOf((*MockModule)(nil).Included), arg0, arg1, arg2, arg3, arg4) +} + +// Submit mocks base method. +func (m *MockModule) Submit(arg0 context.Context, arg1 []*blob.Blob) (uint64, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Submit", arg0, arg1) + ret0, _ := ret[0].(uint64) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Submit indicates an expected call of Submit. 
+func (mr *MockModuleMockRecorder) Submit(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Submit", reflect.TypeOf((*MockModule)(nil).Submit), arg0, arg1) +} diff --git a/nodebuilder/blob/module.go b/nodebuilder/blob/module.go new file mode 100644 index 0000000000..76e7677725 --- /dev/null +++ b/nodebuilder/blob/module.go @@ -0,0 +1,28 @@ +package blob + +import ( + "context" + + "go.uber.org/fx" + + "github.com/celestiaorg/celestia-node/blob" + "github.com/celestiaorg/celestia-node/header" + headerService "github.com/celestiaorg/celestia-node/nodebuilder/header" + "github.com/celestiaorg/celestia-node/share" + "github.com/celestiaorg/celestia-node/state" +) + +func ConstructModule() fx.Option { + return fx.Module("blob", + fx.Provide( + func(service headerService.Module) func(context.Context, uint64) (*header.ExtendedHeader, error) { + return service.GetByHeight + }), + fx.Provide(func( + state *state.CoreAccessor, + sGetter share.Getter, + getByHeightFn func(context.Context, uint64) (*header.ExtendedHeader, error), + ) Module { + return blob.NewService(state, sGetter, getByHeightFn) + })) +} diff --git a/nodebuilder/config.go b/nodebuilder/config.go index 3607aa593e..670bbf9bbd 100644 --- a/nodebuilder/config.go +++ b/nodebuilder/config.go @@ -25,6 +25,7 @@ type ConfigLoader func() (*Config, error) // Config is main configuration structure for a Node. // It combines configuration units for all Node subsystems. type Config struct { + Node node.Config Core core.Config State state.Config P2P p2p.Config @@ -39,6 +40,7 @@ type Config struct { // NOTE: Currently, configs are identical, but this will change. func DefaultConfig(tp node.Type) *Config { commonConfig := &Config{ + Node: node.DefaultConfig(tp), Core: core.DefaultConfig(), State: state.DefaultConfig(), P2P: p2p.DefaultConfig(tp), diff --git a/nodebuilder/core/flags.go b/nodebuilder/core/flags.go index b98c7acd07..9cbed9b277 100644 --- a/nodebuilder/core/flags.go +++ b/nodebuilder/core/flags.go @@ -21,7 +21,8 @@ func Flags() *flag.FlagSet { coreFlag, "", "Indicates node to connect to the given core node. "+ - "Example: <ip>, 127.0.0.1. Assumes RPC port 26657 and gRPC port 9090 as default unless otherwise specified.", + "Example: <ip>, 127.0.0.1. <dns>, subdomain.domain.tld "+ "Assumes RPC port 26657 and gRPC port 9090 as default unless otherwise specified.", ) flags.String( coreRPCFlag, @@ -55,5 +56,5 @@ func ParseFlags( cfg.IP = coreIP cfg.RPCPort = rpc cfg.GRPCPort = grpc - return nil + return cfg.Validate() } diff --git a/nodebuilder/das/mocks/api.go b/nodebuilder/das/mocks/api.go index 68ffaf3c8c..c4046e90e8 100644 --- a/nodebuilder/das/mocks/api.go +++ b/nodebuilder/das/mocks/api.go @@ -8,9 +8,8 @@ import ( context "context" reflect "reflect" - gomock "github.com/golang/mock/gomock" - das "github.com/celestiaorg/celestia-node/das" + gomock "github.com/golang/mock/gomock" ) // MockModule is a mock of Module interface.
diff --git a/nodebuilder/default_services.go b/nodebuilder/default_services.go index 03954c726a..0373e44244 100644 --- a/nodebuilder/default_services.go +++ b/nodebuilder/default_services.go @@ -1,6 +1,7 @@ package nodebuilder import ( + "github.com/celestiaorg/celestia-node/nodebuilder/blob" "github.com/celestiaorg/celestia-node/nodebuilder/das" "github.com/celestiaorg/celestia-node/nodebuilder/fraud" "github.com/celestiaorg/celestia-node/nodebuilder/header" @@ -19,5 +20,6 @@ var PackageToAPI = map[string]interface{}{ "header": &header.API{}, "daser": &das.API{}, "p2p": &p2p.API{}, + "blob": &blob.API{}, "node": &node.API{}, } diff --git a/nodebuilder/fraud/mocks/api.go b/nodebuilder/fraud/mocks/api.go index 5ede6f27c5..ba88131695 100644 --- a/nodebuilder/fraud/mocks/api.go +++ b/nodebuilder/fraud/mocks/api.go @@ -8,10 +8,9 @@ import ( context "context" reflect "reflect" + fraud "github.com/celestiaorg/celestia-node/nodebuilder/fraud" + fraud0 "github.com/celestiaorg/go-fraud" gomock "github.com/golang/mock/gomock" - - fraud0 "github.com/celestiaorg/celestia-node/nodebuilder/fraud" - "github.com/celestiaorg/go-fraud" ) // MockModule is a mock of Module interface. @@ -38,10 +37,10 @@ func (m *MockModule) EXPECT() *MockModuleMockRecorder { } // Get mocks base method. -func (m *MockModule) Get(arg0 context.Context, arg1 fraud.ProofType) ([]fraud0.Proof, error) { +func (m *MockModule) Get(arg0 context.Context, arg1 fraud0.ProofType) ([]fraud.Proof, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Get", arg0, arg1) - ret0, _ := ret[0].([]fraud0.Proof) + ret0, _ := ret[0].([]fraud.Proof) ret1, _ := ret[1].(error) return ret0, ret1 } @@ -53,10 +52,10 @@ func (mr *MockModuleMockRecorder) Get(arg0, arg1 interface{}) *gomock.Call { } // Subscribe mocks base method. -func (m *MockModule) Subscribe(arg0 context.Context, arg1 fraud.ProofType) (<-chan fraud0.Proof, error) { +func (m *MockModule) Subscribe(arg0 context.Context, arg1 fraud0.ProofType) (<-chan fraud.Proof, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Subscribe", arg0, arg1) - ret0, _ := ret[0].(<-chan fraud0.Proof) + ret0, _ := ret[0].(<-chan fraud.Proof) ret1, _ := ret[1].(error) return ret0, ret1 } diff --git a/nodebuilder/gateway/config.go b/nodebuilder/gateway/config.go index 903a27489a..f85f207ceb 100644 --- a/nodebuilder/gateway/config.go +++ b/nodebuilder/gateway/config.go @@ -8,9 +8,10 @@ import ( ) type Config struct { - Address string - Port string - Enabled bool + Address string + Port string + Enabled bool + deprecatedEndpoints bool } func DefaultConfig() Config { diff --git a/nodebuilder/gateway/constructors.go b/nodebuilder/gateway/constructors.go index c28153b0a5..c771c12023 100644 --- a/nodebuilder/gateway/constructors.go +++ b/nodebuilder/gateway/constructors.go @@ -10,6 +10,7 @@ import ( // Handler constructs a new RPC Handler from the given services. 
func Handler( + cfg *Config, state state.Module, share share.Module, header header.Module, @@ -17,7 +18,7 @@ func Handler( serv *gateway.Server, ) { handler := gateway.NewHandler(state, share, header, daser) - handler.RegisterEndpoints(serv) + handler.RegisterEndpoints(serv, cfg.deprecatedEndpoints) handler.RegisterMiddleware(serv) } diff --git a/nodebuilder/gateway/flags.go b/nodebuilder/gateway/flags.go index cd13e47162..4d72a278e5 100644 --- a/nodebuilder/gateway/flags.go +++ b/nodebuilder/gateway/flags.go @@ -6,9 +6,10 @@ import ( ) var ( - enabledFlag = "gateway" - addrFlag = "gateway.addr" - portFlag = "gateway.port" + enabledFlag = "gateway" + addrFlag = "gateway.addr" + portFlag = "gateway.port" + deprecatedEndpoints = "gateway.deprecated-endpoints" ) // Flags gives a set of hardcoded node/gateway package flags. @@ -20,6 +21,11 @@ func Flags() *flag.FlagSet { false, "Enables the REST gateway", ) + flags.Bool( + deprecatedEndpoints, + false, + "Enables deprecated endpoints on the gateway. These will be removed in the next release.", + ) flags.String( addrFlag, "", @@ -40,6 +46,10 @@ func ParseFlags(cmd *cobra.Command, cfg *Config) { if cmd.Flags().Changed(enabledFlag) && err == nil { cfg.Enabled = enabled } + deprecatedEndpointsEnabled, err := cmd.Flags().GetBool(deprecatedEndpoints) + if cmd.Flags().Changed(deprecatedEndpoints) && err == nil { + cfg.deprecatedEndpoints = deprecatedEndpointsEnabled + } addr, port := cmd.Flag(addrFlag), cmd.Flag(portFlag) if !cfg.Enabled && (addr.Changed || port.Changed) { log.Warn("custom address or port provided without enabling gateway, setting config values") diff --git a/nodebuilder/gateway/module.go b/nodebuilder/gateway/module.go index b3070e01a6..b727f4c04d 100644 --- a/nodebuilder/gateway/module.go +++ b/nodebuilder/gateway/module.go @@ -21,8 +21,6 @@ func ConstructModule(tp node.Type, cfg *Config) fx.Option { if !cfg.Enabled { return fx.Options() } - // NOTE @distractedm1nd @renaynay: Remove whenever/if we decide to add auth to gateway - log.Warn("Gateway is enabled, however gateway endpoints are not authenticated. Use with caution!") baseComponents := fx.Options( fx.Supply(cfg), @@ -50,12 +48,13 @@ func ConstructModule(tp node.Type, cfg *Config) fx.Option { "gateway", baseComponents, fx.Invoke(func( + cfg *Config, state stateServ.Module, share shareServ.Module, header headerServ.Module, serv *gateway.Server, ) { - Handler(state, share, header, nil, serv) + Handler(cfg, state, share, header, nil, serv) }), ) default: diff --git a/nodebuilder/header/mocks/api.go b/nodebuilder/header/mocks/api.go index 538169c6be..02529a8ef9 100644 --- a/nodebuilder/header/mocks/api.go +++ b/nodebuilder/header/mocks/api.go @@ -8,11 +8,10 @@ import ( context "context" reflect "reflect" - gomock "github.com/golang/mock/gomock" - header "github.com/celestiaorg/celestia-node/header" header0 "github.com/celestiaorg/go-header" sync "github.com/celestiaorg/go-header/sync" + gomock "github.com/golang/mock/gomock" ) // MockModule is a mock of Module interface. @@ -156,3 +155,18 @@ func (mr *MockModuleMockRecorder) SyncWait(arg0 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SyncWait", reflect.TypeOf((*MockModule)(nil).SyncWait), arg0) } + +// WaitForHeight mocks base method. 
+func (m *MockModule) WaitForHeight(arg0 context.Context, arg1 uint64) (*header.ExtendedHeader, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "WaitForHeight", arg0, arg1) + ret0, _ := ret[0].(*header.ExtendedHeader) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// WaitForHeight indicates an expected call of WaitForHeight. +func (mr *MockModuleMockRecorder) WaitForHeight(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WaitForHeight", reflect.TypeOf((*MockModule)(nil).WaitForHeight), arg0, arg1) +} diff --git a/nodebuilder/header/service.go b/nodebuilder/header/service.go index e6d7d46b8f..f410c04f04 100644 --- a/nodebuilder/header/service.go +++ b/nodebuilder/header/service.go @@ -37,7 +37,8 @@ func newHeaderService( sub libhead.Subscriber[*header.ExtendedHeader], p2pServer *p2p.ExchangeServer[*header.ExtendedHeader], ex libhead.Exchange[*header.ExtendedHeader], - store libhead.Store[*header.ExtendedHeader]) Module { + store libhead.Store[*header.ExtendedHeader], +) Module { return &Service{ syncer: syncer, sub: sub, @@ -66,7 +67,7 @@ func (s *Service) GetByHeight(ctx context.Context, height uint64) (*header.Exten return nil, err case uint64(head.Height()) == height: return head, nil - case uint64(head.Height()) < height: + case uint64(head.Height())+1 < height: return nil, fmt.Errorf("header: given height is from the future: "+ "networkHeight: %d, requestedHeight: %d", head.Height(), height) } diff --git a/nodebuilder/module.go b/nodebuilder/module.go index 3f3b08e68b..51edc26c72 100644 --- a/nodebuilder/module.go +++ b/nodebuilder/module.go @@ -6,6 +6,7 @@ import ( "go.uber.org/fx" "github.com/celestiaorg/celestia-node/libs/fxutil" + "github.com/celestiaorg/celestia-node/nodebuilder/blob" "github.com/celestiaorg/celestia-node/nodebuilder/core" "github.com/celestiaorg/celestia-node/nodebuilder/das" "github.com/celestiaorg/celestia-node/nodebuilder/fraud" @@ -52,6 +53,7 @@ func ConstructModule(tp node.Type, network p2p.Network, cfg *Config, store Store core.ConstructModule(tp, &cfg.Core), das.ConstructModule(tp, &cfg.DASer), fraud.ConstructModule(tp), + blob.ConstructModule(), node.ConstructModule(tp), ) diff --git a/nodebuilder/node.go b/nodebuilder/node.go index 90caac5dc9..33132dc5e4 100644 --- a/nodebuilder/node.go +++ b/nodebuilder/node.go @@ -5,7 +5,6 @@ import ( "errors" "fmt" "strings" - "time" "github.com/ipfs/go-blockservice" exchange "github.com/ipfs/go-ipfs-exchange-interface" @@ -21,6 +20,7 @@ import ( "github.com/celestiaorg/celestia-node/api/gateway" "github.com/celestiaorg/celestia-node/api/rpc" + "github.com/celestiaorg/celestia-node/nodebuilder/blob" "github.com/celestiaorg/celestia-node/nodebuilder/das" "github.com/celestiaorg/celestia-node/nodebuilder/fraud" "github.com/celestiaorg/celestia-node/nodebuilder/header" @@ -30,8 +30,6 @@ import ( "github.com/celestiaorg/celestia-node/nodebuilder/state" ) -var Timeout = time.Minute * 2 - var ( log = logging.Logger("node") fxLog = logging.Logger("fx") @@ -68,7 +66,9 @@ type Node struct { HeaderServ header.Module // not optional StateServ state.Module // not optional FraudServ fraud.Module // not optional + BlobServ blob.Module // not optional DASer das.Module // not optional + AdminServ node.Module // not optional // start and stop control ref internal fx.App lifecycle funcs to be called from Start and Stop start, stop lifecycleFunc @@ -93,14 +93,15 @@ func NewWithConfig(tp node.Type, network p2p.Network, store Store, cfg *Config, // Start 
launches the Node and all its components and services. func (n *Node) Start(ctx context.Context) error { - ctx, cancel := context.WithTimeout(ctx, Timeout) + to := n.Config.Node.StartupTimeout + ctx, cancel := context.WithTimeout(ctx, to) defer cancel() err := n.start(ctx) if err != nil { log.Debugf("error starting %s Node: %s", n.Type, err) if errors.Is(err, context.DeadlineExceeded) { - return fmt.Errorf("node: failed to start within timeout(%s): %w", Timeout, err) + return fmt.Errorf("node: failed to start within timeout(%s): %w", to, err) } return fmt.Errorf("node: failed to start: %w", err) } @@ -138,14 +139,15 @@ func (n *Node) Run(ctx context.Context) error { // Canceling the given context earlier 'ctx' unblocks the Stop and aborts graceful shutdown forcing // remaining Modules/Services to close immediately. func (n *Node) Stop(ctx context.Context) error { - ctx, cancel := context.WithTimeout(ctx, Timeout) + to := n.Config.Node.ShutdownTimeout + ctx, cancel := context.WithTimeout(ctx, to) defer cancel() err := n.stop(ctx) if err != nil { log.Debugf("error stopping %s Node: %s", n.Type, err) if errors.Is(err, context.DeadlineExceeded) { - return fmt.Errorf("node: failed to stop within timeout(%s): %w", Timeout, err) + return fmt.Errorf("node: failed to stop within timeout(%s): %w", to, err) } return fmt.Errorf("node: failed to stop: %w", err) } diff --git a/nodebuilder/node/admin.go b/nodebuilder/node/admin.go index 4607c4d33f..c6c97625ef 100644 --- a/nodebuilder/node/admin.go +++ b/nodebuilder/node/admin.go @@ -2,21 +2,25 @@ package node import ( "context" - "fmt" + "github.com/cristalhq/jwt" "github.com/filecoin-project/go-jsonrpc/auth" logging "github.com/ipfs/go-log/v2" + + "github.com/celestiaorg/celestia-node/libs/authtoken" ) -const APIVersion = "v0.1.0" +const APIVersion = "v0.2.1" type module struct { - tp Type + tp Type + signer jwt.Signer } -func newModule(tp Type) Module { +func newModule(tp Type, signer jwt.Signer) Module { return &module{ - tp: tp, + tp: tp, + signer: signer, } } @@ -38,10 +42,10 @@ func (m *module) LogLevelSet(_ context.Context, name, level string) error { return logging.SetLogLevel(name, level) } -func (m *module) AuthVerify(context.Context, string) ([]auth.Permission, error) { - return []auth.Permission{}, fmt.Errorf("not implemented") +func (m *module) AuthVerify(_ context.Context, token string) ([]auth.Permission, error) { + return authtoken.ExtractSignedPermissions(m.signer, token) } -func (m *module) AuthNew(context.Context, []auth.Permission) ([]byte, error) { - return nil, fmt.Errorf("not implemented") +func (m *module) AuthNew(_ context.Context, permissions []auth.Permission) (string, error) { + return authtoken.NewSignedJWT(m.signer, permissions) } diff --git a/nodebuilder/node/config.go b/nodebuilder/node/config.go new file mode 100644 index 0000000000..e44fe2f014 --- /dev/null +++ b/nodebuilder/node/config.go @@ -0,0 +1,38 @@ +package node + +import ( + "fmt" + "time" +) + +var defaultLifecycleTimeout = time.Minute * 2 + +type Config struct { + StartupTimeout time.Duration + ShutdownTimeout time.Duration +} + +// DefaultConfig returns the default node configuration for a given node type. 
+func DefaultConfig(tp Type) Config { + var timeout time.Duration + switch tp { + case Light: + timeout = time.Second * 20 + default: + timeout = defaultLifecycleTimeout + } + return Config{ + StartupTimeout: timeout, + ShutdownTimeout: timeout, + } +} + +func (c *Config) Validate() error { + if c.StartupTimeout == 0 { + return fmt.Errorf("invalid startup timeout: %v", c.StartupTimeout) + } + if c.ShutdownTimeout == 0 { + return fmt.Errorf("invalid shutdown timeout: %v", c.ShutdownTimeout) + } + return nil +} diff --git a/nodebuilder/node/mocks/api.go b/nodebuilder/node/mocks/api.go index 39d500bc04..d8789a771c 100644 --- a/nodebuilder/node/mocks/api.go +++ b/nodebuilder/node/mocks/api.go @@ -8,10 +8,9 @@ import ( context "context" reflect "reflect" + node "github.com/celestiaorg/celestia-node/nodebuilder/node" auth "github.com/filecoin-project/go-jsonrpc/auth" gomock "github.com/golang/mock/gomock" - - node "github.com/celestiaorg/celestia-node/nodebuilder/node" ) // MockModule is a mock of Module interface. @@ -38,10 +37,10 @@ func (m *MockModule) EXPECT() *MockModuleMockRecorder { } // AuthNew mocks base method. -func (m *MockModule) AuthNew(arg0 context.Context, arg1 []auth.Permission) ([]byte, error) { +func (m *MockModule) AuthNew(arg0 context.Context, arg1 []auth.Permission) (string, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "AuthNew", arg0, arg1) - ret0, _ := ret[0].([]byte) + ret0, _ := ret[0].(string) ret1, _ := ret[1].(error) return ret0, ret1 } diff --git a/nodebuilder/node/module.go b/nodebuilder/node/module.go index e7ae88182e..5abfad8e5f 100644 --- a/nodebuilder/node/module.go +++ b/nodebuilder/node/module.go @@ -9,7 +9,7 @@ func ConstructModule(tp Type) fx.Option { return fx.Module( "node", fx.Provide(func(secret jwt.Signer) Module { - return newModule(tp) + return newModule(tp, secret) }), fx.Provide(secret), ) diff --git a/nodebuilder/node/node.go b/nodebuilder/node/node.go index c33f73ef58..18ce93615b 100644 --- a/nodebuilder/node/node.go +++ b/nodebuilder/node/node.go @@ -20,7 +20,7 @@ type Module interface { // AuthVerify returns the permissions assigned to the given token. AuthVerify(ctx context.Context, token string) ([]auth.Permission, error) // AuthNew signs and returns a new token with the given permissions. 
- AuthNew(ctx context.Context, perms []auth.Permission) ([]byte, error) + AuthNew(ctx context.Context, perms []auth.Permission) (string, error) } var _ Module = (*API)(nil) @@ -30,7 +30,7 @@ type API struct { Info func(context.Context) (Info, error) `perm:"admin"` LogLevelSet func(ctx context.Context, name, level string) error `perm:"admin"` AuthVerify func(ctx context.Context, token string) ([]auth.Permission, error) `perm:"admin"` - AuthNew func(ctx context.Context, perms []auth.Permission) ([]byte, error) `perm:"admin"` + AuthNew func(ctx context.Context, perms []auth.Permission) (string, error) `perm:"admin"` } } @@ -46,6 +46,6 @@ func (api *API) AuthVerify(ctx context.Context, token string) ([]auth.Permission return api.Internal.AuthVerify(ctx, token) } -func (api *API) AuthNew(ctx context.Context, perms []auth.Permission) ([]byte, error) { +func (api *API) AuthNew(ctx context.Context, perms []auth.Permission) (string, error) { return api.Internal.AuthNew(ctx, perms) } diff --git a/nodebuilder/node_test.go b/nodebuilder/node_test.go index 11b27b076a..e6775419c7 100644 --- a/nodebuilder/node_test.go +++ b/nodebuilder/node_test.go @@ -34,6 +34,7 @@ func TestLifecycle(t *testing.T) { require.NotNil(t, node.Host) require.NotNil(t, node.HeaderServ) require.NotNil(t, node.StateServ) + require.NotNil(t, node.AdminServ) require.Equal(t, tt.tp, node.Type) ctx, cancel := context.WithCancel(context.Background()) @@ -136,6 +137,9 @@ func StartMockOtelCollectorHTTPServer(t *testing.T) (string, func()) { } func TestEmptyBlockExists(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + var test = []struct { tp node.Type }{ @@ -148,10 +152,6 @@ func TestEmptyBlockExists(t *testing.T) { for i, tt := range test { t.Run(strconv.Itoa(i), func(t *testing.T) { node := TestNode(t, tt.tp) - - ctx, cancel := context.WithTimeout(context.Background(), Timeout) - defer cancel() - err := node.Start(ctx) require.NoError(t, err) diff --git a/nodebuilder/p2p/bootstrap.go b/nodebuilder/p2p/bootstrap.go index 0ceefa0453..c534aa4fba 100644 --- a/nodebuilder/p2p/bootstrap.go +++ b/nodebuilder/p2p/bootstrap.go @@ -38,15 +38,15 @@ func bootstrappersFor(net Network) ([]string, error) { // NOTE: Every time we add a new long-running network, its bootstrap peers have to be added here. 
var bootstrapList = map[Network][]string{ Arabica: { - "/dns4/limani.celestia-devops.dev/tcp/2121/p2p/12D3KooWDgG69kXfmSiHjUErN2ahpUC1SXpSfB2urrqMZ6aWC8NS", - "/dns4/marsellesa.celestia-devops.dev/tcp/2121/p2p/12D3KooWHr2wqFAsMXnPzpFsgxmePgXb8BqpkePebwUgLyZc95bd", - "/dns4/parainem.celestia-devops.dev/tcp/2121/p2p/12D3KooWHX8xpwg8qkP7kLKmKGtgZvmsopvgxc6Fwtu665QC7G8q", - "/dns4/kaarina.celestia-devops.dev/tcp/2121/p2p/12D3KooWN6fzdt4sG5QfWRPn4kwCQBdkt7TDNQkWsUymAwKrmvUs", + "/dns4/da-bridge-arabica-8.celestia-arabica.com/tcp/2121/p2p/12D3KooWDXkXARv79Dtn5xrGBgJePtCzCsEwWR7eGWnx9ZCyUyD6", + "/dns4/da-bridge-arabica-8-2.celestia-arabica.com/tcp/2121/p2p/12D3KooWPu8qKmmNgYFMBsTkLBa1m3D9Cy9ReCAoQLqxEn9MHD1i", + "/dns4/da-full-1-arabica-8.celestia-arabica.com/tcp/2121/p2p/12D3KooWEmeFodzypdTBTcw8Yub6WZRT4h1UgFtwCwwq6wS5Dtqm", + "/dns4/da-full-2-arabica-8.celestia-arabica.com/tcp/2121/p2p/12D3KooWCs3wFmqwPn1u8pNU4BGsvLsob1ShTzvps8qEtTRuuuK5", }, Mocha: { - "/dns4/andromeda.celestia-devops.dev/tcp/2121/p2p/12D3KooWKvPXtV1yaQ6e3BRNUHa5Phh8daBwBi3KkGaSSkUPys6D", - "/dns4/libra.celestia-devops.dev/tcp/2121/p2p/12D3KooWK5aDotDcLsabBmWDazehQLMsDkRyARm1k7f1zGAXqbt4", - "/dns4/norma.celestia-devops.dev/tcp/2121/p2p/12D3KooWHYczJDVNfYVkLcNHPTDKCeiVvRhg8Q9JU3bE3m9eEVyY", + "/dns4/bootstr-mocha-1.celestia-mocha.com/tcp/2121/p2p/12D3KooWDRSJMbH3PS4dRDa11H7Tk615aqTUgkeEKz4pwd4sS6fN", + "/dns4/bootstr-mocha-2.celestia-mocha.com/tcp/2121/p2p/12D3KooWEk7cxtjQCC7kC84Uhs2j6dAHjdbwYnPcvUAqmj6Zsry2", + "/dns4/bootstr-mocha-3.celestia-mocha.com/tcp/2121/p2p/12D3KooWBE4QcFXZzENf2VRo6Y5LBvp9gzmpYRHKCvgGzEYj7Hdn", }, BlockspaceRace: { "/dns4/bootstr-incent-3.celestia.tools/tcp/2121/p2p/12D3KooWNzdKcHagtvvr6qtjcPTAdCN6ZBiBLH8FBHbihxqu4GZx", diff --git a/nodebuilder/p2p/genesis.go b/nodebuilder/p2p/genesis.go index 670699b292..0a36dc54cc 100644 --- a/nodebuilder/p2p/genesis.go +++ b/nodebuilder/p2p/genesis.go @@ -23,8 +23,8 @@ func GenesisFor(net Network) (string, error) { // NOTE: Every time we add a new long-running network, its genesis hash has to be added here. var genesisList = map[Network]string{ - Arabica: "EE310062CBB13CE98CBC7EAD3F6A827F0E4A86043FDEB2DA42048821877FE45C", - Mocha: "8038B21032C941372ED601699857043C12E5CC7D5945DCEEA4567D11B5712526", + Arabica: "E5D620B5BE7873222DCD83464C285FD0F215C209393E7481F9A5979280AD6CA2", + Mocha: "1181AF8EAE5DDF3CBBFF3BF3CC44C5B795DF5094F5A0CC0AE52921ECCA0AF3C8", BlockspaceRace: "1A8491A72F73929680DAA6C93E3B593579261B2E76536BFA4F5B97D6FE76E088", Private: "", } diff --git a/nodebuilder/p2p/network.go b/nodebuilder/p2p/network.go index 025ba3bddf..dd04cd377c 100644 --- a/nodebuilder/p2p/network.go +++ b/nodebuilder/p2p/network.go @@ -12,9 +12,9 @@ const ( // DefaultNetwork is the default network of the current build. DefaultNetwork = Mocha // Arabica testnet. See: celestiaorg/networks. - Arabica Network = "arabica-6" + Arabica Network = "arabica-8" // Mocha testnet. See: celestiaorg/networks. - Mocha Network = "mocha" + Mocha Network = "mocha-2" // BlockspaceRace testnet. See: https://docs.celestia.org/nodes/blockspace-race/. BlockspaceRace Network = "blockspacerace-0" // Private can be used to set up any private network, including local testing setups. 
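The bootstrapList and genesisList tables above are keyed by the same Network identifiers defined in nodebuilder/p2p/network.go, so bumping a network name (e.g. arabica-6 to arabica-8) only takes effect together with matching bootstrap-peer and genesis-hash entries. The sketch below illustrates, under simplified assumptions, how such a table is typically consumed: the multiaddr strings for a network are looked up and parsed into dialable peer.AddrInfos. The standalone Network type, the single-entry map, and the []peer.AddrInfo return type here are illustrative only; the actual bootstrappersFor in this diff returns the raw []string slice.

package main

import (
	"fmt"

	"github.com/libp2p/go-libp2p/core/peer"
	ma "github.com/multiformats/go-multiaddr"
)

// Network mirrors the string-based network identifiers used above
// (values copied from the diff).
type Network string

const (
	Arabica Network = "arabica-8"
	Mocha   Network = "mocha-2"
)

// bootstrapList mirrors the per-network bootstrap peer table; only one
// entry per network is shown to keep the sketch short.
var bootstrapList = map[Network][]string{
	Arabica: {"/dns4/da-bridge-arabica-8.celestia-arabica.com/tcp/2121/p2p/12D3KooWDXkXARv79Dtn5xrGBgJePtCzCsEwWR7eGWnx9ZCyUyD6"},
	Mocha:   {"/dns4/bootstr-mocha-1.celestia-mocha.com/tcp/2121/p2p/12D3KooWDRSJMbH3PS4dRDa11H7Tk615aqTUgkeEKz4pwd4sS6fN"},
}

// bootstrappersFor resolves the raw multiaddr strings for a network into
// dialable AddrInfos, failing on an unknown network or a malformed address.
func bootstrappersFor(net Network) ([]peer.AddrInfo, error) {
	addrs, ok := bootstrapList[net]
	if !ok {
		return nil, fmt.Errorf("unknown network: %s", net)
	}
	infos := make([]peer.AddrInfo, 0, len(addrs))
	for _, s := range addrs {
		maddr, err := ma.NewMultiaddr(s)
		if err != nil {
			return nil, err
		}
		info, err := peer.AddrInfoFromP2pAddr(maddr)
		if err != nil {
			return nil, err
		}
		infos = append(infos, *info)
	}
	return infos, nil
}

func main() {
	infos, err := bootstrappersFor(Mocha)
	if err != nil {
		panic(err)
	}
	fmt.Println(infos)
}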
diff --git a/nodebuilder/rpc/constructors.go b/nodebuilder/rpc/constructors.go index 3d5c368042..ca30af6305 100644 --- a/nodebuilder/rpc/constructors.go +++ b/nodebuilder/rpc/constructors.go @@ -4,6 +4,7 @@ import ( "github.com/cristalhq/jwt" "github.com/celestiaorg/celestia-node/api/rpc" + "github.com/celestiaorg/celestia-node/nodebuilder/blob" "github.com/celestiaorg/celestia-node/nodebuilder/das" "github.com/celestiaorg/celestia-node/nodebuilder/fraud" "github.com/celestiaorg/celestia-node/nodebuilder/header" @@ -22,6 +23,7 @@ func registerEndpoints( daserMod das.Module, p2pMod p2p.Module, nodeMod node.Module, + blobMod blob.Module, serv *rpc.Server, ) { serv.RegisterAuthedService("fraud", fraudMod, &fraud.API{}) @@ -31,6 +33,7 @@ func registerEndpoints( serv.RegisterAuthedService("share", shareMod, &share.API{}) serv.RegisterAuthedService("p2p", p2pMod, &p2p.API{}) serv.RegisterAuthedService("node", nodeMod, &node.API{}) + serv.RegisterAuthedService("blob", blobMod, &blob.API{}) } func server(cfg *Config, auth jwt.Signer) *rpc.Server { diff --git a/nodebuilder/share/mocks/api.go b/nodebuilder/share/mocks/api.go index 1b26273c0f..586c6dab4b 100644 --- a/nodebuilder/share/mocks/api.go +++ b/nodebuilder/share/mocks/api.go @@ -8,12 +8,11 @@ import ( context "context" reflect "reflect" - gomock "github.com/golang/mock/gomock" - da "github.com/celestiaorg/celestia-app/pkg/da" share "github.com/celestiaorg/celestia-node/share" namespace "github.com/celestiaorg/nmt/namespace" rsmt2d "github.com/celestiaorg/rsmt2d" + gomock "github.com/golang/mock/gomock" ) // MockModule is a mock of Module interface. diff --git a/nodebuilder/state/mocks/api.go b/nodebuilder/state/mocks/api.go index 3658c7ebf8..dbd1d5dabe 100644 --- a/nodebuilder/state/mocks/api.go +++ b/nodebuilder/state/mocks/api.go @@ -9,12 +9,12 @@ import ( reflect "reflect" math "cosmossdk.io/math" + blob "github.com/celestiaorg/celestia-node/blob" + state "github.com/celestiaorg/celestia-node/state" types "github.com/cosmos/cosmos-sdk/types" types0 "github.com/cosmos/cosmos-sdk/x/staking/types" gomock "github.com/golang/mock/gomock" types1 "github.com/tendermint/tendermint/types" - - namespace "github.com/celestiaorg/nmt/namespace" ) // MockModule is a mock of Module interface. @@ -41,10 +41,10 @@ func (m *MockModule) EXPECT() *MockModuleMockRecorder { } // AccountAddress mocks base method. -func (m *MockModule) AccountAddress(arg0 context.Context) (types.Address, error) { +func (m *MockModule) AccountAddress(arg0 context.Context) (state.Address, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "AccountAddress", arg0) - ret0, _ := ret[0].(types.Address) + ret0, _ := ret[0].(state.Address) ret1, _ := ret[1].(error) return ret0, ret1 } @@ -71,7 +71,7 @@ func (mr *MockModuleMockRecorder) Balance(arg0 interface{}) *gomock.Call { } // BalanceForAddress mocks base method. -func (m *MockModule) BalanceForAddress(arg0 context.Context, arg1 types.Address) (*types.Coin, error) { +func (m *MockModule) BalanceForAddress(arg0 context.Context, arg1 state.Address) (*types.Coin, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "BalanceForAddress", arg0, arg1) ret0, _ := ret[0].(*types.Coin) @@ -189,19 +189,19 @@ func (mr *MockModuleMockRecorder) QueryUnbonding(arg0, arg1 interface{}) *gomock return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "QueryUnbonding", reflect.TypeOf((*MockModule)(nil).QueryUnbonding), arg0, arg1) } -// SubmitPayForData mocks base method. 
-func (m *MockModule) SubmitPayForBlob(arg0 context.Context, arg1 namespace.ID, arg2 []byte, arg3 math.Int, arg4 uint64) (*types.TxResponse, error) { +// SubmitPayForBlob mocks base method. +func (m *MockModule) SubmitPayForBlob(arg0 context.Context, arg1 math.Int, arg2 uint64, arg3 []*blob.Blob) (*types.TxResponse, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "SubmitPayForBlob", arg0, arg1, arg2, arg3, arg4) + ret := m.ctrl.Call(m, "SubmitPayForBlob", arg0, arg1, arg2, arg3) ret0, _ := ret[0].(*types.TxResponse) ret1, _ := ret[1].(error) return ret0, ret1 } -// SubmitPayForData indicates an expected call of SubmitPayForData. -func (mr *MockModuleMockRecorder) SubmitPayForData(arg0, arg1, arg2, arg3, arg4 interface{}) *gomock.Call { +// SubmitPayForBlob indicates an expected call of SubmitPayForBlob. +func (mr *MockModuleMockRecorder) SubmitPayForBlob(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SubmitPayForBlob", reflect.TypeOf((*MockModule)(nil).SubmitPayForBlob), arg0, arg1, arg2, arg3, arg4) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SubmitPayForBlob", reflect.TypeOf((*MockModule)(nil).SubmitPayForBlob), arg0, arg1, arg2, arg3) } // SubmitTx mocks base method. diff --git a/nodebuilder/state/state.go b/nodebuilder/state/state.go index 07f61e5952..c66205d594 100644 --- a/nodebuilder/state/state.go +++ b/nodebuilder/state/state.go @@ -5,8 +5,7 @@ import ( "github.com/cosmos/cosmos-sdk/x/staking/types" - "github.com/celestiaorg/nmt/namespace" - + "github.com/celestiaorg/celestia-node/blob" "github.com/celestiaorg/celestia-node/state" ) @@ -36,7 +35,9 @@ type Module interface { // Transfer sends the given amount of coins from default wallet of the node to the given account // address. - Transfer(ctx context.Context, to state.AccAddress, amount, fee state.Int, gasLimit uint64) (*state.TxResponse, error) + Transfer( + ctx context.Context, to state.AccAddress, amount, fee state.Int, gasLimit uint64, + ) (*state.TxResponse, error) // SubmitTx submits the given transaction/message to the // Celestia network and blocks until the tx is included in // a block. @@ -44,10 +45,9 @@ type Module interface { // SubmitPayForBlob builds, signs and submits a PayForBlob transaction. SubmitPayForBlob( ctx context.Context, - nID namespace.ID, - data []byte, fee state.Int, gasLim uint64, + blobs []*blob.Blob, ) (*state.TxResponse, error) // CancelUnbondingDelegation cancels a user's pending undelegation from a validator. 
@@ -113,10 +113,9 @@ type API struct { SubmitTx func(ctx context.Context, tx state.Tx) (*state.TxResponse, error) `perm:"write"` SubmitPayForBlob func( ctx context.Context, - nID namespace.ID, - data []byte, fee state.Int, gasLim uint64, + blobs []*blob.Blob, ) (*state.TxResponse, error) `perm:"write"` CancelUnbondingDelegation func( ctx context.Context, @@ -192,12 +191,11 @@ func (api *API) SubmitTx(ctx context.Context, tx state.Tx) (*state.TxResponse, e func (api *API) SubmitPayForBlob( ctx context.Context, - nID namespace.ID, - data []byte, fee state.Int, gasLim uint64, + blobs []*blob.Blob, ) (*state.TxResponse, error) { - return api.Internal.SubmitPayForBlob(ctx, nID, data, fee, gasLim) + return api.Internal.SubmitPayForBlob(ctx, fee, gasLim, blobs) } func (api *API) CancelUnbondingDelegation( diff --git a/nodebuilder/tests/api_test.go b/nodebuilder/tests/api_test.go index 27a10a5592..37504ec3b3 100644 --- a/nodebuilder/tests/api_test.go +++ b/nodebuilder/tests/api_test.go @@ -3,16 +3,127 @@ package tests import ( "context" "testing" + "time" + "github.com/filecoin-project/go-jsonrpc/auth" "github.com/libp2p/go-libp2p/core/host" "github.com/libp2p/go-libp2p/core/peer" "github.com/stretchr/testify/require" + "github.com/celestiaorg/celestia-node/api/rpc/client" + "github.com/celestiaorg/celestia-node/blob" + "github.com/celestiaorg/celestia-node/blob/blobtest" "github.com/celestiaorg/celestia-node/nodebuilder" "github.com/celestiaorg/celestia-node/nodebuilder/node" "github.com/celestiaorg/celestia-node/nodebuilder/tests/swamp" ) +func TestNodeModule(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), swamp.DefaultTestTimeout) + t.Cleanup(cancel) + + sw := swamp.NewSwamp(t, swamp.WithBlockTime(time.Second)) + // start a bridge node + bridge := sw.NewBridgeNode() + err := bridge.Start(ctx) + require.NoError(t, err) + + bridgeAddr := "http://" + bridge.RPCServer.ListenAddr() + + writePerms := []auth.Permission{"public", "read", "write"} + adminPerms := []auth.Permission{"public", "read", "write", "admin"} + jwt, err := bridge.AdminServ.AuthNew(ctx, adminPerms) + require.NoError(t, err) + + client, err := client.NewClient(ctx, bridgeAddr, jwt) + require.NoError(t, err) + + info, err := client.Node.Info(ctx) + require.NoError(t, err) + require.Equal(t, info.APIVersion, node.APIVersion) + + perms, err := client.Node.AuthVerify(ctx, jwt) + require.NoError(t, err) + require.Equal(t, perms, adminPerms) + + writeJWT, err := client.Node.AuthNew(ctx, writePerms) + require.NoError(t, err) + + perms, err = client.Node.AuthVerify(ctx, writeJWT) + require.NoError(t, err) + require.Equal(t, perms, writePerms) + +} + +func TestGetByHeight(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), swamp.DefaultTestTimeout) + t.Cleanup(cancel) + + sw := swamp.NewSwamp(t, swamp.WithBlockTime(time.Second)) + + // start a bridge node + bridge := sw.NewBridgeNode() + err := bridge.Start(ctx) + require.NoError(t, err) + + adminPerms := []auth.Permission{"public", "read", "write", "admin"} + jwt, err := bridge.AdminServ.AuthNew(ctx, adminPerms) + require.NoError(t, err) + + bridgeAddr := "http://" + bridge.RPCServer.ListenAddr() + client, err := client.NewClient(ctx, bridgeAddr, jwt) + require.NoError(t, err) + + // let a few blocks be produced + _, err = client.Header.WaitForHeight(ctx, 3) + require.NoError(t, err) + + networkHead, err := client.Header.NetworkHead(ctx) + require.NoError(t, err) + _, err = client.Header.GetByHeight(ctx, uint64(networkHead.Height()+1)) + 
require.Nil(t, err, "Requesting syncer.Head()+1 shouldn't return an error") + + networkHead, err = client.Header.NetworkHead(ctx) + require.NoError(t, err) + _, err = client.Header.GetByHeight(ctx, uint64(networkHead.Height()+2)) + require.ErrorContains(t, err, "given height is from the future") +} + +// TestBlobRPC ensures that blobs can be submitted via RPC +func TestBlobRPC(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), swamp.DefaultTestTimeout) + t.Cleanup(cancel) + + sw := swamp.NewSwamp(t, swamp.WithBlockTime(btime)) + + // start a bridge node + bridge := sw.NewBridgeNode() + err := bridge.Start(ctx) + require.NoError(t, err) + + adminPerms := []auth.Permission{"public", "read", "write", "admin"} + jwt, err := bridge.AdminServ.AuthNew(ctx, adminPerms) + require.NoError(t, err) + + bridgeAddr := "http://" + bridge.RPCServer.ListenAddr() + client, err := client.NewClient(ctx, bridgeAddr, jwt) + require.NoError(t, err) + + appBlobs, err := blobtest.GenerateBlobs([]int{8}, false) + require.NoError(t, err) + + newBlob, err := blob.NewBlob( + appBlobs[0].ShareVersion, + append([]byte{appBlobs[0].NamespaceVersion}, appBlobs[0].NamespaceID...), + appBlobs[0].Data, + ) + require.NoError(t, err) + + height, err := client.Blob.Submit(ctx, []*blob.Blob{newBlob}) + require.NoError(t, err) + require.True(t, height != 0) +} + // TestHeaderSubscription ensures that the header subscription over RPC works // as intended and gets canceled successfully after rpc context cancellation. func TestHeaderSubscription(t *testing.T) { diff --git a/nodebuilder/tests/blob_test.go b/nodebuilder/tests/blob_test.go new file mode 100644 index 0000000000..537e320c5a --- /dev/null +++ b/nodebuilder/tests/blob_test.go @@ -0,0 +1,133 @@ +package tests + +import ( + "bytes" + "context" + "testing" + "time" + + "github.com/libp2p/go-libp2p/core/host" + "github.com/libp2p/go-libp2p/core/peer" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/celestiaorg/nmt/namespace" + + "github.com/celestiaorg/celestia-node/blob" + "github.com/celestiaorg/celestia-node/blob/blobtest" + "github.com/celestiaorg/celestia-node/nodebuilder/node" + "github.com/celestiaorg/celestia-node/nodebuilder/tests/swamp" +) + +func TestBlobModule(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), time.Minute) + t.Cleanup(cancel) + sw := swamp.NewSwamp(t) + + appBlobs0, err := blobtest.GenerateBlobs([]int{8, 4}, true) + require.NoError(t, err) + appBlobs1, err := blobtest.GenerateBlobs([]int{4}, false) + require.NoError(t, err) + blobs := make([]*blob.Blob, 0, len(appBlobs0)+len(appBlobs1)) + + for _, b := range append(appBlobs0, appBlobs1...)
{ + blob, err := blob.NewBlob(b.ShareVersion, append([]byte{b.NamespaceVersion}, b.NamespaceID...), b.Data) + require.NoError(t, err) + blobs = append(blobs, blob) + } + + require.NoError(t, err) + bridge := sw.NewBridgeNode() + require.NoError(t, bridge.Start(ctx)) + + addrs, err := peer.AddrInfoToP2pAddrs(host.InfoFromHost(bridge.Host)) + require.NoError(t, err) + + fullCfg := sw.DefaultTestConfig(node.Full) + fullCfg.Header.TrustedPeers = append(fullCfg.Header.TrustedPeers, addrs[0].String()) + fullNode := sw.NewNodeWithConfig(node.Full, fullCfg) + require.NoError(t, fullNode.Start(ctx)) + + addrsFull, err := peer.AddrInfoToP2pAddrs(host.InfoFromHost(fullNode.Host)) + require.NoError(t, err) + + lightCfg := sw.DefaultTestConfig(node.Light) + lightCfg.Header.TrustedPeers = append(lightCfg.Header.TrustedPeers, addrsFull[0].String()) + lightNode := sw.NewNodeWithConfig(node.Light, lightCfg) + require.NoError(t, lightNode.Start(ctx)) + + height, err := fullNode.BlobServ.Submit(ctx, blobs) + require.NoError(t, err) + + _, err = fullNode.HeaderServ.WaitForHeight(ctx, height) + require.NoError(t, err) + _, err = lightNode.HeaderServ.WaitForHeight(ctx, height) + require.NoError(t, err) + + var test = []struct { + name string + doFn func(t *testing.T) + }{ + { + name: "Get", + doFn: func(t *testing.T) { + blob1, err := fullNode.BlobServ.Get(ctx, height, blobs[0].Namespace(), blobs[0].Commitment) + require.NoError(t, err) + require.Equal(t, blobs[0], blob1) + }, + }, + { + name: "GetAll", + doFn: func(t *testing.T) { + newBlobs, err := fullNode.BlobServ.GetAll(ctx, height, []namespace.ID{blobs[0].Namespace()}) + require.NoError(t, err) + require.Len(t, newBlobs, len(appBlobs0)) + require.True(t, bytes.Equal(blobs[0].Commitment, newBlobs[0].Commitment)) + require.True(t, bytes.Equal(blobs[1].Commitment, newBlobs[1].Commitment)) + }, + }, + { + name: "Included", + doFn: func(t *testing.T) { + proof, err := fullNode.BlobServ.GetProof(ctx, height, blobs[0].Namespace(), blobs[0].Commitment) + require.NoError(t, err) + + included, err := lightNode.BlobServ.Included( + ctx, + height, + blobs[0].Namespace(), + proof, + blobs[0].Commitment, + ) + require.NoError(t, err) + require.True(t, included) + }, + }, + { + name: "Not Found", + doFn: func(t *testing.T) { + appBlob, err := blobtest.GenerateBlobs([]int{4}, false) + require.NoError(t, err) + newBlob, err := blob.NewBlob( + appBlob[0].ShareVersion, + append([]byte{appBlob[0].NamespaceVersion}, appBlob[0].NamespaceID...), + appBlob[0].Data, + ) + require.NoError(t, err) + + b, err := fullNode.BlobServ.Get(ctx, height, newBlob.Namespace(), newBlob.Commitment) + assert.Nil(t, b) + require.Error(t, err) + require.ErrorIs(t, err, blob.ErrBlobNotFound) + }, + }, + } + + for _, tt := range test { + tt := tt + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + tt.doFn(t) + }) + } +} diff --git a/nodebuilder/tests/fraud_test.go b/nodebuilder/tests/fraud_test.go index 2022107cb7..f652724d55 100644 --- a/nodebuilder/tests/fraud_test.go +++ b/nodebuilder/tests/fraud_test.go @@ -34,17 +34,18 @@ Another note: this test disables share exchange to speed up test results. 
*/ func TestFraudProofBroadcasting(t *testing.T) { t.Skip("requires BEFP generation on app side to work") + ctx, cancel := context.WithTimeout(context.Background(), swamp.DefaultTestTimeout) + t.Cleanup(cancel) const ( - blocks = 15 - bsize = 2 - btime = time.Millisecond * 300 + blocks = 15 + blockSize = 2 + blockTime = time.Millisecond * 300 ) - sw := swamp.NewSwamp(t, swamp.WithBlockTime(btime)) - ctx, cancel := context.WithTimeout(context.Background(), swamp.DefaultTestTimeout) - t.Cleanup(cancel) - fillDn := swamp.FillBlocks(ctx, sw.ClientContext, sw.Accounts, bsize, blocks) + sw := swamp.NewSwamp(t, swamp.WithBlockTime(blockTime)) + fillDn := swamp.FillBlocks(ctx, sw.ClientContext, sw.Accounts, blockSize, blocks) + cfg := nodebuilder.DefaultConfig(node.Bridge) cfg.Share.UseShareExchange = false bridge := sw.NewNodeWithConfig( @@ -55,12 +56,13 @@ func TestFraudProofBroadcasting(t *testing.T) { err := bridge.Start(ctx) require.NoError(t, err) - addrs, err := peer.AddrInfoToP2pAddrs(host.InfoFromHost(bridge.Host)) - require.NoError(t, err) cfg = nodebuilder.DefaultConfig(node.Full) cfg.Share.UseShareExchange = false + addrs, err := peer.AddrInfoToP2pAddrs(host.InfoFromHost(bridge.Host)) + require.NoError(t, err) cfg.Header.TrustedPeers = append(cfg.Header.TrustedPeers, addrs[0].String()) + store := nodebuilder.MockStore(t, cfg) full := sw.NewNodeWithStore(node.Full, store) @@ -72,9 +74,12 @@ func TestFraudProofBroadcasting(t *testing.T) { subscr, err := full.FraudServ.Subscribe(ctx, byzantine.BadEncoding) require.NoError(t, err) - p := <-subscr - require.Equal(t, 10, int(p.Height())) - + select { + case p := <-subscr: + require.Equal(t, 10, int(p.Height())) + case <-ctx.Done(): + t.Fatal("fraud proof was not received in time") + } // This is an obscure way to check if the Syncer was stopped. 
// If we cannot get a height header within a timeframe it means the syncer was stopped // FIXME: Eventually, this should be a check on service registry managing and keeping @@ -84,8 +89,7 @@ func TestFraudProofBroadcasting(t *testing.T) { require.ErrorIs(t, err, context.DeadlineExceeded) syncCancel() - require.NoError(t, full.Stop(ctx)) - require.NoError(t, sw.RemoveNode(full, node.Full)) + sw.StopNode(ctx, full) full = sw.NewNodeWithStore(node.Full, store) diff --git a/nodebuilder/tests/p2p_test.go b/nodebuilder/tests/p2p_test.go index 39c3c985a2..613dface94 100644 --- a/nodebuilder/tests/p2p_test.go +++ b/nodebuilder/tests/p2p_test.go @@ -142,7 +142,7 @@ func TestBootstrapNodesFromBridgeNode(t *testing.T) { // ensure that the light node is connected to the full node assert.True(t, light.Host.Network().Connectedness(addrFull.ID) == network.Connected) - sw.Disconnect(t, light.Host.ID(), full.Host.ID()) + sw.Disconnect(t, light, full) require.NoError(t, full.Stop(ctx)) select { case <-ctx.Done(): @@ -206,7 +206,7 @@ func TestRestartNodeDiscovery(t *testing.T) { connectSub, err := nodes[0].Host.EventBus().Subscribe(&event.EvtPeerConnectednessChanged{}) require.NoError(t, err) defer connectSub.Close() - sw.Disconnect(t, nodes[0].Host.ID(), nodes[1].Host.ID()) + sw.Disconnect(t, nodes[0], nodes[1]) require.NoError(t, node.Start(ctx)) // ensure that the last node is connected to one of the nodes diff --git a/nodebuilder/tests/swamp/swamp.go b/nodebuilder/tests/swamp/swamp.go index 5b99b577b1..eabe33dc9c 100644 --- a/nodebuilder/tests/swamp/swamp.go +++ b/nodebuilder/tests/swamp/swamp.go @@ -5,6 +5,7 @@ import ( "crypto/rand" "fmt" "net" + "sync" "testing" "time" @@ -16,6 +17,7 @@ import ( ma "github.com/multiformats/go-multiaddr" "github.com/stretchr/testify/require" "go.uber.org/fx" + "golang.org/x/exp/maps" "github.com/celestiaorg/celestia-app/test/util/testnode" apptypes "github.com/celestiaorg/celestia-app/x/blob/types" @@ -45,16 +47,18 @@ const DefaultTestTimeout = time.Minute * 5 // - Slices of created Bridge/Full/Light Nodes // - trustedHash taken from the CoreClient and shared between nodes type Swamp struct { - t *testing.T - Network mocknet.Mocknet - BridgeNodes []*nodebuilder.Node - FullNodes []*nodebuilder.Node - LightNodes []*nodebuilder.Node - comps *Config + t *testing.T + cfg *Config + + Network mocknet.Mocknet + Bootstrappers []ma.Multiaddr ClientContext testnode.Context Accounts []string + nodesMu sync.Mutex + nodes map[*nodebuilder.Node]struct{} + genesis *header.ExtendedHeader } @@ -69,37 +73,38 @@ func NewSwamp(t *testing.T, options ...Option) *Swamp { option(ic) } - ctx, cancel := context.WithCancel(context.Background()) - t.Cleanup(cancel) - // Now, we are making an assumption that consensus mechanism is already tested out // so, we are not creating bridge nodes with each one containing its own core client // instead we are assigning all created BNs to 1 Core from the swamp cctx := core.StartTestNodeWithConfig(t, ic.TestConfig) swp := &Swamp{ t: t, + cfg: ic, Network: mocknet.New(), ClientContext: cctx, - comps: ic, Accounts: ic.Accounts, + nodes: map[*nodebuilder.Node]struct{}{}, } - swp.t.Cleanup(func() { - swp.stopAllNodes(ctx, swp.BridgeNodes, swp.FullNodes, swp.LightNodes) - }) - - swp.setupGenesis(ctx) + swp.t.Cleanup(swp.cleanup) + swp.setupGenesis() return swp } -// stopAllNodes goes through all received slices of Nodes and stops one-by-one -// this eliminates a manual clean-up in the test-cases itself in the end -func (s *Swamp) stopAllNodes(ctx context.Context, 
allNodes ...[]*nodebuilder.Node) { - for _, nodes := range allNodes { - for _, node := range nodes { - require.NoError(s.t, node.Stop(ctx)) - } - } +// cleanup frees up all the resources +// including stop of all created nodes +func (s *Swamp) cleanup() { + ctx, cancel := context.WithTimeout(context.Background(), time.Second) + defer cancel() + + require.NoError(s.t, s.Network.Close()) + + s.nodesMu.Lock() + defer s.nodesMu.Unlock() + maps.DeleteFunc(s.nodes, func(nd *nodebuilder.Node, _ struct{}) bool { + require.NoError(s.t, nd.Stop(ctx)) + return true + }) } // GetCoreBlockHashByHeight returns a tendermint block's hash by provided height @@ -158,7 +163,10 @@ func (s *Swamp) createPeer(ks keystore.Keystore) host.Host { // setupGenesis sets up genesis Header. // This is required to initialize and start correctly. -func (s *Swamp) setupGenesis(ctx context.Context) { +func (s *Swamp) setupGenesis() { + ctx, cancel := context.WithTimeout(context.Background(), time.Second) + defer cancel() + // ensure core has surpassed genesis block s.WaitTillHeight(ctx, 2) @@ -180,7 +188,7 @@ func (s *Swamp) setupGenesis(ctx context.Context) { func (s *Swamp) DefaultTestConfig(tp node.Type) *nodebuilder.Config { cfg := nodebuilder.DefaultConfig(tp) - ip, port, err := net.SplitHostPort(s.comps.App.GRPC.Address) + ip, port, err := net.SplitHostPort(s.cfg.App.GRPC.Address) require.NoError(s.t, err) cfg.Core.IP = ip @@ -204,6 +212,10 @@ func (s *Swamp) NewFullNode(options ...fx.Option) *nodebuilder.Node { cfg.Header.TrustedPeers = []string{ "/ip4/1.2.3.4/tcp/12345/p2p/12D3KooWNaJ1y1Yio3fFJEXCZyd1Cat3jmrPdgkYCrHfKD3Ce21p", } + // add all bootstrappers in suite as trusted peers + for _, bootstrapper := range s.Bootstrappers { + cfg.Header.TrustedPeers = append(cfg.Header.TrustedPeers, bootstrapper.String()) + } store := nodebuilder.MockStore(s.t, cfg) return s.NewNodeWithStore(node.Full, store, options...) @@ -216,6 +228,10 @@ func (s *Swamp) NewLightNode(options ...fx.Option) *nodebuilder.Node { cfg.Header.TrustedPeers = []string{ "/ip4/1.2.3.4/tcp/12345/p2p/12D3KooWNaJ1y1Yio3fFJEXCZyd1Cat3jmrPdgkYCrHfKD3Ce21p", } + // add all bootstrappers in suite as trusted peers + for _, bootstrapper := range s.Bootstrappers { + cfg.Header.TrustedPeers = append(cfg.Header.TrustedPeers, bootstrapper.String()) + } store := nodebuilder.MockStore(s.t, cfg) @@ -224,40 +240,37 @@ func (s *Swamp) NewLightNode(options ...fx.Option) *nodebuilder.Node { func (s *Swamp) NewNodeWithConfig(nodeType node.Type, cfg *nodebuilder.Config, options ...fx.Option) *nodebuilder.Node { store := nodebuilder.MockStore(s.t, cfg) + // add all bootstrappers in suite as trusted peers + for _, bootstrapper := range s.Bootstrappers { + cfg.Header.TrustedPeers = append(cfg.Header.TrustedPeers, bootstrapper.String()) + } return s.NewNodeWithStore(nodeType, store, options...) } // NewNodeWithStore creates a new instance of Node with predefined Store. -// Afterwards, the instance is stored in the swamp's Nodes' slice according to the -// node's type provided from the user. func (s *Swamp) NewNodeWithStore( - t node.Type, + tp node.Type, store nodebuilder.Store, options ...fx.Option, ) *nodebuilder.Node { - var n *nodebuilder.Node - signer := apptypes.NewKeyringSigner(s.ClientContext.Keyring, s.Accounts[0], s.ClientContext.ChainID) options = append(options, state.WithKeyringSigner(signer), ) - switch t { + switch tp { case node.Bridge: options = append(options, coremodule.WithClient(s.ClientContext.Client), ) - n = s.newNode(node.Bridge, store, options...) 
- s.BridgeNodes = append(s.BridgeNodes, n) - case node.Full: - n = s.newNode(node.Full, store, options...) - s.FullNodes = append(s.FullNodes, n) - case node.Light: - n = s.newNode(node.Light, store, options...) - s.LightNodes = append(s.LightNodes, n) + default: } - return n + nd := s.newNode(tp, store, options...) + s.nodesMu.Lock() + s.nodes[nd] = struct{}{} + s.nodesMu.Unlock() + return nd } func (s *Swamp) newNode(t node.Type, store nodebuilder.Store, options ...fx.Option) *nodebuilder.Node { @@ -284,50 +297,20 @@ func (s *Swamp) newNode(t node.Type, store nodebuilder.Store, options ...fx.Opti return node } -// RemoveNode removes a node from the swamp's node slice -// this allows reusage of the same var in the test scenario -// if the user needs to stop and start the same node -func (s *Swamp) RemoveNode(n *nodebuilder.Node, t node.Type) error { - var err error - switch t { - case node.Light: - s.LightNodes, err = s.remove(n, s.LightNodes) - return err - case node.Bridge: - s.BridgeNodes, err = s.remove(n, s.BridgeNodes) - return err - case node.Full: - s.FullNodes, err = s.remove(n, s.FullNodes) - return err - default: - return fmt.Errorf("no such type or node") - } -} - -func (s *Swamp) remove(rn *nodebuilder.Node, sn []*nodebuilder.Node) ([]*nodebuilder.Node, error) { - if len(sn) == 1 { - return nil, nil - } - - initSize := len(sn) - for i := 0; i < len(sn); i++ { - if sn[i] == rn { - sn = append(sn[:i], sn[i+1:]...) - i-- - } - } - - if initSize <= len(sn) { - return sn, fmt.Errorf("cannot delete the node") - } - return sn, nil +// StopNode stops the node and removes it from the Swamp. +// TODO(@Wondertan): For clean and symmetrical API, we may want to add StartNode. +func (s *Swamp) StopNode(ctx context.Context, nd *nodebuilder.Node) { + s.nodesMu.Lock() + delete(s.nodes, nd) + s.nodesMu.Unlock() + require.NoError(s.t, nd.Stop(ctx)) } // Connect allows to connect peers after hard disconnection. -func (s *Swamp) Connect(t *testing.T, peerA, peerB peer.ID) { - _, err := s.Network.LinkPeers(peerA, peerB) +func (s *Swamp) Connect(t *testing.T, peerA, peerB *nodebuilder.Node) { + _, err := s.Network.LinkPeers(peerA.Host.ID(), peerB.Host.ID()) require.NoError(t, err) - _, err = s.Network.ConnectPeers(peerA, peerB) + _, err = s.Network.ConnectPeers(peerA.Host.ID(), peerB.Host.ID()) require.NoError(t, err) } @@ -335,7 +318,20 @@ func (s *Swamp) Connect(t *testing.T, peerA, peerB peer.ID) { // re-establish it. Order is very important here. We have to unlink peers first, and only after // that call disconnect. This is hard disconnect and peers will not be able to reconnect. // In order to reconnect peers again, please use swamp.Connect -func (s *Swamp) Disconnect(t *testing.T, peerA, peerB peer.ID) { - require.NoError(t, s.Network.UnlinkPeers(peerA, peerB)) - require.NoError(t, s.Network.DisconnectPeers(peerA, peerB)) +func (s *Swamp) Disconnect(t *testing.T, peerA, peerB *nodebuilder.Node) { + require.NoError(t, s.Network.UnlinkPeers(peerA.Host.ID(), peerB.Host.ID())) + require.NoError(t, s.Network.DisconnectPeers(peerA.Host.ID(), peerB.Host.ID())) +} + +// SetBootstrapper sets the given bootstrappers as the "bootstrappers" for the +// Swamp test suite. Every new full or light node created in the suite afterwards +// will automatically add the suite's bootstrappers as trusted peers to their config. +// NOTE: Bridge nodes do not automatically add the bootstrappers as trusted peers. +// NOTE: Use `NewNodeWithStore` to avoid this automatic configuration.
+func (s *Swamp) SetBootstrapper(t *testing.T, bootstrappers ...*nodebuilder.Node) { + for _, trusted := range bootstrappers { + addrs, err := peer.AddrInfoToP2pAddrs(host.InfoFromHost(trusted.Host)) + require.NoError(t, err) + s.Bootstrappers = append(s.Bootstrappers, addrs[0]) + } } diff --git a/nodebuilder/tests/sync_test.go b/nodebuilder/tests/sync_test.go index 68f33bc61b..dfa3577599 100644 --- a/nodebuilder/tests/sync_test.go +++ b/nodebuilder/tests/sync_test.go @@ -5,73 +5,187 @@ import ( "testing" "time" - "github.com/libp2p/go-libp2p/core/host" - "github.com/libp2p/go-libp2p/core/peer" + ma "github.com/multiformats/go-multiaddr" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "github.com/celestiaorg/celestia-node/nodebuilder" - "github.com/celestiaorg/celestia-node/nodebuilder/node" "github.com/celestiaorg/celestia-node/nodebuilder/tests/swamp" ) // Common consts for tests producing filled blocks const ( - blocks = 20 - bsize = 16 - btime = time.Millisecond * 300 + numBlocks = 20 + bsize = 16 + btime = time.Millisecond * 300 ) /* -Test-Case: Sync a Light Node with a Bridge Node(includes DASing of non-empty blocks) +Test-Case: Header and block/sample sync against a Bridge Node of non-empty blocks. + Steps: 1. Create a Bridge Node(BN) 2. Start a BN 3. Check BN is synced to height 20 -4. Create a Light Node(LN) with a trusted peer + +Light node: +4. Create a Light Node (LN) with bridge as a trusted peer 5. Start a LN with a defined connection to the BN -6. Check LN is synced to height 30 +6. Check LN is header-synced to height 20 +7. Wait until LN has sampled height 20 +8. Wait for LN DASer to catch up to network head + +Full node: +4. Create a Full Node (FN) with bridge as a trusted peer +5. Start a FN with a defined connection to the BN +6. Check FN is header-synced to height 20 +7. Wait until FN has synced block at height 20 +8. 
Wait for FN DASer to catch up to network head */ -func TestSyncLightWithBridge(t *testing.T) { +func TestSyncAgainstBridge_NonEmptyChain(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), swamp.DefaultTestTimeout) t.Cleanup(cancel) sw := swamp.NewSwamp(t, swamp.WithBlockTime(btime)) - fillDn := swamp.FillBlocks(ctx, sw.ClientContext, sw.Accounts, bsize, blocks) + // wait for core network to fill 20 blocks + fillDn := swamp.FillBlocks(ctx, sw.ClientContext, sw.Accounts, bsize, numBlocks) + sw.WaitTillHeight(ctx, numBlocks) + // create a bridge node and set it as the bootstrapper for the suite bridge := sw.NewBridgeNode() - - sw.WaitTillHeight(ctx, 20) - + sw.SetBootstrapper(t, bridge) + // start bridge and wait for it to sync to 20 err := bridge.Start(ctx) require.NoError(t, err) - h, err := bridge.HeaderServ.WaitForHeight(ctx, 20) + h, err := bridge.HeaderServ.WaitForHeight(ctx, numBlocks) require.NoError(t, err) + require.EqualValues(t, h.Commit.BlockID.Hash, sw.GetCoreBlockHashByHeight(ctx, numBlocks)) + + t.Run("light sync against bridge", func(t *testing.T) { + // create a light node that is connected to the bridge node as + // a bootstrapper + light := sw.NewLightNode() + // start light node and wait for it to sync 20 blocks + err = light.Start(ctx) + require.NoError(t, err) + h, err = light.HeaderServ.WaitForHeight(ctx, numBlocks) + require.NoError(t, err) + assert.EqualValues(t, h.Commit.BlockID.Hash, sw.GetCoreBlockHashByHeight(ctx, numBlocks)) + + // check that the light node has also sampled over the block at height 20 + err = light.ShareServ.SharesAvailable(ctx, h.DAH) + assert.NoError(t, err) + + // wait until the entire chain (up to network head) has been sampled + err = light.DASer.WaitCatchUp(ctx) + require.NoError(t, err) + }) + + t.Run("full sync against bridge", func(t *testing.T) { + // create a full node with bridge node as its bootstrapper + full := sw.NewFullNode() + // let full node sync 20 blocks + err = full.Start(ctx) + require.NoError(t, err) + h, err = full.HeaderServ.WaitForHeight(ctx, numBlocks) + require.NoError(t, err) + assert.EqualValues(t, h.Commit.BlockID.Hash, sw.GetCoreBlockHashByHeight(ctx, numBlocks)) + + // check to ensure the full node can sync the 20th block's data + err = full.ShareServ.SharesAvailable(ctx, h.DAH) + assert.NoError(t, err) + + // wait for full node to sync up the blocks from genesis -> network head. + err = full.DASer.WaitCatchUp(ctx) + require.NoError(t, err) + }) + + // wait for the core block filling process to exit + select { + case <-ctx.Done(): + t.Fatal(ctx.Err()) + case err := <-fillDn: + require.NoError(t, err) + } +} - require.EqualValues(t, h.Commit.BlockID.Hash, sw.GetCoreBlockHashByHeight(ctx, 20)) +/* +Test-Case: Header and block/sample sync against a Bridge Node of empty blocks. - addrs, err := peer.AddrInfoToP2pAddrs(host.InfoFromHost(bridge.Host)) - require.NoError(t, err) +Steps: +1. Create a Bridge Node(BN) +2. Start a BN +3. Check BN is synced to height 20 - cfg := nodebuilder.DefaultConfig(node.Light) - cfg.Header.TrustedPeers = append(cfg.Header.TrustedPeers, addrs[0].String()) - light := sw.NewNodeWithConfig(node.Light, cfg) +Light node: +4. Create a Light Node (LN) with bridge as a trusted peer +5. Start a LN with a defined connection to the BN +6. Check LN is header-synced to height 20 +7. Wait until LN has sampled height 20 +8. Wait for LN DASer to catch up to network head + +Full node: +4. Create a Full Node (FN) with bridge as a trusted peer +5. 
Start a FN with a defined connection to the BN +6. Check FN is header-synced to height 20 +7. Wait until FN has synced block at height 20 +8. Wait for FN DASer to catch up to network head +*/ +func TestSyncAgainstBridge_EmptyChain(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), swamp.DefaultTestTimeout) + t.Cleanup(cancel) - err = light.Start(ctx) - require.NoError(t, err) + sw := swamp.NewSwamp(t, swamp.WithBlockTime(btime)) + sw.WaitTillHeight(ctx, numBlocks) - h, err = light.HeaderServ.WaitForHeight(ctx, 30) + // create bridge node and set it as the bootstrapper for the suite + bridge := sw.NewBridgeNode() + sw.SetBootstrapper(t, bridge) + // start bridge and wait for it to sync to 20 + err := bridge.Start(ctx) require.NoError(t, err) - - err = light.ShareServ.SharesAvailable(ctx, h.DAH) - assert.NoError(t, err) - - err = light.DASer.WaitCatchUp(ctx) + h, err := bridge.HeaderServ.WaitForHeight(ctx, numBlocks) require.NoError(t, err) - - assert.EqualValues(t, h.Commit.BlockID.Hash, sw.GetCoreBlockHashByHeight(ctx, 30)) - require.NoError(t, <-fillDn) + require.EqualValues(t, h.Commit.BlockID.Hash, sw.GetCoreBlockHashByHeight(ctx, numBlocks)) + + t.Run("light sync against bridge", func(t *testing.T) { + // create a light node that is connected to the bridge node as + // a bootstrapper + light := sw.NewLightNode() + // start light node and wait for it to sync 20 blocks + err = light.Start(ctx) + require.NoError(t, err) + h, err = light.HeaderServ.WaitForHeight(ctx, numBlocks) + require.NoError(t, err) + assert.EqualValues(t, h.Commit.BlockID.Hash, sw.GetCoreBlockHashByHeight(ctx, numBlocks)) + + // check that the light node has also sampled over the block at height 20 + err = light.ShareServ.SharesAvailable(ctx, h.DAH) + assert.NoError(t, err) + + // wait until the entire chain (up to network head) has been sampled + err = light.DASer.WaitCatchUp(ctx) + require.NoError(t, err) + }) + + t.Run("full sync against bridge", func(t *testing.T) { + // create a full node with bridge node as its bootstrapper + full := sw.NewFullNode() + // let full node sync 20 blocks + err = full.Start(ctx) + require.NoError(t, err) + h, err = full.HeaderServ.WaitForHeight(ctx, numBlocks) + require.NoError(t, err) + assert.EqualValues(t, h.Commit.BlockID.Hash, sw.GetCoreBlockHashByHeight(ctx, numBlocks)) + + // check to ensure the full node can sync the 20th block's data + err = full.ShareServ.SharesAvailable(ctx, h.DAH) + assert.NoError(t, err) + + // wait for full node to sync up the blocks from genesis -> network head. + err = full.DASer.WaitCatchUp(ctx) + require.NoError(t, err) + }) } /* @@ -85,107 +199,57 @@ Steps: 3. Check BN is synced to height 20 4. Create a Light Node(LN) with a trusted peer 5. Start a LN with a defined connection to the BN -6. Check LN is synced to height 30 -7. Stop LN -8. Start LN +6. Check LN is synced to height 20 +7. Disconnect LN from BN for 3 seconds while BN continues broadcasting new blocks from core +8. Re-connect LN and let it sync up again 9. 
Check LN is synced to height 40 */ func TestSyncStartStopLightWithBridge(t *testing.T) { - sw := swamp.NewSwamp(t) - - bridge := sw.NewBridgeNode() - ctx, cancel := context.WithTimeout(context.Background(), swamp.DefaultTestTimeout) - t.Cleanup(cancel) + defer cancel() - sw.WaitTillHeight(ctx, 50) + sw := swamp.NewSwamp(t) + // wait for core network to fill 20 blocks + fillDn := swamp.FillBlocks(ctx, sw.ClientContext, sw.Accounts, bsize, numBlocks) + sw.WaitTillHeight(ctx, numBlocks) + // create bridge and set it as a bootstrapper + bridge := sw.NewBridgeNode() + sw.SetBootstrapper(t, bridge) + // and let bridge node sync up 20 blocks err := bridge.Start(ctx) require.NoError(t, err) - - h, err := bridge.HeaderServ.WaitForHeight(ctx, 20) + h, err := bridge.HeaderServ.WaitForHeight(ctx, numBlocks) require.NoError(t, err) + require.EqualValues(t, h.Commit.BlockID.Hash, sw.GetCoreBlockHashByHeight(ctx, numBlocks)) - require.EqualValues(t, h.Commit.BlockID.Hash, sw.GetCoreBlockHashByHeight(ctx, 20)) - - addrs, err := peer.AddrInfoToP2pAddrs(host.InfoFromHost(bridge.Host)) + // create a light node and connect it to the bridge node as a bootstrapper + light := sw.NewLightNode() + // start light node and let it sync to 20 + err = light.Start(ctx) require.NoError(t, err) - - cfg := nodebuilder.DefaultConfig(node.Light) - cfg.Header.TrustedPeers = append(cfg.Header.TrustedPeers, addrs[0].String()) - light := sw.NewNodeWithConfig(node.Light, cfg) - require.NoError(t, light.Start(ctx)) - - h, err = light.HeaderServ.WaitForHeight(ctx, 30) + h, err = light.HeaderServ.WaitForHeight(ctx, numBlocks) require.NoError(t, err) + require.EqualValues(t, h.Commit.BlockID.Hash, sw.GetCoreBlockHashByHeight(ctx, numBlocks)) - require.EqualValues(t, h.Commit.BlockID.Hash, sw.GetCoreBlockHashByHeight(ctx, 30)) + sw.StopNode(ctx, light) - require.NoError(t, light.Stop(ctx)) - require.NoError(t, sw.RemoveNode(light, node.Light)) - - cfg = nodebuilder.DefaultConfig(node.Light) - cfg.Header.TrustedPeers = append(cfg.Header.TrustedPeers, addrs[0].String()) - light = sw.NewNodeWithConfig(node.Light, cfg) + light = sw.NewLightNode() require.NoError(t, light.Start(ctx)) + // ensure when light node comes back up, it can sync the remainder of the chain it + // missed while sleeping h, err = light.HeaderServ.WaitForHeight(ctx, 40) require.NoError(t, err) - assert.EqualValues(t, h.Commit.BlockID.Hash, sw.GetCoreBlockHashByHeight(ctx, 40)) -} - -/* -Test-Case: Sync a Full Node with a Bridge Node(includes DASing of non-empty blocks) -Steps: -1. Create a Bridge Node(BN) -2. Start a BN -3. Check BN is synced to height 20 -4. Create a Full Node(FN) with a connection to BN as a trusted peer -5. Start a FN -6. 
Check FN is synced to height 30 -*/ -func TestSyncFullWithBridge(t *testing.T) { - ctx, cancel := context.WithTimeout(context.Background(), swamp.DefaultTestTimeout) - t.Cleanup(cancel) - - sw := swamp.NewSwamp(t, swamp.WithBlockTime(btime)) - fillDn := swamp.FillBlocks(ctx, sw.ClientContext, sw.Accounts, bsize, blocks) - - bridge := sw.NewBridgeNode() - - sw.WaitTillHeight(ctx, 20) - - err := bridge.Start(ctx) - require.NoError(t, err) - - h, err := bridge.HeaderServ.WaitForHeight(ctx, 20) - require.NoError(t, err) - - assert.EqualValues(t, h.Commit.BlockID.Hash, sw.GetCoreBlockHashByHeight(ctx, 20)) - addrs, err := peer.AddrInfoToP2pAddrs(host.InfoFromHost(bridge.Host)) - require.NoError(t, err) - - cfg := nodebuilder.DefaultConfig(node.Full) - cfg.Header.TrustedPeers = append(cfg.Header.TrustedPeers, addrs[0].String()) - cfg.Share.UseShareExchange = false - full := sw.NewNodeWithConfig(node.Full, cfg) - require.NoError(t, full.Start(ctx)) - - h, err = full.HeaderServ.WaitForHeight(ctx, 30) - require.NoError(t, err) - - assert.EqualValues(t, h.Commit.BlockID.Hash, sw.GetCoreBlockHashByHeight(ctx, 30)) - - err = full.ShareServ.SharesAvailable(ctx, h.DAH) - assert.NoError(t, err) - - err = full.DASer.WaitCatchUp(ctx) - require.NoError(t, err) - - assert.EqualValues(t, h.Commit.BlockID.Hash, sw.GetCoreBlockHashByHeight(ctx, 30)) - require.NoError(t, <-fillDn) + // wait for the core block filling process to exit + select { + case <-ctx.Done(): + t.Fatal(ctx.Err()) + case err := <-fillDn: + require.NoError(t, err) + } } /* @@ -199,59 +263,69 @@ Steps: 3. Check BN is synced to height 20 4. Create a Full Node(FN) with a connection to BN as a trusted peer 5. Start a FN -6. Check FN is synced to height 30 +6. Check FN is synced to network head 7. Create a Light Node(LN) with a connection to FN as a trusted peer -8. Start LN -9. Check LN is synced to height 50 +8. Ensure LN is NOT connected to BN and only connected to FN +9. Start LN +10. 
Check LN is synced to network head */ -func TestSyncLightWithFull(t *testing.T) { - sw := swamp.NewSwamp(t) - - bridge := sw.NewBridgeNode() - +func TestSyncLightAgainstFull(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), swamp.DefaultTestTimeout) t.Cleanup(cancel) - sw.WaitTillHeight(ctx, 20) + sw := swamp.NewSwamp(t) + // wait for the core network to fill up 20 blocks + fillDn := swamp.FillBlocks(ctx, sw.ClientContext, sw.Accounts, bsize, numBlocks) + sw.WaitTillHeight(ctx, numBlocks) + // create bridge and set it as a bootstrapper + bridge := sw.NewBridgeNode() + sw.SetBootstrapper(t, bridge) + // start a bridge node and wait for it to sync up 20 blocks err := bridge.Start(ctx) require.NoError(t, err) - - h, err := bridge.HeaderServ.WaitForHeight(ctx, 20) + h, err := bridge.HeaderServ.WaitForHeight(ctx, numBlocks) require.NoError(t, err) + assert.EqualValues(t, h.Commit.BlockID.Hash, sw.GetCoreBlockHashByHeight(ctx, numBlocks)) - assert.EqualValues(t, h.Commit.BlockID.Hash, sw.GetCoreBlockHashByHeight(ctx, 20)) - - addrs, err := peer.AddrInfoToP2pAddrs(host.InfoFromHost(bridge.Host)) + // create a FN with BN as a trusted peer + full := sw.NewFullNode() + // start FN and wait for it to sync up to head of BN + err = full.Start(ctx) require.NoError(t, err) - - cfg := nodebuilder.DefaultConfig(node.Full) - cfg.Header.TrustedPeers = append(cfg.Header.TrustedPeers, addrs[0].String()) - full := sw.NewNodeWithConfig(node.Full, cfg) - require.NoError(t, full.Start(ctx)) - - h, err = full.HeaderServ.WaitForHeight(ctx, 30) + bridgeHead, err := bridge.HeaderServ.LocalHead(ctx) require.NoError(t, err) - - assert.EqualValues(t, h.Commit.BlockID.Hash, sw.GetCoreBlockHashByHeight(ctx, 30)) - - addrs, err = peer.AddrInfoToP2pAddrs(host.InfoFromHost(full.Host)) + _, err = full.HeaderServ.WaitForHeight(ctx, uint64(bridgeHead.Height())) require.NoError(t, err) - cfg = nodebuilder.DefaultConfig(node.Light) - cfg.Header.TrustedPeers = append(cfg.Header.TrustedPeers, addrs[0].String()) - light := sw.NewNodeWithConfig(node.Light, cfg) + // reset suite bootstrapper list and set full node as a bootstrapper for + // LN to connect to + sw.Bootstrappers = make([]ma.Multiaddr, 0) + sw.SetBootstrapper(t, full) + // create an LN with FN as a trusted peer + light := sw.NewLightNode() + + // ensure there is no direct connection between LN and BN so that + // LN relies only on FN for syncing err = sw.Network.UnlinkPeers(bridge.Host.ID(), light.Host.ID()) require.NoError(t, err) + // start LN and wait for it to sync up to network head against the head of the FN err = light.Start(ctx) require.NoError(t, err) - - h, err = light.HeaderServ.WaitForHeight(ctx, 50) + fullHead, err := full.HeaderServ.LocalHead(ctx) + require.NoError(t, err) + _, err = light.HeaderServ.WaitForHeight(ctx, uint64(fullHead.Height())) require.NoError(t, err) - assert.EqualValues(t, h.Commit.BlockID.Hash, sw.GetCoreBlockHashByHeight(ctx, 50)) + // wait for the core block filling process to exit + select { + case <-ctx.Done(): + t.Fatal(ctx.Err()) + case err := <-fillDn: + require.NoError(t, err) + } } /* @@ -265,54 +339,54 @@ Steps: 3. Check BN is synced to height 20 4. Create a Full Node(FN) with a connection to BN as a trusted peer 5. Start a FN -6. Check FN is synced to height 30 -7. Create a Light Node(LN) with a connection to BN, FN as trusted peers +6. Check FN is synced to network head +7. Create a Light Node(LN) with a connection to BN and FN as trusted peers 8. Start LN -9. Check LN is synced to height 50 +9. 
Check LN is synced to network head. */ func TestSyncLightWithTrustedPeers(t *testing.T) { - sw := swamp.NewSwamp(t) - - bridge := sw.NewBridgeNode() - ctx, cancel := context.WithTimeout(context.Background(), swamp.DefaultTestTimeout) t.Cleanup(cancel) - sw.WaitTillHeight(ctx, 20) + sw := swamp.NewSwamp(t) + fillDn := swamp.FillBlocks(ctx, sw.ClientContext, sw.Accounts, bsize, numBlocks) + sw.WaitTillHeight(ctx, numBlocks) + // create a BN and set as a bootstrapper + bridge := sw.NewBridgeNode() + sw.SetBootstrapper(t, bridge) + // let it sync to network head err := bridge.Start(ctx) require.NoError(t, err) - - h, err := bridge.HeaderServ.WaitForHeight(ctx, 20) + _, err = bridge.HeaderServ.WaitForHeight(ctx, numBlocks) require.NoError(t, err) - assert.EqualValues(t, h.Commit.BlockID.Hash, sw.GetCoreBlockHashByHeight(ctx, 20)) + // create a FN with BN as trusted peer + full := sw.NewFullNode() - addrs, err := peer.AddrInfoToP2pAddrs(host.InfoFromHost(bridge.Host)) + // let FN sync to network head + err = full.Start(ctx) require.NoError(t, err) - - cfg := nodebuilder.DefaultConfig(node.Full) - cfg.Header.TrustedPeers = append(cfg.Header.TrustedPeers, addrs[0].String()) - full := sw.NewNodeWithConfig(node.Full, cfg) - require.NoError(t, full.Start(ctx)) - - h, err = full.HeaderServ.WaitForHeight(ctx, 30) + err = full.HeaderServ.SyncWait(ctx) require.NoError(t, err) - assert.EqualValues(t, h.Commit.BlockID.Hash, sw.GetCoreBlockHashByHeight(ctx, 30)) + // add full node as a bootstrapper for the suite + sw.SetBootstrapper(t, full) - addrs, err = peer.AddrInfoToP2pAddrs(host.InfoFromHost(full.Host)) - require.NoError(t, err) - - cfg = nodebuilder.DefaultConfig(node.Light) - cfg.Header.TrustedPeers = append(cfg.Header.TrustedPeers, addrs[0].String()) - light := sw.NewNodeWithConfig(node.Light, cfg) + // create a LN with both FN and BN as trusted peers + light := sw.NewLightNode() + // let LN sync to network head err = light.Start(ctx) require.NoError(t, err) - - h, err = light.HeaderServ.WaitForHeight(ctx, 50) + err = light.HeaderServ.SyncWait(ctx) require.NoError(t, err) - assert.EqualValues(t, h.Commit.BlockID.Hash, sw.GetCoreBlockHashByHeight(ctx, 50)) + // wait for the core block filling process to exit + select { + case <-ctx.Done(): + t.Fatal(ctx.Err()) + case err := <-fillDn: + require.NoError(t, err) + } } diff --git a/share/eds/eds_test.go b/share/eds/eds_test.go index 8df05d7d53..ea0f06c138 100644 --- a/share/eds/eds_test.go +++ b/share/eds/eds_test.go @@ -39,7 +39,7 @@ func TestQuadrantOrder(t *testing.T) { {"smol", 2}, {"still smol", 8}, {"default mainnet", appconsts.DefaultGovMaxSquareSize}, - {"max", appconsts.MaxSquareSize}, + {"max", share.MaxSquareSize}, } testShareSize := 64 diff --git a/share/eds/store.go b/share/eds/store.go index f01e96a24b..f0d02a1141 100644 --- a/share/eds/store.go +++ b/share/eds/store.go @@ -119,6 +119,10 @@ func (s *Store) Start(ctx context.Context) error { // start Store only if DagStore succeeds ctx, cancel := context.WithCancel(context.Background()) s.cancel = cancel + // initialize empty gc result to avoid panic on access + s.lastGCResult.Store(&dagstore.GCResult{ + Shards: make(map[shard.Key]error), + }) go s.gc(ctx) return nil } @@ -132,10 +136,6 @@ func (s *Store) Stop(context.Context) error { // gc periodically removes all inactive or errored shards. 
func (s *Store) gc(ctx context.Context) { ticker := time.NewTicker(s.gcInterval) - // initialize empty gc result to avoid panic on access - s.lastGCResult.Store(&dagstore.GCResult{ - Shards: make(map[shard.Key]error), - }) for { select { case <-ctx.Done(): diff --git a/share/get_test.go b/share/get_test.go index 8eafe84cd8..25711e5a65 100644 --- a/share/get_test.go +++ b/share/get_test.go @@ -1,8 +1,12 @@ package share import ( + "bytes" "context" + "crypto/rand" + "errors" mrand "math/rand" + "sort" "strconv" "testing" "time" @@ -141,8 +145,8 @@ func removeRandShares(data [][]byte, d int) [][]byte { } func TestGetSharesByNamespace(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + t.Cleanup(cancel) bServ := mdutils.Bserv() var tests = []struct { @@ -168,6 +172,9 @@ func TestGetSharesByNamespace(t *testing.T) { for _, row := range eds.RowRoots() { rcid := ipld.MustCidFromNamespacedSha256(row) rowShares, _, err := GetSharesByNamespace(ctx, bServ, rcid, nID, len(eds.RowRoots())) + if errors.Is(err, ipld.ErrNamespaceOutsideRange) { + continue + } require.NoError(t, err) shares = append(shares, rowShares...) @@ -182,8 +189,8 @@ func TestGetSharesByNamespace(t *testing.T) { } func TestCollectLeavesByNamespace_IncompleteData(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + t.Cleanup(cancel) bServ := mdutils.Bserv() shares := RandShares(t, 16) @@ -191,11 +198,7 @@ func TestCollectLeavesByNamespace_IncompleteData(t *testing.T) { // set all shares to the same namespace id nid := shares[0][:NamespaceSize] - for i, nspace := range shares { - if i == len(shares) { - break - } - + for _, nspace := range shares { copy(nspace[:NamespaceSize], nid) } @@ -230,59 +233,53 @@ func TestCollectLeavesByNamespace_IncompleteData(t *testing.T) { } func TestCollectLeavesByNamespace_AbsentNamespaceId(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + t.Cleanup(cancel) bServ := mdutils.Bserv() shares := RandShares(t, 16) - minNid := make([]byte, NamespaceSize) - midNid := make([]byte, NamespaceSize) - maxNid := make([]byte, NamespaceSize) - - numberOfShares := len(shares) - - copy(minNid, shares[0][:NamespaceSize]) - copy(maxNid, shares[numberOfShares-1][:NamespaceSize]) - copy(midNid, shares[numberOfShares/2][:NamespaceSize]) - - // create min nid missing data by replacing first namespace id with second - minNidMissingData := make([]Share, len(shares)) - copy(minNidMissingData, shares) - copy(minNidMissingData[0][:NamespaceSize], shares[1][:NamespaceSize]) - - // create max nid missing data by replacing last namespace id with second last - maxNidMissingData := make([]Share, len(shares)) - copy(maxNidMissingData, shares) - copy(maxNidMissingData[numberOfShares-1][:NamespaceSize], shares[numberOfShares-2][:NamespaceSize]) + // set all shares to the same namespace id + nids, err := randomNids(5) + require.NoError(t, err) + minNid := nids[0] + minIncluded := nids[1] + midNid := nids[2] + maxIncluded := nids[3] + maxNid := nids[4] - // create mid nid missing data by replacing middle namespace id with the one after - midNidMissingData := make([]Share, len(shares)) - copy(midNidMissingData, shares) - copy(midNidMissingData[numberOfShares/2][:NamespaceSize], 
shares[(numberOfShares/2)+1][:NamespaceSize]) + secondNamespaceFrom := mrand.Intn(len(shares)-2) + 1 + for i, nspace := range shares { + if i < secondNamespaceFrom { + copy(nspace[:NamespaceSize], minIncluded) + continue + } + copy(nspace[:NamespaceSize], maxIncluded) + } var tests = []struct { name string data []Share missingNid []byte + isAbsence bool }{ - {name: "Namespace id less than the minimum namespace in data", data: minNidMissingData, missingNid: minNid}, - {name: "Namespace id greater than the maximum namespace in data", data: maxNidMissingData, missingNid: maxNid}, - {name: "Namespace id in range but still missing", data: midNidMissingData, missingNid: midNid}, + {name: "Namespace id less than the minimum namespace in data", data: shares, missingNid: minNid}, + {name: "Namespace id greater than the maximum namespace in data", data: shares, missingNid: maxNid}, + {name: "Namespace id in range but still missing", data: shares, missingNid: midNid, isAbsence: true}, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { eds, err := AddShares(ctx, shares, bServ) require.NoError(t, err) - assertNoRowContainsNID(t, bServ, eds, tt.missingNid) + assertNoRowContainsNID(ctx, t, bServ, eds, tt.missingNid, tt.isAbsence) }) } } func TestCollectLeavesByNamespace_MultipleRowsContainingSameNamespaceId(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + t.Cleanup(cancel) bServ := mdutils.Bserv() shares := RandShares(t, 16) @@ -306,6 +303,9 @@ func TestCollectLeavesByNamespace_MultipleRowsContainingSameNamespaceId(t *testi rcid := ipld.MustCidFromNamespacedSha256(row) data := ipld.NewNamespaceData(len(shares), nid, ipld.WithLeaves()) err := data.CollectLeavesByNamespace(ctx, bServ, rcid) + if errors.Is(err, ipld.ErrNamespaceOutsideRange) { + continue + } assert.Nil(t, err) leaves := data.Leaves() for _, node := range leaves { @@ -317,7 +317,7 @@ func TestCollectLeavesByNamespace_MultipleRowsContainingSameNamespaceId(t *testi } func TestGetSharesWithProofsByNamespace(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) t.Cleanup(cancel) bServ := mdutils.Bserv() @@ -341,7 +341,7 @@ func TestGetSharesWithProofsByNamespace(t *testing.T) { } expected := tt.rawData[from] - nID := expected[:NamespaceSize] + nID := namespace.ID(expected[:NamespaceSize]) // change rawData to contain several shares with same nID for i := from; i <= to; i++ { @@ -356,6 +356,10 @@ func TestGetSharesWithProofsByNamespace(t *testing.T) { for _, row := range eds.RowRoots() { rcid := ipld.MustCidFromNamespacedSha256(row) rowShares, proof, err := GetSharesByNamespace(ctx, bServ, rcid, nID, len(eds.RowRoots())) + if ipld.NamespaceIsOutsideRange(row, row, nID) { + require.ErrorIs(t, err, ipld.ErrNamespaceOutsideRange) + continue + } require.NoError(t, err) if len(rowShares) > 0 { require.NotNil(t, proof) @@ -432,10 +436,12 @@ func TestBatchSize(t *testing.T) { } func assertNoRowContainsNID( + ctx context.Context, t *testing.T, bServ blockservice.BlockService, eds *rsmt2d.ExtendedDataSquare, nID namespace.ID, + isAbsent bool, ) { rowRootCount := len(eds.RowRoots()) // get all row root cids @@ -445,11 +451,47 @@ func assertNoRowContainsNID( } // for each row root cid check if the minNID exists - for _, rowCID := range rowRootCIDs { + var absentCount, foundAbsenceRows int + for _, rowRoot := range eds.RowRoots() { + 
var outsideRange bool + if !ipld.NamespaceIsOutsideRange(rowRoot, rowRoot, nID) { + // nID does belong to namespace range of the row + absentCount++ + } else { + outsideRange = true + } data := ipld.NewNamespaceData(rowRootCount, nID, ipld.WithProofs()) - err := data.CollectLeavesByNamespace(context.Background(), bServ, rowCID) - leaves := data.Leaves() - assert.Nil(t, leaves) - assert.Nil(t, err) + rootCID := ipld.MustCidFromNamespacedSha256(rowRoot) + err := data.CollectLeavesByNamespace(ctx, bServ, rootCID) + if outsideRange { + require.ErrorIs(t, err, ipld.ErrNamespaceOutsideRange) + continue + } + require.NoError(t, err) + + // if no error returned, check absence proof + foundAbsenceRows++ + verified := data.Proof().VerifyNamespace(sha256.New(), nID, nil, rowRoot) + require.True(t, verified) + } + + if isAbsent { + require.Equal(t, foundAbsenceRows, absentCount) + // there should be max 1 row that has namespace range containing nID + require.LessOrEqual(t, absentCount, 1) + } +} + +func randomNids(total int) ([]namespace.ID, error) { + namespaces := make([]namespace.ID, total) + for i := range namespaces { + nid := make([]byte, NamespaceSize) + _, err := rand.Read(nid) + if err != nil { + return nil, err + } + namespaces[i] = nid } + sort.Slice(namespaces, func(i, j int) bool { return bytes.Compare(namespaces[i], namespaces[j]) < 0 }) + return namespaces, nil } diff --git a/share/getter.go b/share/getter.go index f7a7b9c129..6ba729c71a 100644 --- a/share/getter.go +++ b/share/getter.go @@ -10,6 +10,8 @@ import ( "github.com/celestiaorg/nmt" "github.com/celestiaorg/nmt/namespace" "github.com/celestiaorg/rsmt2d" + + "github.com/celestiaorg/celestia-node/share/ipld" ) var ( @@ -58,7 +60,7 @@ type NamespacedRow struct { func (ns NamespacedShares) Verify(root *Root, nID namespace.ID) error { originalRoots := make([][]byte, 0) for _, row := range root.RowRoots { - if !nID.Less(nmt.MinNamespace(row, nID.Size())) && nID.LessOrEqual(nmt.MaxNamespace(row, nID.Size())) { + if !ipld.NamespaceIsOutsideRange(row, row, nID) { originalRoots = append(originalRoots, row) } } diff --git a/share/getters/cascade.go b/share/getters/cascade.go index d65b902e81..1a0d8fb274 100644 --- a/share/getters/cascade.go +++ b/share/getters/cascade.go @@ -122,16 +122,18 @@ func cascadeGetters[V any]( getCtx, cancel := ctxWithSplitTimeout(ctx, len(getters)-i, 0) val, getErr := get(getCtx, getter) cancel() - if getErr == nil { - return val, nil + if getErr == nil || errors.Is(getErr, share.ErrNamespaceNotFound) { + return val, getErr } - if errors.Is(share.ErrNamespaceNotFound, getErr) { - return zero, getErr + + if errors.Is(getErr, errOperationNotSupported) { + continue } - if !errors.Is(getErr, errOperationNotSupported) { - err = errors.Join(err, getErr) - span.RecordError(getErr, trace.WithAttributes(attribute.Int("getter_idx", i))) + err = errors.Join(err, getErr) + span.RecordError(getErr, trace.WithAttributes(attribute.Int("getter_idx", i))) + if ctx.Err() != nil { + return zero, err } } return zero, err diff --git a/share/getters/cascade_test.go b/share/getters/cascade_test.go index e3a324c5e9..d955c50682 100644 --- a/share/getters/cascade_test.go +++ b/share/getters/cascade_test.go @@ -52,6 +52,7 @@ func TestCascade(t *testing.T) { timeoutGetter := mocks.NewMockGetter(ctrl) immediateFailGetter := mocks.NewMockGetter(ctrl) successGetter := mocks.NewMockGetter(ctrl) + ctxGetter := mocks.NewMockGetter(ctrl) timeoutGetter.EXPECT().GetEDS(gomock.Any(), gomock.Any()). 
DoAndReturn(func(ctx context.Context, _ *share.Root) (*rsmt2d.ExtendedDataSquare, error) { return nil, context.DeadlineExceeded @@ -60,6 +61,10 @@ func TestCascade(t *testing.T) { Return(nil, errors.New("second getter fails immediately")).AnyTimes() successGetter.EXPECT().GetEDS(gomock.Any(), gomock.Any()). Return(nil, nil).AnyTimes() + ctxGetter.EXPECT().GetEDS(gomock.Any(), gomock.Any()). + DoAndReturn(func(ctx context.Context, _ *share.Root) (*rsmt2d.ExtendedDataSquare, error) { + return nil, ctx.Err() + }).AnyTimes() get := func(ctx context.Context, get share.Getter) (*rsmt2d.ExtendedDataSquare, error) { return get.GetEDS(ctx, nil) @@ -96,6 +101,15 @@ func TestCascade(t *testing.T) { assert.Equal(t, strings.Count(err.Error(), "\n"), 2) }) + t.Run("Context Canceled", func(t *testing.T) { + ctx, cancel := context.WithCancel(ctx) + cancel() + getters := []share.Getter{ctxGetter, ctxGetter, ctxGetter} + _, err := cascadeGetters(ctx, getters, get) + assert.Error(t, err) + assert.Equal(t, strings.Count(err.Error(), "\n"), 0) + }) + t.Run("Single", func(t *testing.T) { getters := []share.Getter{successGetter} _, err := cascadeGetters(ctx, getters, get) diff --git a/share/getters/utils.go b/share/getters/utils.go index c99a7689b8..c0b882f330 100644 --- a/share/getters/utils.go +++ b/share/getters/utils.go @@ -15,7 +15,6 @@ import ( "go.opentelemetry.io/otel/trace" "golang.org/x/sync/errgroup" - "github.com/celestiaorg/nmt" "github.com/celestiaorg/nmt/namespace" "github.com/celestiaorg/celestia-node/libs/utils" @@ -35,7 +34,7 @@ var ( func filterRootsByNamespace(root *share.Root, nID namespace.ID) []cid.Cid { rowRootCIDs := make([]cid.Cid, 0, len(root.RowRoots)) for _, row := range root.RowRoots { - if !nID.Less(nmt.MinNamespace(row, nID.Size())) && nID.LessOrEqual(nmt.MaxNamespace(row, nID.Size())) { + if !ipld.NamespaceIsOutsideRange(row, row, nID) { rowRootCIDs = append(rowRootCIDs, ipld.MustCidFromNamespacedSha256(row)) } } diff --git a/share/ipld/get.go b/share/ipld/get.go index 70385f73f7..6d7de35c27 100644 --- a/share/ipld/get.go +++ b/share/ipld/get.go @@ -103,7 +103,7 @@ func GetLeaves(ctx context.Context, // this buffer ensures writes to 'jobs' are never blocking (bin-tree-feat) jobs := make(chan *job, (maxShares+1)/2) // +1 for the case where 'maxShares' is 1 - jobs <- &job{id: root, ctx: ctx} + jobs <- &job{cid: root, ctx: ctx} // total is an amount of routines spawned and total amount of nodes we process (bin-tree-feat) // so we can specify exact amount of loops we do, and wait for this amount // of routines to finish processing @@ -123,11 +123,11 @@ func GetLeaves(ctx context.Context, defer wg.Done() span.SetAttributes( - attribute.String("cid", j.id.String()), + attribute.String("cid", j.cid.String()), attribute.Int("pos", j.sharePos), ) - nd, err := GetNode(ctx, bGetter, j.id) + nd, err := GetNode(ctx, bGetter, j.cid) if err != nil { // we don't really care about errors here // just fetch as much as possible @@ -149,7 +149,7 @@ func GetLeaves(ctx context.Context, // send those to be processed select { case jobs <- &job{ - id: lnk.Cid, + cid: lnk.Cid, // calc position for children nodes (bin-tree-feat), // s.t. 'if' above knows where to put a share sharePos: j.sharePos*2 + i, @@ -213,7 +213,7 @@ func GetProof( // chanGroup implements an atomic wait group, closing a jobs chan // when fully done. 
type chanGroup struct { - jobs chan *job + jobs chan job counter int64 } @@ -233,8 +233,29 @@ func (w *chanGroup) done() { // job represents an encountered node to investigate during the `GetLeaves` // and `CollectLeavesByNamespace` routines. type job struct { - id cid.Cid + // we pass the context to job so that spans are tracked in a tree + // structure + ctx context.Context + // cid of the node that will be handled + cid cid.Cid + // sharePos represents potential share position in share slice sharePos int - depth int - ctx context.Context + // depth represents the number of edges present in path from the root node of a tree to that node + depth int + // isAbsent indicates if target namespaceID is not included, only collect absence proofs + isAbsent bool +} + +func (j job) next(direction direction, cid cid.Cid, isAbsent bool) job { + var i int + if direction == right { + i++ + } + return job{ + ctx: j.ctx, + cid: cid, + sharePos: j.sharePos*2 + i, + depth: j.depth + 1, + isAbsent: isAbsent, + } } diff --git a/share/ipld/namespace_data.go b/share/ipld/namespace_data.go index 9698ef990d..b38e5884b5 100644 --- a/share/ipld/namespace_data.go +++ b/share/ipld/namespace_data.go @@ -2,6 +2,7 @@ package ipld import ( "context" + "encoding/hex" "errors" "fmt" "sync" @@ -17,6 +18,9 @@ import ( "github.com/celestiaorg/nmt/namespace" ) +var ErrNamespaceOutsideRange = errors.New("share/ipld: " + + "target namespace id is outside of namespace range for the given root") + // Option is the functional option that is applied to the NamespaceData instance // to configure data that needs to be stored. type Option func(*NamespaceData) @@ -39,11 +43,15 @@ func WithProofs() Option { // NamespaceData stores all leaves under the given namespace with their corresponding proofs. 
type NamespaceData struct { - leaves []ipld.Node - proofs *proofCollector + leaves []ipld.Node + proofs *proofCollector + bounds fetchedBounds maxShares int nID namespace.ID + + isAbsentNamespace atomic.Bool + absenceProofLeaf ipld.Node } func NewNamespaceData(maxShares int, nID namespace.ID, options ...Option) *NamespaceData { @@ -62,7 +70,7 @@ func NewNamespaceData(maxShares int, nID namespace.ID, options ...Option) *Names return data } -func (n *NamespaceData) validate() error { +func (n *NamespaceData) validate(rootCid cid.Cid) error { if len(n.nID) != NamespaceSize { return fmt.Errorf("expected namespace ID of size %d, got %d", NamespaceSize, len(n.nID)) } @@ -70,6 +78,11 @@ func (n *NamespaceData) validate() error { if n.leaves == nil && n.proofs == nil { return errors.New("share/ipld: empty NamespaceData, nothing specified to retrieve") } + + root := NamespacedSha256FromCID(rootCid) + if NamespaceIsOutsideRange(root, root, n.nID) { + return ErrNamespaceOutsideRange + } return nil } @@ -77,6 +90,14 @@ func (n *NamespaceData) addLeaf(pos int, nd ipld.Node) { // bounds will be needed in `Proof` method n.bounds.update(int64(pos)) + if n.isAbsentNamespace.Load() { + if n.absenceProofLeaf != nil { + log.Fatal("there should be only one absence leaf") + } + n.absenceProofLeaf = nd + return + } + if n.leaves == nil { return } @@ -139,6 +160,16 @@ func (n *NamespaceData) Proof() *nmt.Proof { nodes[i] = NamespacedSha256FromCID(node) } + if n.isAbsentNamespace.Load() { + proof := nmt.NewAbsenceProof( + int(n.bounds.lowest), + int(n.bounds.highest)+1, + nodes, + NamespacedSha256FromCID(n.absenceProofLeaf.Cid()), + NMTIgnoreMaxNamespace, + ) + return &proof + } proof := nmt.NewInclusionProof( int(n.bounds.lowest), int(n.bounds.highest)+1, @@ -158,7 +189,7 @@ func (n *NamespaceData) CollectLeavesByNamespace( bGetter blockservice.BlockGetter, root cid.Cid, ) error { - if err := n.validate(); err != nil { + if err := n.validate(root); err != nil { return err } @@ -166,14 +197,14 @@ func (n *NamespaceData) CollectLeavesByNamespace( defer span.End() span.SetAttributes( - attribute.String("namespace", n.nID.String()), + attribute.String("namespace", hex.EncodeToString(n.nID)), attribute.String("root", root.String()), ) // buffer the jobs to avoid blocking, we only need as many // queued as the number of shares in the second-to-last layer - jobs := make(chan *job, (n.maxShares+1)/2) - jobs <- &job{id: root, ctx: ctx} + jobs := make(chan job, (n.maxShares+1)/2) + jobs <- job{cid: root, ctx: ctx} var wg chanGroup wg.jobs = jobs @@ -185,7 +216,7 @@ func (n *NamespaceData) CollectLeavesByNamespace( ) for { - var j *job + var j job var ok bool select { case j, ok = <-jobs: @@ -202,19 +233,19 @@ func (n *NamespaceData) CollectLeavesByNamespace( defer wg.done() span.SetAttributes( - attribute.String("cid", j.id.String()), + attribute.String("cid", j.cid.String()), attribute.Int("pos", j.sharePos), ) // if an error is likely to be returned or not depends on // the underlying impl of the blockservice, currently it is not a realistic probability - nd, err := GetNode(ctx, bGetter, j.id) + nd, err := GetNode(ctx, bGetter, j.cid) if err != nil { singleErr.Do(func() { retrievalErr = err }) - log.Errorw("getLeavesWithProofsByNamespace:could not retrieve node", - "nID", n.nID, + log.Errorw("could not retrieve IPLD node", + "nID", hex.EncodeToString(n.nID), "pos", j.sharePos, "err", err, ) @@ -234,38 +265,11 @@ func (n *NamespaceData) CollectLeavesByNamespace( } // this node has links in the namespace, so keep walking - 
for i, lnk := range links { - newJob := &job{ - id: lnk.Cid, - // sharePos represents potential share position in share slice - sharePos: j.sharePos*2 + i, - // depth represents the number of edges present in path from the root node of a tree to that node - depth: j.depth + 1, - // we pass the context to job so that spans are tracked in a tree - // structure - ctx: ctx, - } - // if the link's nID isn't in range we don't need to create a new job for it, - // but need to collect a proof - jobNid := NamespacedSha256FromCID(newJob.id) - - // proof is on the right side, if the nID is less than min namespace of jobNid - if n.nID.Less(nmt.MinNamespace(jobNid, n.nID.Size())) { - n.addProof(right, lnk.Cid, newJob.depth) - continue - } - - // proof is on the left side, if the nID is bigger than max namespace of jobNid - if !n.nID.LessOrEqual(nmt.MaxNamespace(jobNid, n.nID.Size())) { - n.addProof(left, lnk.Cid, newJob.depth) - continue - } - - // by passing the previous check, we know we will have one more node to process - // note: it is important to increase the counter before sending to the channel + newJobs := n.traverseLinks(j, links) + for _, j := range newJobs { wg.add(1) select { - case jobs <- newJob: + case jobs <- j: case <-ctx.Done(): return } @@ -274,6 +278,57 @@ func (n *NamespaceData) CollectLeavesByNamespace( } } +func (n *NamespaceData) traverseLinks(j job, links []*ipld.Link) []job { + if j.isAbsent { + return n.collectAbsenceProofs(j, links) + } + return n.collectNDWithProofs(j, links) +} + +func (n *NamespaceData) collectAbsenceProofs(j job, links []*ipld.Link) []job { + leftLink := links[0].Cid + rightLink := links[1].Cid + // traverse to the left node, while collecting right node as proof + n.addProof(right, rightLink, j.depth) + return []job{j.next(left, leftLink, j.isAbsent)} +} + +func (n *NamespaceData) collectNDWithProofs(j job, links []*ipld.Link) []job { + leftCid := links[0].Cid + rightCid := links[1].Cid + leftLink := NamespacedSha256FromCID(leftCid) + rightLink := NamespacedSha256FromCID(rightCid) + + var nextJobs []job + // check if target namespace is outside of boundaries of both links + if NamespaceIsOutsideRange(leftLink, rightLink, n.nID) { + log.Fatalf("target namespace outside of boundaries of links at depth: %v", j.depth) + } + + if !NamespaceIsAboveMax(leftLink, n.nID) { + // namespace is within the range of left link + nextJobs = append(nextJobs, j.next(left, leftCid, false)) + } else { + // proof is on the left side, if the nID is on the right side of the range of left link + n.addProof(left, leftCid, j.depth) + if NamespaceIsBelowMin(rightLink, n.nID) { + // namespace is not included in either links, convert to absence collector + n.isAbsentNamespace.Store(true) + nextJobs = append(nextJobs, j.next(right, rightCid, true)) + return nextJobs + } + } + + if !NamespaceIsBelowMin(rightLink, n.nID) { + // namespace is within the range of right link + nextJobs = append(nextJobs, j.next(right, rightCid, false)) + } else { + // proof is on the right side, if the nID is on the left side of the range of right link + n.addProof(right, rightCid, j.depth) + } + return nextJobs +} + type fetchedBounds struct { lowest int64 highest int64 diff --git a/share/ipld/nmt.go b/share/ipld/nmt.go index e5e7d41cd1..df140ef8c7 100644 --- a/share/ipld/nmt.go +++ b/share/ipld/nmt.go @@ -20,6 +20,7 @@ import ( "github.com/celestiaorg/celestia-app/pkg/appconsts" "github.com/celestiaorg/celestia-app/pkg/da" "github.com/celestiaorg/nmt" + "github.com/celestiaorg/nmt/namespace" ) var ( @@ 
-38,10 +39,6 @@ const ( // that contain an NMT node (inner and leaf nodes). sha256NamespaceFlagged = 0x7701 - // MaxSquareSize is currently the maximum size supported for unerasured data in - // rsmt2d.ExtendedDataSquare. - MaxSquareSize = appconsts.MaxSquareSize - // NamespaceSize is a system-wide size for NMT namespaces. NamespaceSize = appconsts.NamespaceSize @@ -65,6 +62,12 @@ const ( NMTIgnoreMaxNamespace = true ) +var ( + // MaxSquareSize is currently the maximum size supported for unerasured data in + // rsmt2d.ExtendedDataSquare. + MaxSquareSize = appconsts.SquareSizeUpperBound(appconsts.LatestVersion) +) + func init() { // required for Bitswap to hash and verify inbound data correctly mhcore.Register(sha256NamespaceFlagged, func() hash.Hash { @@ -179,3 +182,21 @@ func Translate(dah *da.DataAvailabilityHeader, row, col int) (cid.Cid, int) { func NamespacedSha256FromCID(cid cid.Cid) []byte { return cid.Hash()[cidPrefixSize:] } + +// NamespaceIsAboveMax checks if the target namespace is above the maximum namespace for a given +// node hash. +func NamespaceIsAboveMax(nodeHash []byte, target namespace.ID) bool { + return !target.LessOrEqual(nmt.MaxNamespace(nodeHash, target.Size())) +} + +// NamespaceIsBelowMin checks if the target namespace is below the minimum namespace for a given +// node hash. +func NamespaceIsBelowMin(nodeHash []byte, target namespace.ID) bool { + return target.Less(nmt.MinNamespace(nodeHash, target.Size())) +} + +// NamespaceIsOutsideRange checks if the target namespace is outside the range defined by the left +// and right nodes +func NamespaceIsOutsideRange(leftNodeHash, rightNodeHash []byte, target namespace.ID) bool { + return NamespaceIsBelowMin(leftNodeHash, target) || NamespaceIsAboveMax(rightNodeHash, target) +} diff --git a/share/mocks/getter.go b/share/mocks/getter.go index 12c36cb015..1c73c9170d 100644 --- a/share/mocks/getter.go +++ b/share/mocks/getter.go @@ -8,12 +8,11 @@ import ( context "context" reflect "reflect" - gomock "github.com/golang/mock/gomock" - da "github.com/celestiaorg/celestia-app/pkg/da" share "github.com/celestiaorg/celestia-node/share" namespace "github.com/celestiaorg/nmt/namespace" rsmt2d "github.com/celestiaorg/rsmt2d" + gomock "github.com/golang/mock/gomock" ) // MockGetter is a mock of Getter interface. diff --git a/share/nid.go b/share/nid.go new file mode 100644 index 0000000000..b7fd4e5836 --- /dev/null +++ b/share/nid.go @@ -0,0 +1,28 @@ +package share + +import ( + "fmt" + + appns "github.com/celestiaorg/celestia-app/pkg/namespace" + "github.com/celestiaorg/nmt/namespace" +) + +// NewNamespaceV0 takes a variable size byte slice and creates a version 0 Namespace ID. +// The byte slice must be <= 10 bytes. +// If it is less than 10 bytes, it will be left padded to size 10 with 0s. 
+func NewNamespaceV0(subNId []byte) (namespace.ID, error) { + if lnid := len(subNId); lnid > appns.NamespaceVersionZeroIDSize { + return nil, fmt.Errorf("namespace id must be <= %v, but it was %v bytes", appns.NamespaceVersionZeroIDSize, lnid) + } + + id := make([]byte, appns.NamespaceIDSize) + leftPaddingOffset := appns.NamespaceVersionZeroIDSize - len(subNId) + copy(id[appns.NamespaceVersionZeroPrefixSize+leftPaddingOffset:], subNId) + + appID, err := appns.New(appns.NamespaceVersionZero, id) + if err != nil { + return nil, err + } + + return appID.Bytes(), nil +} diff --git a/share/nid_test.go b/share/nid_test.go new file mode 100644 index 0000000000..8f83d430e3 --- /dev/null +++ b/share/nid_test.go @@ -0,0 +1,56 @@ +package share + +import ( + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/celestiaorg/nmt/namespace" +) + +func TestNewNamespaceV0(t *testing.T) { + type testCase struct { + name string + subNid []byte + expected namespace.ID + wantErr bool + } + testCases := []testCase{ + { + name: "8 byte subNid, gets left padded", + subNid: []byte{0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08}, + expected: namespace.ID{ + 0x0, + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, // filled zeros + 0x0, 0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8}, // id with left padding + wantErr: false, + }, + { + name: "10 byte subNid, no padding", + subNid: []byte{0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x9, 0x10}, + expected: namespace.ID{ + 0x0, // version + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, // filled zeros + 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0x10}, // id + wantErr: false, + }, + { + name: "11 byte subNid", + subNid: []byte{0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x9, 0x10, 0x11}, + expected: []byte{}, + wantErr: true, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + got, err := NewNamespaceV0(tc.subNid) + if tc.wantErr { + assert.Error(t, err) + return + } + assert.NoError(t, err) + assert.Equal(t, tc.expected, got) + }) + } +} diff --git a/share/p2p/discovery/discovery.go b/share/p2p/discovery/discovery.go index 01001376d5..96eda8bd78 100644 --- a/share/p2p/discovery/discovery.go +++ b/share/p2p/discovery/discovery.go @@ -28,16 +28,15 @@ const ( // (by default it is 16) eventbusBufSize = 64 - // findPeersStuckWarnDelay is the duration after which discover will log an error message to - // notify that it is stuck. - findPeersStuckWarnDelay = time.Minute + // findPeersTimeout limits the FindPeers operation in time + findPeersTimeout = time.Minute - // defaultRetryTimeout defines time interval between discovery attempts. - defaultRetryTimeout = time.Second + // retryTimeout defines time interval between discovery and advertise attempts. + retryTimeout = time.Second ) -// defaultRetryTimeout defines time interval between discovery attempts. -var discoveryRetryTimeout = defaultRetryTimeout +// discoveryRetryTimeout defines time interval between discovery attempts, needed for tests +var discoveryRetryTimeout = retryTimeout // Discovery combines advertise and discover services and allows to store discovered nodes. 
// TODO: The code here gets horribly hairy, so we should refactor this at some point @@ -166,7 +165,9 @@ func (d *Discovery) Advertise(ctx context.Context) { } log.Warnw("error advertising", "rendezvous", rendezvousPoint, "err", err) - errTimer := time.NewTimer(time.Minute) + // we don't want retry indefinitely in busy loop + // internal discovery mechanism may need some time before attempts + errTimer := time.NewTimer(retryTimeout) select { case <-errTimer.C: errTimer.Stop() @@ -257,8 +258,7 @@ func (d *Discovery) discover(ctx context.Context) bool { // limit to minimize chances of overreaching the limit wg.SetLimit(int(d.set.Limit())) - // stop discovery when we are done - findCtx, findCancel := context.WithCancel(ctx) + findCtx, findCancel := context.WithTimeout(ctx, findPeersTimeout) defer func() { // some workers could still be running, wait them to finish before canceling findCtx wg.Wait() //nolint:errcheck @@ -271,26 +271,11 @@ func (d *Discovery) discover(ctx context.Context) bool { return false } - ticker := time.NewTicker(findPeersStuckWarnDelay) - defer ticker.Stop() for { - ticker.Reset(findPeersStuckWarnDelay) - // drain all previous ticks from channel - drainChannel(ticker.C) select { - case <-findCtx.Done(): - d.metrics.observeFindPeers(ctx, true, true) - return true - case <-ticker.C: - d.metrics.observeDiscoveryStuck(ctx) - log.Warn("wasn't able to find new peers for long time") - continue case p, ok := <-peers: if !ok { - isEnoughPeers := d.set.Size() >= d.set.Limit() - d.metrics.observeFindPeers(ctx, ctx.Err() != nil, isEnoughPeers) - log.Debugw("discovery channel closed", "find_is_canceled", findCtx.Err() != nil) - return isEnoughPeers + break } peer := p @@ -313,10 +298,18 @@ func (d *Discovery) discover(ctx context.Context) bool { } log.Infow("discovered wanted peers", "amount", size) - findCancel() + findCancel() // stop discovery when we are done return nil }) + + continue + case <-findCtx.Done(): } + + isEnoughPeers := d.set.Size() >= d.set.Limit() + d.metrics.observeFindPeers(ctx, isEnoughPeers) + log.Debugw("discovery finished", "discovered_wanted", isEnoughPeers) + return isEnoughPeers } } diff --git a/share/p2p/discovery/discovery_test.go b/share/p2p/discovery/discovery_test.go index f0935086ef..06d88a9079 100644 --- a/share/p2p/discovery/discovery_test.go +++ b/share/p2p/discovery/discovery_test.go @@ -11,6 +11,7 @@ import ( "github.com/libp2p/go-libp2p/core/peer" "github.com/libp2p/go-libp2p/p2p/discovery/routing" basic "github.com/libp2p/go-libp2p/p2p/host/basic" + "github.com/libp2p/go-libp2p/p2p/host/eventbus" swarmt "github.com/libp2p/go-libp2p/p2p/net/swarm/testing" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -21,7 +22,7 @@ func TestDiscovery(t *testing.T) { discoveryRetryTimeout = time.Millisecond * 100 // defined in discovery.go - ctx, cancel := context.WithTimeout(context.Background(), time.Second*30) + ctx, cancel := context.WithTimeout(context.Background(), time.Minute*30) t.Cleanup(cancel) tn := newTestnet(ctx, t) @@ -80,8 +81,9 @@ type testnet struct { } func newTestnet(ctx context.Context, t *testing.T) *testnet { - swarm := swarmt.GenSwarm(t, swarmt.OptDisableTCP) - hst, err := basic.NewHost(swarm, &basic.HostOpts{}) + bus := eventbus.NewBus() + swarm := swarmt.GenSwarm(t, swarmt.OptDisableTCP, swarmt.EventBus(bus)) + hst, err := basic.NewHost(swarm, &basic.HostOpts{EventBus: bus}) require.NoError(t, err) hst.Start() @@ -110,8 +112,9 @@ func (t *testnet) discovery(opts ...Option) *Discovery { } func (t *testnet) 
peer() (host.Host, discovery.Discovery) { - swarm := swarmt.GenSwarm(t.T, swarmt.OptDisableTCP) - hst, err := basic.NewHost(swarm, &basic.HostOpts{}) + bus := eventbus.NewBus() + swarm := swarmt.GenSwarm(t.T, swarmt.OptDisableTCP, swarmt.EventBus(bus)) + hst, err := basic.NewHost(swarm, &basic.HostOpts{EventBus: bus}) require.NoError(t.T, err) hst.Start() diff --git a/share/p2p/discovery/metrics.go b/share/p2p/discovery/metrics.go index c147a2eeeb..b6adbb1984 100644 --- a/share/p2p/discovery/metrics.go +++ b/share/p2p/discovery/metrics.go @@ -13,8 +13,7 @@ import ( ) const ( - discoveryEnougPeersKey = "enough_peers" - discoveryFindCancledKey = "is_canceled" + discoveryEnoughPeersKey = "enough_peers" handlePeerResultKey = "result" handlePeerSkipSelf handlePeerResult = "skip_self" @@ -37,7 +36,6 @@ type handlePeerResult string type metrics struct { peersAmount asyncint64.Gauge discoveryResult syncint64.Counter // attributes: enough_peers[bool],is_canceled[bool] - discoveryStuck syncint64.Counter handlePeerResult syncint64.Counter // attributes: result[string] advertise syncint64.Counter // attributes: failed[bool] peerAdded syncint64.Counter @@ -68,12 +66,6 @@ func initMetrics(d *Discovery) (*metrics, error) { return nil, err } - discoveryStuck, err := meter.SyncInt64().Counter("discovery_lookup_is_stuck", - instrument.WithDescription("indicates discovery wasn't able to find peers for more than 1 min")) - if err != nil { - return nil, err - } - handlePeerResultCounter, err := meter.SyncInt64().Counter("discovery_handler_peer_result", instrument.WithDescription("result handling found peer")) if err != nil { @@ -107,7 +99,6 @@ func initMetrics(d *Discovery) (*metrics, error) { metrics := &metrics{ peersAmount: peersAmount, discoveryResult: discoveryResult, - discoveryStuck: discoveryStuck, handlePeerResult: handlePeerResultCounter, advertise: advertise, peerAdded: peerAdded, @@ -130,7 +121,7 @@ func initMetrics(d *Discovery) (*metrics, error) { return metrics, nil } -func (m *metrics) observeFindPeers(ctx context.Context, canceled, isEnoughPeers bool) { +func (m *metrics) observeFindPeers(ctx context.Context, isEnoughPeers bool) { if m == nil { return } @@ -139,8 +130,7 @@ func (m *metrics) observeFindPeers(ctx context.Context, canceled, isEnoughPeers } m.discoveryResult.Add(ctx, 1, - attribute.Bool(discoveryFindCancledKey, canceled), - attribute.Bool(discoveryEnougPeersKey, isEnoughPeers)) + attribute.Bool(discoveryEnoughPeersKey, isEnoughPeers)) } func (m *metrics) observeHandlePeer(ctx context.Context, result handlePeerResult) { @@ -179,14 +169,3 @@ func (m *metrics) observeOnPeersUpdate(_ peer.ID, isAdded bool) { } m.peerRemoved.Add(ctx, 1) } - -func (m *metrics) observeDiscoveryStuck(ctx context.Context) { - if m == nil { - return - } - if ctx.Err() != nil { - ctx = context.Background() - } - - m.discoveryStuck.Add(ctx, 1) -} diff --git a/share/share.go b/share/share.go index 06f911636d..0178054a9f 100644 --- a/share/share.go +++ b/share/share.go @@ -8,6 +8,8 @@ import ( "github.com/celestiaorg/celestia-app/pkg/appconsts" "github.com/celestiaorg/nmt/namespace" + + "github.com/celestiaorg/celestia-node/share/ipld" ) var ( @@ -18,15 +20,18 @@ var ( ) const ( - // MaxSquareSize is currently the maximum size supported for unerasured data in - // rsmt2d.ExtendedDataSquare. - MaxSquareSize = appconsts.MaxSquareSize // NamespaceSize is a system-wide size for NMT namespaces. 
NamespaceSize = appconsts.NamespaceSize // Size is a system-wide size of a share, including both data and namespace ID Size = appconsts.ShareSize ) +var ( + // MaxSquareSize is currently the maximum size supported for unerasured data in + // rsmt2d.ExtendedDataSquare. + MaxSquareSize = ipld.MaxSquareSize +) + // Share contains the raw share data without the corresponding namespace. // NOTE: Alias for the byte is chosen to keep maximal compatibility, especially with rsmt2d. // Ideally, we should define reusable type elsewhere and make everyone(Core, rsmt2d, ipld) to rely diff --git a/state/address_test.go b/state/address_test.go new file mode 100644 index 0000000000..d701b38aa8 --- /dev/null +++ b/state/address_test.go @@ -0,0 +1,57 @@ +package state + +import ( + "testing" + + sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestAddressMarshalling(t *testing.T) { + testCases := []struct { + name string + addressString string + addressFromStr func(string) (interface{}, error) + marshalJSON func(interface{}) ([]byte, error) + unmarshalJSON func([]byte) (interface{}, error) + }{ + { + name: "Account Address", + addressString: "celestia1377k5an3f94v6wyaceu0cf4nq6gk2jtpc46g7h", + addressFromStr: func(s string) (interface{}, error) { return sdk.AccAddressFromBech32(s) }, + marshalJSON: func(addr interface{}) ([]byte, error) { return addr.(sdk.AccAddress).MarshalJSON() }, + unmarshalJSON: func(b []byte) (interface{}, error) { + var addr sdk.AccAddress + err := addr.UnmarshalJSON(b) + return addr, err + }, + }, + { + name: "Validator Address", + addressString: "celestiavaloper1q3v5cugc8cdpud87u4zwy0a74uxkk6u4q4gx4p", + addressFromStr: func(s string) (interface{}, error) { return sdk.ValAddressFromBech32(s) }, + marshalJSON: func(addr interface{}) ([]byte, error) { return addr.(sdk.ValAddress).MarshalJSON() }, + unmarshalJSON: func(b []byte) (interface{}, error) { + var addr sdk.ValAddress + err := addr.UnmarshalJSON(b) + return addr, err + }, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + addr, err := tc.addressFromStr(tc.addressString) + require.NoError(t, err) + + addrBytes, err := tc.marshalJSON(addr) + assert.NoError(t, err) + assert.Equal(t, []byte("\""+tc.addressString+"\""), addrBytes) + + addrUnmarshalled, err := tc.unmarshalJSON(addrBytes) + assert.NoError(t, err) + assert.Equal(t, addr, addrUnmarshalled) + }) + } +} diff --git a/state/core_access.go b/state/core_access.go index aed7db3df3..7b59f3e714 100644 --- a/state/core_access.go +++ b/state/core_access.go @@ -6,6 +6,7 @@ import ( "fmt" "time" + sdkErrors "cosmossdk.io/errors" "github.com/cosmos/cosmos-sdk/api/tendermint/abci" storetypes "github.com/cosmos/cosmos-sdk/store/types" sdktypes "github.com/cosmos/cosmos-sdk/types" @@ -20,12 +21,11 @@ import ( "google.golang.org/grpc/credentials/insecure" "github.com/celestiaorg/celestia-app/app" - "github.com/celestiaorg/celestia-app/pkg/appconsts" - "github.com/celestiaorg/celestia-app/x/blob" + appblob "github.com/celestiaorg/celestia-app/x/blob" apptypes "github.com/celestiaorg/celestia-app/x/blob/types" libhead "github.com/celestiaorg/go-header" - "github.com/celestiaorg/nmt/namespace" + "github.com/celestiaorg/celestia-node/blob" "github.com/celestiaorg/celestia-node/header" ) @@ -158,17 +158,24 @@ func (ca *CoreAccessor) constructSignedTx( func (ca *CoreAccessor) SubmitPayForBlob( ctx context.Context, - nID namespace.ID, - data []byte, fee Int, gasLim uint64, + blobs 
[]*blob.Blob, ) (*TxResponse, error) { - b := &apptypes.Blob{NamespaceId: nID, Data: data, ShareVersion: uint32(appconsts.DefaultShareVersion)} - response, err := blob.SubmitPayForBlob( + if len(blobs) == 0 { + return nil, errors.New("state: no blobs provided") + } + + appblobs := make([]*apptypes.Blob, len(blobs)) + for i, blob := range blobs { + appblobs[i] = &blob.Blob + } + + response, err := appblob.SubmitPayForBlob( ctx, ca.signer, ca.coreConn, - []*apptypes.Blob{b}, + appblobs, apptypes.SetGasLimit(gasLim), withFee(fee), ) @@ -177,15 +184,19 @@ func (ca *CoreAccessor) SubmitPayForBlob( ca.lastPayForBlob = time.Now().UnixMilli() ca.payForBlobCount++ } + + if response != nil && response.Code != 0 { + err = errors.Join(err, sdkErrors.ABCIError(response.Codespace, response.Code, response.Logs.String())) + } return response, err } func (ca *CoreAccessor) AccountAddress(context.Context) (Address, error) { addr, err := ca.signer.GetSignerInfo().GetAddress() if err != nil { - return nil, err + return Address{nil}, err } - return addr, nil + return Address{addr}, nil } func (ca *CoreAccessor) Balance(ctx context.Context) (*Balance, error) { @@ -193,7 +204,7 @@ func (ca *CoreAccessor) Balance(ctx context.Context) (*Balance, error) { if err != nil { return nil, err } - return ca.BalanceForAddress(ctx, addr) + return ca.BalanceForAddress(ctx, Address{addr}) } func (ca *CoreAccessor) BalanceForAddress(ctx context.Context, addr Address) (*Balance, error) { diff --git a/state/integration_test.go b/state/integration_test.go index e7d2496397..8862de1bf8 100644 --- a/state/integration_test.go +++ b/state/integration_test.go @@ -110,7 +110,7 @@ func (s *IntegrationTestSuite) TestGetBalance() { require := s.Require() expectedBal := sdk.NewCoin(app.BondDenom, sdk.NewInt(int64(99999999999999999))) for _, acc := range s.accounts { - bal, err := s.accessor.BalanceForAddress(context.Background(), s.getAddress(acc)) + bal, err := s.accessor.BalanceForAddress(context.Background(), Address{s.getAddress(acc)}) require.NoError(err) require.Equal(&expectedBal, bal) } diff --git a/state/state.go b/state/state.go index 987a783239..d55bb6901c 100644 --- a/state/state.go +++ b/state/state.go @@ -1,6 +1,9 @@ package state import ( + "fmt" + "strings" + "cosmossdk.io/math" sdk "github.com/cosmos/cosmos-sdk/types" coretypes "github.com/tendermint/tendermint/types" @@ -15,8 +18,11 @@ type Tx = coretypes.Tx // TxResponse is an alias to the TxResponse type from Cosmos-SDK. type TxResponse = sdk.TxResponse -// Address is an alias to the Address type from Cosmos-SDK. -type Address = sdk.Address +// Address is an alias to the Address type from Cosmos-SDK. It is embedded into a struct to provide +// a non-interface type for JSON serialization. +type Address struct { + sdk.Address +} // ValAddress is an alias to the ValAddress type from Cosmos-SDK. type ValAddress = sdk.ValAddress @@ -26,3 +32,27 @@ type AccAddress = sdk.AccAddress // Int is an alias to the Int type from Cosmos-SDK. 
type Int = math.Int
+
+func (a *Address) UnmarshalJSON(data []byte) error {
+	// To convert the string back to a concrete type, we have to determine the correct implementation
+	var addr AccAddress
+	addrString := strings.Trim(string(data), "\"")
+	addr, err := sdk.AccAddressFromBech32(addrString)
+	if err != nil {
+		// first check if it is a validator address and can be converted
+		valAddr, err := sdk.ValAddressFromBech32(addrString)
+		if err != nil {
+			return fmt.Errorf("address must be a valid account or validator address: %w", err)
+		}
+		a.Address = valAddr
+		return nil
+	}
+
+	a.Address = addr
+	return nil
+}
+
+func (a Address) MarshalJSON() ([]byte, error) {
+	// The address is marshaled into a simple string value
+	return []byte("\"" + a.Address.String() + "\""), nil
+}
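A minimal usage sketch of the new share.NewNamespaceV0 helper added in share/nid.go above: sub-IDs shorter than the version-zero ID size are left-padded with zeros, and longer inputs are rejected, mirroring the cases in share/nid_test.go. The package main wrapper and the example bytes are illustrative assumptions, not part of the patch.

package main

import (
	"fmt"

	"github.com/celestiaorg/celestia-node/share"
)

func main() {
	// A short sub-ID is left-padded with zeros to the version-zero ID size;
	// the helper prepends the version byte and the zero prefix itself.
	ns, err := share.NewNamespaceV0([]byte{0xde, 0xad, 0xbe, 0xef})
	if err != nil {
		panic(err)
	}
	fmt.Printf("namespace: %x (%d bytes)\n", ns, len(ns))

	// A sub-ID longer than the version-zero ID size is rejected with an error.
	if _, err := share.NewNamespaceV0(make([]byte, 11)); err != nil {
		fmt.Println(err)
	}
}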
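A similar sketch of the new state.Address JSON behaviour defined in state/state.go above: UnmarshalJSON first tries to decode an account address and falls back to a validator address, while MarshalJSON emits a plain quoted bech32 string. The bech32 value is the account address from state/address_test.go; the package main wrapper is an assumption, and the Bech32 prefixes are assumed to be registered by the celestia-app packages pulled in through the state package.

package main

import (
	"encoding/json"
	"fmt"

	"github.com/celestiaorg/celestia-node/state"
)

func main() {
	raw := []byte(`"celestia1377k5an3f94v6wyaceu0cf4nq6gk2jtpc46g7h"`)

	// UnmarshalJSON tries the account prefix first and falls back to a validator address.
	var addr state.Address
	if err := json.Unmarshal(raw, &addr); err != nil {
		panic(err)
	}

	// MarshalJSON renders the address back as a plain quoted bech32 string.
	out, err := json.Marshal(addr)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out)) // "celestia1377k5an3f94v6wyaceu0cf4nq6gk2jtpc46g7h"
}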