From 4d45aa9fdda3482390802ac3bb1940030a12e855 Mon Sep 17 00:00:00 2001 From: Alex Sharov Date: Sat, 27 Jul 2024 17:11:12 +0700 Subject: [PATCH 01/49] compatibility with geth - of stateDiff encoding (#11362) cherry pick of https://github.com/erigontech/erigon/pull/10531 Co-authored-by: boyuan-chen <46272347+boyuan-chen@users.noreply.github.com> --- turbo/adapter/ethapi/api.go | 10 +++++----- turbo/adapter/ethapi/state_overrides.go | 10 ++++++++-- 2 files changed, 13 insertions(+), 7 deletions(-) diff --git a/turbo/adapter/ethapi/api.go b/turbo/adapter/ethapi/api.go index be3b38197eb..92767e1ce9e 100644 --- a/turbo/adapter/ethapi/api.go +++ b/turbo/adapter/ethapi/api.go @@ -170,11 +170,11 @@ func (args *CallArgs) ToMessage(globalGasCap uint64, baseFee *uint256.Int) (type // if statDiff is set, all diff will be applied first and then execute the call // message. type Account struct { - Nonce *hexutil.Uint64 `json:"nonce"` - Code *hexutility.Bytes `json:"code"` - Balance **hexutil.Big `json:"balance"` - State *map[libcommon.Hash]uint256.Int `json:"state"` - StateDiff *map[libcommon.Hash]uint256.Int `json:"stateDiff"` + Nonce *hexutil.Uint64 `json:"nonce"` + Code *hexutility.Bytes `json:"code"` + Balance **hexutil.Big `json:"balance"` + State *map[libcommon.Hash]libcommon.Hash `json:"state"` + StateDiff *map[libcommon.Hash]libcommon.Hash `json:"stateDiff"` } func NewRevertError(result *core.ExecutionResult) *RevertError { diff --git a/turbo/adapter/ethapi/state_overrides.go b/turbo/adapter/ethapi/state_overrides.go index 1ce2655acad..841c55c6c76 100644 --- a/turbo/adapter/ethapi/state_overrides.go +++ b/turbo/adapter/ethapi/state_overrides.go @@ -36,13 +36,19 @@ func (overrides *StateOverrides) Override(state *state.IntraBlockState) error { } // Replace entire state if caller requires. 
if account.State != nil { - state.SetStorage(addr, *account.State) + intState := map[libcommon.Hash]uint256.Int{} + for key, value := range *account.State { + intValue := new(uint256.Int).SetBytes32(value.Bytes()) + intState[key] = *intValue + } + state.SetStorage(addr, intState) } // Apply state diff into specified accounts. if account.StateDiff != nil { for key, value := range *account.StateDiff { key := key - state.SetState(addr, &key, value) + intValue := new(uint256.Int).SetBytes32(value.Bytes()) + state.SetState(addr, &key, *intValue) } } } From 217b5a19707a5a9869d203e91bad6325d1669d77 Mon Sep 17 00:00:00 2001 From: Alex Sharov Date: Mon, 29 Jul 2024 14:39:16 +0700 Subject: [PATCH 02/49] remove e3 metrics from e2 (#11374) --- erigon-lib/state/aggregator_files.go | 24 ------------------------ erigon-lib/state/domain.go | 4 ---- 2 files changed, 28 deletions(-) diff --git a/erigon-lib/state/aggregator_files.go b/erigon-lib/state/aggregator_files.go index 95ab6a71d93..eac579c2f6a 100644 --- a/erigon-lib/state/aggregator_files.go +++ b/erigon-lib/state/aggregator_files.go @@ -22,36 +22,12 @@ import ( "github.com/holiman/uint256" "github.com/ledgerwatch/erigon-lib/common" "github.com/ledgerwatch/erigon-lib/common/length" - "github.com/ledgerwatch/erigon-lib/metrics" ) // StepsInBiggestFile - files of this size are completely frozen/immutable. // files of smaller size are also immutable, but can be removed after merge to bigger files. 
const StepsInBiggestFile = 32 -var ( - mxCurrentTx = metrics.GetOrCreateGauge("domain_tx_processed") //nolint - mxCurrentBlock = metrics.GetOrCreateGauge("domain_block_current") //nolint - mxRunningMerges = metrics.GetOrCreateGauge("domain_running_merges") //nolint - mxRunningCollations = metrics.GetOrCreateGauge("domain_running_collations") //nolint - mxCollateTook = metrics.GetOrCreateHistogram("domain_collate_took") //nolint - mxPruneTook = metrics.GetOrCreateHistogram("domain_prune_took") //nolint - mxPruneHistTook = metrics.GetOrCreateHistogram("domain_prune_hist_took") //nolint - mxPruningProgress = metrics.GetOrCreateGauge("domain_pruning_progress") //nolint - mxCollationSize = metrics.GetOrCreateGauge("domain_collation_size") //nolint - mxCollationSizeHist = metrics.GetOrCreateGauge("domain_collation_hist_size") //nolint - mxPruneSize = metrics.GetOrCreateCounter("domain_prune_size") //nolint - mxBuildTook = metrics.GetOrCreateSummary("domain_build_files_took") //nolint - mxStepCurrent = metrics.GetOrCreateGauge("domain_step_current") //nolint - mxStepTook = metrics.GetOrCreateHistogram("domain_step_took") //nolint - mxCommitmentKeys = metrics.GetOrCreateCounter("domain_commitment_keys") //nolint - mxCommitmentRunning = metrics.GetOrCreateGauge("domain_running_commitment") //nolint - mxCommitmentTook = metrics.GetOrCreateSummary("domain_commitment_took") //nolint - mxCommitmentWriteTook = metrics.GetOrCreateHistogram("domain_commitment_write_took") //nolint - mxCommitmentUpdates = metrics.GetOrCreateCounter("domain_commitment_updates") //nolint - mxCommitmentUpdatesApplied = metrics.GetOrCreateCounter("domain_commitment_updates_applied") //nolint -) - type SelectedStaticFiles struct { accounts []*filesItem accountsIdx []*filesItem diff --git a/erigon-lib/state/domain.go b/erigon-lib/state/domain.go index 48973c1cf01..d4476bd9dfc 100644 --- a/erigon-lib/state/domain.go +++ b/erigon-lib/state/domain.go @@ -900,8 +900,6 @@ func (d *Domain) integrateFiles(sf 
StaticFiles, txNumFrom, txNumTo uint64) { // [txFrom; txTo) func (d *Domain) prune(ctx context.Context, step uint64, txFrom, txTo, limit uint64, logEvery *time.Ticker) error { defer func(t time.Time) { d.stats.LastPruneTook = time.Since(t) }(time.Now()) - mxPruningProgress.Inc() - defer mxPruningProgress.Dec() var ( _state = "scan steps" @@ -948,7 +946,6 @@ func (d *Domain) prune(ctx context.Context, step uint64, txFrom, txTo, limit uin if err := keysCursor.DeleteCurrent(); err != nil { return fmt.Errorf("prune key %x: %w", k, err) } - mxPruneSize.Inc() keyMaxSteps[string(k)] = s } } @@ -992,7 +989,6 @@ func (d *Domain) prune(ctx context.Context, step uint64, txFrom, txTo, limit uin if err := valsCursor.DeleteCurrent(); err != nil { return fmt.Errorf("prune val %x: %w", k, err) } - mxPruneSize.Inc() } pos.Add(1) //_prog = 100 * (float64(pos) / float64(totalKeys)) From 5a376972d8f5674b8c37b1df8ba10bda1514b9e5 Mon Sep 17 00:00:00 2001 From: Alex Sharov Date: Mon, 29 Jul 2024 20:52:33 +0700 Subject: [PATCH 03/49] bor: "header not found" add debug logs (e2) (#11361) --- eth/stagedsync/stage_bor_heimdall.go | 2 + polygon/bor/bor.go | 2 + .../snapshotsync/freezeblocks/block_reader.go | 50 +++++++++++-------- 3 files changed, 32 insertions(+), 22 deletions(-) diff --git a/eth/stagedsync/stage_bor_heimdall.go b/eth/stagedsync/stage_bor_heimdall.go index b9e43db34e8..1c02bbd97e8 100644 --- a/eth/stagedsync/stage_bor_heimdall.go +++ b/eth/stagedsync/stage_bor_heimdall.go @@ -12,6 +12,7 @@ import ( "time" lru "github.com/hashicorp/golang-lru/arc/v2" + "github.com/ledgerwatch/erigon-lib/common/dbg" "github.com/ledgerwatch/log/v3" "golang.org/x/sync/errgroup" @@ -250,6 +251,7 @@ func BorHeimdallForward( return err } if header == nil { + _, _ = cfg.blockReader.HeaderByNumber(dbg.ContextWithDebug(ctx, true), tx, blockNum) return fmt.Errorf("header not found: %d", blockNum) } diff --git a/polygon/bor/bor.go b/polygon/bor/bor.go index ebbdb2524b2..411df098b3c 100644 --- 
a/polygon/bor/bor.go +++ b/polygon/bor/bor.go @@ -17,6 +17,7 @@ import ( "time" lru "github.com/hashicorp/golang-lru/arc/v2" + "github.com/ledgerwatch/erigon-lib/common/dbg" "github.com/ledgerwatch/log/v3" "github.com/xsleonard/go-merkle" "golang.org/x/crypto/sha3" @@ -1438,6 +1439,7 @@ func (c *Bor) getHeaderByNumber(ctx context.Context, tx kv.Tx, number uint64) (* return nil, err } if header == nil { + _, _ = c.blockReader.HeaderByNumber(dbg.ContextWithDebug(ctx, true), tx, number) return nil, fmt.Errorf("[bor] header not found: %d", number) } return header, nil diff --git a/turbo/snapshotsync/freezeblocks/block_reader.go b/turbo/snapshotsync/freezeblocks/block_reader.go index 060ec99de9b..61135fdfbf4 100644 --- a/turbo/snapshotsync/freezeblocks/block_reader.go +++ b/turbo/snapshotsync/freezeblocks/block_reader.go @@ -357,38 +357,39 @@ func (r *BlockReader) HeadersRange(ctx context.Context, walker func(header *type } func (r *BlockReader) HeaderByNumber(ctx context.Context, tx kv.Getter, blockHeight uint64) (h *types.Header, err error) { - //TODO: investigate why code blolow causing getting error `Could not set forkchoice app=caplin stage=ForkChoice err="execution Client RPC failed to retrieve ForkChoiceUpdate response, err: unknown ancestor"` - //maxBlockNumInFiles := r.sn.BlocksAvailable() - //if maxBlockNumInFiles == 0 || blockHeight > maxBlockNumInFiles { - // if tx == nil { - // return nil, nil - // } - // blockHash, err := rawdb.ReadCanonicalHash(tx, blockHeight) - // if err != nil { - // return nil, err - // } - // if blockHash == (common.Hash{}) { - // return nil, nil - // } - // h = rawdb.ReadHeader(tx, blockHash, blockHeight) - // return h, nil - //} + var dbgPrefix string + dbgLogs := dbg.Enabled(ctx) + if dbgLogs { + dbgPrefix = fmt.Sprintf("[dbg] BlockReader(idxMax=%d,segMax=%d).HeaderByNumber(blk=%d) -> ", r.sn.idxMax.Load(), r.sn.segmentsMax.Load(), blockHeight) + } + if tx != nil { blockHash, err := rawdb.ReadCanonicalHash(tx, blockHeight) if err 
!= nil { return nil, err } - if blockHash == (common.Hash{}) { - return nil, nil - } - h = rawdb.ReadHeader(tx, blockHash, blockHeight) - if h != nil { - return h, nil + // if no canonical marker - still can try read from files + if blockHash != emptyHash { + h = rawdb.ReadHeader(tx, blockHash, blockHeight) + if h != nil { + return h, nil + } else { + if dbgLogs { + log.Info(dbgPrefix + "not found in db") + } + } + } else { + if dbgLogs { + log.Info(dbgPrefix + "canonical hash is empty") + } } } seg, ok, release := r.sn.ViewSingleFile(coresnaptype.Headers, blockHeight) if !ok { + if dbgLogs { + log.Info(dbgPrefix + "not found file for such blockHeight") + } return } defer release() @@ -397,6 +398,11 @@ func (r *BlockReader) HeaderByNumber(ctx context.Context, tx kv.Getter, blockHei if err != nil { return nil, err } + if h == nil { + if dbgLogs { + log.Info(dbgPrefix + "got nil from file") + } + } return h, nil } From bcd13728ccbdf496626d29ea7b1e1ff81227435f Mon Sep 17 00:00:00 2001 From: Alex Sharov Date: Mon, 29 Jul 2024 22:18:49 +0700 Subject: [PATCH 04/49] callTracer: don't capture logs txs failed with error (#11375) for https://github.com/erigontech/erigon/issues/11257 --- eth/tracers/native/call.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/eth/tracers/native/call.go b/eth/tracers/native/call.go index da866627f3e..964b02aa13a 100644 --- a/eth/tracers/native/call.go +++ b/eth/tracers/native/call.go @@ -182,6 +182,10 @@ func (t *callTracer) CaptureState(pc uint64, op vm.OpCode, gas, cost uint64, sco if t.config.OnlyTopCall && depth > 0 { return } + // on-error `stackData[stackSize-2]` will contain error data instead of logs. 
+ if err != nil { + return + } // Skip if tracing was interrupted if atomic.LoadUint32(&t.interrupt) > 0 { return From f15e7e1256e2131b1aca81c9bf8d831cb6799794 Mon Sep 17 00:00:00 2001 From: Alex Sharov Date: Thu, 1 Aug 2024 19:22:39 +0700 Subject: [PATCH 05/49] Don't close `roSn` object while downloading (#11432) --- turbo/snapshotsync/snapshotsync.go | 5 ----- 1 file changed, 5 deletions(-) diff --git a/turbo/snapshotsync/snapshotsync.go b/turbo/snapshotsync/snapshotsync.go index f56a85b89de..a54f99a7377 100644 --- a/turbo/snapshotsync/snapshotsync.go +++ b/turbo/snapshotsync/snapshotsync.go @@ -83,11 +83,6 @@ func WaitForDownloader(ctx context.Context, logPrefix string, histV3, blobs bool return nil } - snapshots.Close() - if cc.Bor != nil { - borSnapshots.Close() - } - //Corner cases: // - Erigon generated file X with hash H1. User upgraded Erigon. New version has preverified file X with hash H2. Must ignore H2 (don't send to Downloader) // - Erigon "download once": means restart/upgrade/downgrade must not download files (and will be fast) From 08447f46b7e540dfa4aa657f9c3f51fe6dacad0e Mon Sep 17 00:00:00 2001 From: Dmytro Vovk Date: Tue, 6 Aug 2024 04:02:28 +0100 Subject: [PATCH 06/49] changes from main (#11492) Merging diagnostics code updates from main branch. 
--- diagnostics/block_body_download_stats.go | 16 + diagnostics/bodies_info.go | 19 +- diagnostics/bootnodes.go | 16 + diagnostics/cmd_line.go | 16 + diagnostics/db.go | 16 + diagnostics/flags.go | 16 + diagnostics/header_downloader_stats.go | 16 + diagnostics/headers.go | 19 +- diagnostics/logs.go | 18 +- diagnostics/mem.go | 16 + diagnostics/nodeinfo.go | 16 + diagnostics/peers.go | 16 + diagnostics/setup.go | 29 +- diagnostics/snapshot_sync.go | 37 +- diagnostics/sysinfo.go | 39 ++ diagnostics/version.go | 16 + erigon-lib/diagnostics/block_execution.go | 62 ++- erigon-lib/diagnostics/bodies.go | 24 +- erigon-lib/diagnostics/client.go | 48 +- erigon-lib/diagnostics/entities.go | 126 +++--- erigon-lib/diagnostics/headers.go | 26 +- erigon-lib/diagnostics/network.go | 128 ++++-- erigon-lib/diagnostics/network_test.go | 66 +-- erigon-lib/diagnostics/provider.go | 16 + erigon-lib/diagnostics/provider_test.go | 16 + erigon-lib/diagnostics/resources_usage.go | 25 +- erigon-lib/diagnostics/snapshots.go | 413 ++++-------------- erigon-lib/diagnostics/snapshots_download.go | 215 +++++++++ erigon-lib/diagnostics/snapshots_indexing.go | 108 +++++ erigon-lib/diagnostics/snapshots_test.go | 131 +++++- erigon-lib/diagnostics/speedtest.go | 102 +++-- erigon-lib/diagnostics/stages.go | 38 ++ erigon-lib/diagnostics/sys_info.go | 95 ++-- erigon-lib/diagnostics/utils.go | 72 ++- erigon-lib/diagnostics/utils_test.go | 49 ++- erigon-lib/diskutils/diskutils.go | 5 + erigon-lib/diskutils/diskutils_darwin.go | 15 + erigon-lib/go.mod | 2 +- erigon-lib/go.sum | 4 +- go.mod | 2 +- go.sum | 4 +- .../freezeblocks/block_snapshots.go | 13 +- 42 files changed, 1518 insertions(+), 608 deletions(-) create mode 100644 diagnostics/sysinfo.go create mode 100644 erigon-lib/diagnostics/snapshots_download.go create mode 100644 erigon-lib/diagnostics/snapshots_indexing.go diff --git a/diagnostics/block_body_download_stats.go b/diagnostics/block_body_download_stats.go index a97c4a6493c..ba72e90490b 
100644 --- a/diagnostics/block_body_download_stats.go +++ b/diagnostics/block_body_download_stats.go @@ -1,3 +1,19 @@ +// Copyright 2024 The Erigon Authors +// This file is part of Erigon. +// +// Erigon is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// Erigon is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with Erigon. If not, see . + package diagnostics import ( diff --git a/diagnostics/bodies_info.go b/diagnostics/bodies_info.go index 795d23c38b2..6a656a0c339 100644 --- a/diagnostics/bodies_info.go +++ b/diagnostics/bodies_info.go @@ -1,7 +1,22 @@ +// Copyright 2024 The Erigon Authors +// This file is part of Erigon. +// +// Erigon is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// Erigon is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with Erigon. If not, see . 
+ package diagnostics import ( - "encoding/json" "net/http" diaglib "github.com/ledgerwatch/erigon-lib/diagnostics" @@ -21,5 +36,5 @@ func SetupBodiesAccess(metricsMux *http.ServeMux, diag *diaglib.DiagnosticClient } func writeBodies(w http.ResponseWriter, diag *diaglib.DiagnosticClient) { - json.NewEncoder(w).Encode(diag.GetBodiesInfo()) + diag.BodiesInfoJson(w) } diff --git a/diagnostics/bootnodes.go b/diagnostics/bootnodes.go index 00fd24c25ed..06e9a766cce 100644 --- a/diagnostics/bootnodes.go +++ b/diagnostics/bootnodes.go @@ -1,3 +1,19 @@ +// Copyright 2024 The Erigon Authors +// This file is part of Erigon. +// +// Erigon is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// Erigon is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with Erigon. If not, see . + package diagnostics import ( diff --git a/diagnostics/cmd_line.go b/diagnostics/cmd_line.go index a2050ca4397..6dd68cb92ff 100644 --- a/diagnostics/cmd_line.go +++ b/diagnostics/cmd_line.go @@ -1,3 +1,19 @@ +// Copyright 2024 The Erigon Authors +// This file is part of Erigon. +// +// Erigon is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// Erigon is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with Erigon. If not, see . + package diagnostics import ( diff --git a/diagnostics/db.go b/diagnostics/db.go index e0c7a629561..b91cb3682ea 100644 --- a/diagnostics/db.go +++ b/diagnostics/db.go @@ -1,3 +1,19 @@ +// Copyright 2024 The Erigon Authors +// This file is part of Erigon. +// +// Erigon is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// Erigon is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with Erigon. If not, see . + package diagnostics import ( diff --git a/diagnostics/flags.go b/diagnostics/flags.go index cbcc11b3228..d2902078423 100644 --- a/diagnostics/flags.go +++ b/diagnostics/flags.go @@ -1,3 +1,19 @@ +// Copyright 2024 The Erigon Authors +// This file is part of Erigon. +// +// Erigon is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// Erigon is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with Erigon. If not, see . 
+ package diagnostics import ( diff --git a/diagnostics/header_downloader_stats.go b/diagnostics/header_downloader_stats.go index 0b9c4b48a76..74c84b114e2 100644 --- a/diagnostics/header_downloader_stats.go +++ b/diagnostics/header_downloader_stats.go @@ -1,3 +1,19 @@ +// Copyright 2024 The Erigon Authors +// This file is part of Erigon. +// +// Erigon is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// Erigon is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with Erigon. If not, see . + package diagnostics import ( diff --git a/diagnostics/headers.go b/diagnostics/headers.go index 82066609368..da861ed9902 100644 --- a/diagnostics/headers.go +++ b/diagnostics/headers.go @@ -1,7 +1,22 @@ +// Copyright 2024 The Erigon Authors +// This file is part of Erigon. +// +// Erigon is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// Erigon is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with Erigon. If not, see . 
+ package diagnostics import ( - "encoding/json" "net/http" diaglib "github.com/ledgerwatch/erigon-lib/diagnostics" @@ -20,5 +35,5 @@ func SetupHeadersAccess(metricsMux *http.ServeMux, diag *diaglib.DiagnosticClien } func writeHeaders(w http.ResponseWriter, diag *diaglib.DiagnosticClient) { - json.NewEncoder(w).Encode(diag.GetHeaders()) + diag.HeadersJson(w) } diff --git a/diagnostics/logs.go b/diagnostics/logs.go index f3d44e5c178..e953f4bc0fe 100644 --- a/diagnostics/logs.go +++ b/diagnostics/logs.go @@ -1,3 +1,19 @@ +// Copyright 2024 The Erigon Authors +// This file is part of Erigon. +// +// Erigon is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// Erigon is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with Erigon. If not, see . + package diagnostics import ( @@ -103,7 +119,7 @@ func writeLogsRead(w http.ResponseWriter, r *http.Request, dirPath string) { } if fileInfo.IsDir() { - http.Error(w, fmt.Sprintf("%s is a directory, needs to be a file", file), http.StatusInternalServerError) + http.Error(w, file+" is a directory, needs to be a file", http.StatusInternalServerError) return } diff --git a/diagnostics/mem.go b/diagnostics/mem.go index 592b7fdbdcc..32e2c8639ae 100644 --- a/diagnostics/mem.go +++ b/diagnostics/mem.go @@ -1,3 +1,19 @@ +// Copyright 2024 The Erigon Authors +// This file is part of Erigon. 
+// +// Erigon is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// Erigon is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with Erigon. If not, see . + package diagnostics import ( diff --git a/diagnostics/nodeinfo.go b/diagnostics/nodeinfo.go index fc09c170436..9fd2bb49d8a 100644 --- a/diagnostics/nodeinfo.go +++ b/diagnostics/nodeinfo.go @@ -1,3 +1,19 @@ +// Copyright 2024 The Erigon Authors +// This file is part of Erigon. +// +// Erigon is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// Erigon is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with Erigon. If not, see . + package diagnostics import ( diff --git a/diagnostics/peers.go b/diagnostics/peers.go index 8f2d7847396..2a5122a95c0 100644 --- a/diagnostics/peers.go +++ b/diagnostics/peers.go @@ -1,3 +1,19 @@ +// Copyright 2024 The Erigon Authors +// This file is part of Erigon. 
+// +// Erigon is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// Erigon is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with Erigon. If not, see . + package diagnostics import ( diff --git a/diagnostics/setup.go b/diagnostics/setup.go index 7eb9363a3c6..e792ccef6ab 100644 --- a/diagnostics/setup.go +++ b/diagnostics/setup.go @@ -1,3 +1,19 @@ +// Copyright 2024 The Erigon Authors +// This file is part of Erigon. +// +// Erigon is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// Erigon is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with Erigon. If not, see . 
+ package diagnostics import ( @@ -7,6 +23,8 @@ import ( "github.com/urfave/cli/v2" + "github.com/ledgerwatch/erigon-lib/chain/snapcfg" + libcommon "github.com/ledgerwatch/erigon-lib/common" diaglib "github.com/ledgerwatch/erigon-lib/diagnostics" "github.com/ledgerwatch/erigon/turbo/node" "github.com/ledgerwatch/log/v3" @@ -21,6 +39,8 @@ var ( pprofPortFlag = "pprof.port" pprofAddrFlag = "pprof.addr" diagnoticsSpeedTestFlag = "diagnostics.speedtest" + webSeedsFlag = "webseed" + chainFlag = "chain" ) func Setup(ctx *cli.Context, node *node.ErigonNode, metricsMux *http.ServeMux, pprofMux *http.ServeMux) { @@ -49,8 +69,14 @@ func Setup(ctx *cli.Context, node *node.ErigonNode, metricsMux *http.ServeMux, p diagMux = SetupDiagnosticsEndpoint(nil, diagAddress) } + chain := ctx.String(chainFlag) + webseedsList := libcommon.CliString2Array(ctx.String(webSeedsFlag)) + if known, ok := snapcfg.KnownWebseeds[chain]; ok { + webseedsList = append(webseedsList, known...) + } + speedTest := ctx.Bool(diagnoticsSpeedTestFlag) - diagnostic, err := diaglib.NewDiagnosticClient(ctx.Context, diagMux, node.Backend().DataDir(), speedTest) + diagnostic, err := diaglib.NewDiagnosticClient(ctx.Context, diagMux, node.Backend().DataDir(), speedTest, webseedsList) if err == nil { diagnostic.Setup() SetupEndpoints(ctx, node, diagMux, diagnostic) @@ -107,4 +133,5 @@ func SetupEndpoints(ctx *cli.Context, node *node.ErigonNode, diagMux *http.Serve SetupMemAccess(diagMux) SetupHeadersAccess(diagMux, diagnostic) SetupBodiesAccess(diagMux, diagnostic) + SetupSysInfoAccess(diagMux, diagnostic) } diff --git a/diagnostics/snapshot_sync.go b/diagnostics/snapshot_sync.go index 265f8496124..b80d1093ab4 100644 --- a/diagnostics/snapshot_sync.go +++ b/diagnostics/snapshot_sync.go @@ -1,7 +1,22 @@ +// Copyright 2024 The Erigon Authors +// This file is part of Erigon. 
+// +// Erigon is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// Erigon is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with Erigon. If not, see . + package diagnostics import ( - "encoding/json" "net/http" diaglib "github.com/ledgerwatch/erigon-lib/diagnostics" @@ -24,12 +39,6 @@ func SetupStagesAccess(metricsMux *http.ServeMux, diag *diaglib.DiagnosticClient writeFilesList(w, diag) }) - metricsMux.HandleFunc("/hardware-info", func(w http.ResponseWriter, r *http.Request) { - w.Header().Set("Access-Control-Allow-Origin", "*") - w.Header().Set("Content-Type", "application/json") - writeHardwareInfo(w, diag) - }) - metricsMux.HandleFunc("/resources-usage", func(w http.ResponseWriter, r *http.Request) { w.Header().Set("Access-Control-Allow-Origin", "*") w.Header().Set("Content-Type", "application/json") @@ -50,25 +59,21 @@ func SetupStagesAccess(metricsMux *http.ServeMux, diag *diaglib.DiagnosticClient } func writeNetworkSpeed(w http.ResponseWriter, diag *diaglib.DiagnosticClient) { - json.NewEncoder(w).Encode(diag.GetNetworkSpeed()) + diag.NetworkSpeedJson(w) } func writeResourcesUsage(w http.ResponseWriter, diag *diaglib.DiagnosticClient) { - json.NewEncoder(w).Encode(diag.GetResourcesUsage()) + diag.ResourcesUsageJson(w) } func writeStages(w http.ResponseWriter, diag *diaglib.DiagnosticClient) { - json.NewEncoder(w).Encode(diag.SyncStatistics()) + diag.SyncStatsJson(w) } func writeFilesList(w http.ResponseWriter, diag *diaglib.DiagnosticClient) { - 
json.NewEncoder(w).Encode(diag.SnapshotFilesList()) -} - -func writeHardwareInfo(w http.ResponseWriter, diag *diaglib.DiagnosticClient) { - json.NewEncoder(w).Encode(diag.HardwareInfo()) + diag.SnapshotFilesListJson(w) } func writeSyncStages(w http.ResponseWriter, diag *diaglib.DiagnosticClient) { - json.NewEncoder(w).Encode(diag.GetSyncStages()) + diag.SyncStagesJson(w) } diff --git a/diagnostics/sysinfo.go b/diagnostics/sysinfo.go new file mode 100644 index 00000000000..586692fa3ca --- /dev/null +++ b/diagnostics/sysinfo.go @@ -0,0 +1,39 @@ +// Copyright 2024 The Erigon Authors +// This file is part of Erigon. +// +// Erigon is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// Erigon is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with Erigon. If not, see . 
+ +package diagnostics + +import ( + "net/http" + + diaglib "github.com/ledgerwatch/erigon-lib/diagnostics" +) + +func SetupSysInfoAccess(metricsMux *http.ServeMux, diag *diaglib.DiagnosticClient) { + if metricsMux == nil { + return + } + + metricsMux.HandleFunc("/hardware-info", func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Access-Control-Allow-Origin", "*") + w.Header().Set("Content-Type", "application/json") + writeHardwareInfo(w, diag) + }) +} + +func writeHardwareInfo(w http.ResponseWriter, diag *diaglib.DiagnosticClient) { + diag.HardwareInfoJson(w) +} diff --git a/diagnostics/version.go b/diagnostics/version.go index 6bf869e835f..742056b9430 100644 --- a/diagnostics/version.go +++ b/diagnostics/version.go @@ -1,3 +1,19 @@ +// Copyright 2024 The Erigon Authors +// This file is part of Erigon. +// +// Erigon is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// Erigon is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with Erigon. If not, see . + package diagnostics import ( diff --git a/erigon-lib/diagnostics/block_execution.go b/erigon-lib/diagnostics/block_execution.go index fcb448376a8..75e83879b73 100644 --- a/erigon-lib/diagnostics/block_execution.go +++ b/erigon-lib/diagnostics/block_execution.go @@ -1,11 +1,62 @@ +// Copyright 2024 The Erigon Authors +// This file is part of Erigon. 
+// +// Erigon is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// Erigon is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with Erigon. If not, see . + package diagnostics import ( "context" + "encoding/json" + "io" + "sync" "github.com/ledgerwatch/log/v3" ) +type BlockEexcStatsData struct { + data BlockExecutionStatistics + mu sync.Mutex +} + +type BlockExecutionStatistics struct { + From uint64 `json:"from"` + To uint64 `json:"to"` + BlockNumber uint64 `json:"blockNumber"` + BlkPerSec float64 `json:"blkPerSec"` + TxPerSec float64 `json:"txPerSec"` + MgasPerSec float64 `json:"mgasPerSec"` + GasState float64 `json:"gasState"` + Batch uint64 `json:"batch"` + Alloc uint64 `json:"alloc"` + Sys uint64 `json:"sys"` + TimeElapsed float64 `json:"timeElapsed"` +} + +func (b *BlockEexcStatsData) SetData(d BlockExecutionStatistics) { + b.mu.Lock() + defer b.mu.Unlock() + b.data = d +} + +func (b *BlockEexcStatsData) Data() (d BlockExecutionStatistics) { + b.mu.Lock() + d = b.data + b.mu.Unlock() + return +} + func (d *DiagnosticClient) setupBlockExecutionDiagnostics(rootCtx context.Context) { d.runBlockExecutionListener(rootCtx) } @@ -21,10 +72,7 @@ func (d *DiagnosticClient) runBlockExecutionListener(rootCtx context.Context) { case <-rootCtx.Done(): return case info := <-ch: - d.mu.Lock() - d.syncStats.BlockExecution = info - d.mu.Unlock() - + d.BlockExecution.SetData(info) if d.syncStats.SyncFinished { return } @@ -32,3 +80,9 @@ func (d *DiagnosticClient) runBlockExecutionListener(rootCtx 
context.Context) { } }() } + +func (d *DiagnosticClient) BlockExecutionInfoJson(w io.Writer) { + if err := json.NewEncoder(w).Encode(d.BlockExecution.Data()); err != nil { + log.Debug("[diagnostics] BlockExecutionInfoJson", "err", err) + } +} diff --git a/erigon-lib/diagnostics/bodies.go b/erigon-lib/diagnostics/bodies.go index 6c8c50916e6..8b0979c8e22 100644 --- a/erigon-lib/diagnostics/bodies.go +++ b/erigon-lib/diagnostics/bodies.go @@ -1,7 +1,25 @@ +// Copyright 2024 The Erigon Authors +// This file is part of Erigon. +// +// Erigon is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// Erigon is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with Erigon. If not, see . + package diagnostics import ( "context" + "encoding/json" + "io" "github.com/ledgerwatch/log/v3" ) @@ -93,8 +111,10 @@ func (d *DiagnosticClient) runBodiesProcessingListener(rootCtx context.Context) }() } -func (d *DiagnosticClient) GetBodiesInfo() BodiesInfo { +func (d *DiagnosticClient) BodiesInfoJson(w io.Writer) { d.bodiesMutex.Lock() defer d.bodiesMutex.Unlock() - return d.bodies + if err := json.NewEncoder(w).Encode(d.bodies); err != nil { + log.Debug("[diagnostics] BodiesInfoJson", "err", err) + } } diff --git a/erigon-lib/diagnostics/client.go b/erigon-lib/diagnostics/client.go index b9de0a86f3d..575e26583b5 100644 --- a/erigon-lib/diagnostics/client.go +++ b/erigon-lib/diagnostics/client.go @@ -1,13 +1,26 @@ +// Copyright 2024 The Erigon Authors +// This file is part of Erigon. 
+// +// Erigon is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// Erigon is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with Erigon. If not, see . + package diagnostics import ( "context" "net/http" - "os" - "os/signal" "path/filepath" "sync" - "syscall" "time" "github.com/c2h5oh/datasize" @@ -28,6 +41,7 @@ type DiagnosticClient struct { syncStages []SyncStage syncStats SyncStatistics + BlockExecution BlockEexcStatsData snapshotFileList SnapshoFilesList mu sync.Mutex headerMutex sync.Mutex @@ -40,9 +54,10 @@ type DiagnosticClient struct { resourcesUsageMutex sync.Mutex networkSpeed NetworkSpeedTestResult networkSpeedMutex sync.Mutex + webseedsList []string } -func NewDiagnosticClient(ctx context.Context, metricsMux *http.ServeMux, dataDirPath string, speedTest bool) (*DiagnosticClient, error) { +func NewDiagnosticClient(ctx context.Context, metricsMux *http.ServeMux, dataDirPath string, speedTest bool, webseedsList []string) (*DiagnosticClient, error) { dirPath := filepath.Join(dataDirPath, "diagnostics") db, err := createDb(ctx, dirPath) if err != nil { @@ -69,7 +84,8 @@ func NewDiagnosticClient(ctx context.Context, metricsMux *http.ServeMux, dataDir resourcesUsage: ResourcesUsage{ MemoryUsage: []MemoryStats{}, }, - peersStats: NewPeerStats(1000), // 1000 is the limit of peers; TODO: make it configurable through a flag + peersStats: NewPeerStats(1000), // 1000 is the limit of peers; TODO: make it configurable through a flag + webseedsList: webseedsList, }, nil } @@ -104,34 +120,21 @@ 
func (d *DiagnosticClient) Setup() { d.setupResourcesUsageDiagnostics(rootCtx) d.setupSpeedtestDiagnostics(rootCtx) d.runSaveProcess(rootCtx) - d.runStopNodeListener(rootCtx) //d.logDiagMsgs() - -} - -func (d *DiagnosticClient) runStopNodeListener(rootCtx context.Context) { - go func() { - ch := make(chan os.Signal, 1) - signal.Notify(ch, os.Interrupt, syscall.SIGTERM) - select { - case <-ch: - d.SaveData() - case <-rootCtx.Done(): - } - }() } // Save diagnostic data by time interval to reduce save events func (d *DiagnosticClient) runSaveProcess(rootCtx context.Context) { ticker := time.NewTicker(5 * time.Minute) go func() { + defer ticker.Stop() for { select { case <-ticker.C: d.SaveData() case <-rootCtx.Done(): - ticker.Stop() + d.SaveData() return } } @@ -139,6 +142,9 @@ func (d *DiagnosticClient) runSaveProcess(rootCtx context.Context) { } func (d *DiagnosticClient) SaveData() { + d.mu.Lock() + defer d.mu.Unlock() + var funcs []func(tx kv.RwTx) error funcs = append(funcs, SnapshotDownloadUpdater(d.syncStats.SnapshotDownload), StagesListUpdater(d.syncStages), SnapshotIndexingUpdater(d.syncStats.SnapshotIndexing)) @@ -239,7 +245,7 @@ func ReadSavedData(db kv.RoDB) (hinfo HardwareInfo, ssinfo []SyncStage, snpdwl S } var ramInfo RAMInfo - var cpuInfo CPUInfo + var cpuInfo []CPUInfo var diskInfo DiskInfo ParseData(ramBytes, &ramInfo) ParseData(cpuBytes, &cpuInfo) diff --git a/erigon-lib/diagnostics/entities.go b/erigon-lib/diagnostics/entities.go index 0d8f29534c7..dc850e2fb50 100644 --- a/erigon-lib/diagnostics/entities.go +++ b/erigon-lib/diagnostics/entities.go @@ -1,40 +1,25 @@ -/* - Copyright 2021 Erigon contributors - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. 
- You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 The Erigon Authors +// This file is part of Erigon. +// +// Erigon is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// Erigon is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with Erigon. If not, see . 
package diagnostics import ( "time" -) -type SyncStageType string - -const ( - Snapshots SyncStageType = "Snapshots" - BlockHashes SyncStageType = "BlockHashes" - Senders SyncStageType = "Senders" - Execution SyncStageType = "Execution" - HashState SyncStageType = "HashState" - IntermediateHashes SyncStageType = "IntermediateHashes" - CallTraces SyncStageType = "CallTraces" - AccountHistoryIndex SyncStageType = "AccountHistoryIndex" - StorageHistoryIndex SyncStageType = "StorageHistoryIndex" - LogIndex SyncStageType = "LogIndex" - TxLookup SyncStageType = "TxLookup" - Finish SyncStageType = "Finish" + "golang.org/x/exp/maps" ) type PeerStatistics struct { @@ -47,6 +32,25 @@ type PeerStatistics struct { TypeBytesOut map[string]uint64 } +func (p PeerStatistics) Clone() PeerStatistics { + p1 := p + p1.CapBytesIn = maps.Clone(p.CapBytesIn) + p1.CapBytesOut = maps.Clone(p.CapBytesOut) + p1.TypeBytesIn = maps.Clone(p.TypeBytesIn) + p1.TypeBytesOut = maps.Clone(p.TypeBytesOut) + return p1 +} + +func (p PeerStatistics) Equal(p2 PeerStatistics) bool { + return p.PeerType == p2.PeerType && + p.BytesIn == p2.BytesIn && + p.BytesOut == p2.BytesOut && + maps.Equal(p.CapBytesIn, p2.CapBytesIn) && + maps.Equal(p.CapBytesOut, p2.CapBytesOut) && + maps.Equal(p.TypeBytesIn, p2.TypeBytesIn) && + maps.Equal(p.TypeBytesOut, p2.TypeBytesOut) +} + type PeerDataUpdate struct { PeerID string ENR string @@ -70,7 +74,6 @@ type SyncStatistics struct { SnapshotDownload SnapshotDownloadStatistics `json:"snapshotDownload"` SnapshotIndexing SnapshotIndexingStatistics `json:"snapshotIndexing"` SnapshotFillDB SnapshotFillDBStatistics `json:"snapshotFillDB"` - BlockExecution BlockExecutionStatistics `json:"blockExecution"` SyncFinished bool `json:"syncFinished"` } @@ -128,10 +131,6 @@ type SnapshotSegmentIndexingStatistics struct { Sys uint64 `json:"sys"` } -type SnapshotSegmentIndexingFinishedUpdate struct { - SegmentName string `json:"segmentName"` -} - type SnapshotFillDBStatistics struct { 
Stages []SnapshotFillDBStage `json:"stages"` } @@ -147,45 +146,46 @@ type SnapshotFillDBStageUpdate struct { TimeElapsed float64 `json:"timeElapsed"` } -type BlockExecutionStatistics struct { - From uint64 `json:"from"` - To uint64 `json:"to"` - BlockNumber uint64 `json:"blockNumber"` - BlkPerSec float64 `json:"blkPerSec"` - TxPerSec float64 `json:"txPerSec"` - MgasPerSec float64 `json:"mgasPerSec"` - GasState float64 `json:"gasState"` - Batch uint64 `json:"batch"` - Alloc uint64 `json:"alloc"` - Sys uint64 `json:"sys"` - TimeElapsed float64 `json:"timeElapsed"` -} - type SnapshoFilesList struct { Files []string `json:"files"` } type HardwareInfo struct { - Disk DiskInfo `json:"disk"` - RAM RAMInfo `json:"ram"` - CPU CPUInfo `json:"cpu"` + Disk DiskInfo `json:"disk"` + RAM RAMInfo `json:"ram"` + CPU []CPUInfo `json:"cpu"` } type RAMInfo struct { - Total uint64 `json:"total"` - Free uint64 `json:"free"` + Total uint64 `json:"total"` + Available uint64 `json:"available"` + Used uint64 `json:"used"` + UsedPercent float64 `json:"usedPercent"` } type DiskInfo struct { - FsType string `json:"fsType"` - Total uint64 `json:"total"` - Free uint64 `json:"free"` + FsType string `json:"fsType"` + Total uint64 `json:"total"` + Free uint64 `json:"free"` + MountPoint string `json:"mountPoint"` + Device string `json:"device"` + Details string `json:"details"` } type CPUInfo struct { - Cores int `json:"cores"` - ModelName string `json:"modelName"` - Mhz float64 `json:"mhz"` + CPU int32 `json:"cpu"` + VendorID string `json:"vendorId"` + Family string `json:"family"` + Model string `json:"model"` + Stepping int32 `json:"stepping"` + PhysicalID string `json:"physicalId"` + CoreID string `json:"coreId"` + Cores int32 `json:"cores"` + ModelName string `json:"modelName"` + Mhz float64 `json:"mhz"` + CacheSize int32 `json:"cacheSize"` + Flags []string `json:"flags"` + Microcode string `json:"microcode"` } type BlockHeadersUpdate struct { @@ -325,10 +325,6 @@ func (ti 
SnapshotIndexingStatistics) Type() Type { return TypeOf(ti) } -func (ti SnapshotSegmentIndexingFinishedUpdate) Type() Type { - return TypeOf(ti) -} - func (ti PeerStatisticMsgUpdate) Type() Type { return TypeOf(ti) } diff --git a/erigon-lib/diagnostics/headers.go b/erigon-lib/diagnostics/headers.go index 9bf60521949..c02a7255015 100644 --- a/erigon-lib/diagnostics/headers.go +++ b/erigon-lib/diagnostics/headers.go @@ -1,7 +1,25 @@ +// Copyright 2024 The Erigon Authors +// This file is part of Erigon. +// +// Erigon is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// Erigon is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with Erigon. If not, see . 
+ package diagnostics import ( "context" + "encoding/json" + "io" "github.com/ledgerwatch/log/v3" ) @@ -13,8 +31,12 @@ func (d *DiagnosticClient) setupHeadersDiagnostics(rootCtx context.Context) { d.runProcessedListener(rootCtx) } -func (d *DiagnosticClient) GetHeaders() Headers { - return d.headers +func (d *DiagnosticClient) HeadersJson(w io.Writer) { + d.headerMutex.Lock() + defer d.headerMutex.Unlock() + if err := json.NewEncoder(w).Encode(d.headers); err != nil { + log.Debug("[diagnostics] HeadersJson", "err", err) + } } func (d *DiagnosticClient) runHeadersWaitingListener(rootCtx context.Context) { diff --git a/erigon-lib/diagnostics/network.go b/erigon-lib/diagnostics/network.go index 2306aa997bf..280ea818ab1 100644 --- a/erigon-lib/diagnostics/network.go +++ b/erigon-lib/diagnostics/network.go @@ -1,3 +1,19 @@ +// Copyright 2024 The Erigon Authors +// This file is part of Erigon. +// +// Erigon is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// Erigon is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with Erigon. If not, see . 
+ package diagnostics import ( @@ -14,6 +30,7 @@ type PeerStats struct { recordsCount int lastUpdateMap map[string]time.Time limit int + mu sync.Mutex } func NewPeerStats(peerLimit int) *PeerStats { @@ -26,31 +43,55 @@ func NewPeerStats(peerLimit int) *PeerStats { } func (p *PeerStats) AddOrUpdatePeer(peerID string, peerInfo PeerStatisticMsgUpdate) { + p.mu.Lock() + defer p.mu.Unlock() + + p.addOrUpdatePeer(peerID, peerInfo) +} + +func (p *PeerStats) addOrUpdatePeer(peerID string, peerInfo PeerStatisticMsgUpdate) { if value, ok := p.peersInfo.Load(peerID); ok { - p.UpdatePeer(peerID, peerInfo, value) + p.updatePeer(peerID, peerInfo, value) } else { - p.AddPeer(peerID, peerInfo) - if p.GetPeersCount() > p.limit { - p.RemovePeersWhichExceedLimit(p.limit) + p.addPeer(peerID, peerInfo) + if p.getPeersCount() > p.limit { + p.removePeersWhichExceedLimit(p.limit) } } } +// Deprecated - used in tests. non-thread-safe func (p *PeerStats) AddPeer(peerID string, peerInfo PeerStatisticMsgUpdate) { - pv := PeerStatisticsFromMsgUpdate(peerInfo, nil) + p.mu.Lock() + defer p.mu.Unlock() + p.addPeer(peerID, peerInfo) +} + +func (p *PeerStats) addPeer(peerID string, peerInfo PeerStatisticMsgUpdate) { + pv := peerStatisticsFromMsgUpdate(peerInfo, nil) p.peersInfo.Store(peerID, pv) p.recordsCount++ p.lastUpdateMap[peerID] = time.Now() } func (p *PeerStats) UpdatePeer(peerID string, peerInfo PeerStatisticMsgUpdate, prevValue any) { - pv := PeerStatisticsFromMsgUpdate(peerInfo, prevValue) + p.mu.Lock() + defer p.mu.Unlock() + p.updatePeer(peerID, peerInfo, prevValue) +} + +func (p *PeerStats) updatePeer(peerID string, peerInfo PeerStatisticMsgUpdate, prevValue any) { + pv := peerStatisticsFromMsgUpdate(peerInfo, prevValue) p.peersInfo.Store(peerID, pv) p.lastUpdateMap[peerID] = time.Now() } func PeerStatisticsFromMsgUpdate(msg PeerStatisticMsgUpdate, prevValue any) PeerStatistics { + return peerStatisticsFromMsgUpdate(msg, prevValue) +} + +func peerStatisticsFromMsgUpdate(msg 
PeerStatisticMsgUpdate, prevValue any) PeerStatistics { ps := PeerStatistics{ PeerType: msg.PeerType, BytesIn: 0, @@ -88,23 +129,38 @@ func PeerStatisticsFromMsgUpdate(msg PeerStatisticMsgUpdate, prevValue any) Peer } func (p *PeerStats) GetPeersCount() int { + p.mu.Lock() + defer p.mu.Unlock() + return p.getPeersCount() +} + +func (p *PeerStats) getPeersCount() int { return p.recordsCount } -func (p *PeerStats) GetPeers() map[string]*PeerStatistics { - stats := make(map[string]*PeerStatistics) +func (p *PeerStats) GetPeers() map[string]PeerStatistics { + p.mu.Lock() + defer p.mu.Unlock() + + return p.getPeers() +} +func (p *PeerStats) getPeers() map[string]PeerStatistics { + stats := make(map[string]PeerStatistics) p.peersInfo.Range(func(key, value interface{}) bool { - if loadedKey, ok := key.(string); ok { - if loadedValue, ok := value.(PeerStatistics); ok { - stats[loadedKey] = &loadedValue - } else { - log.Debug("Failed to cast value to PeerStatistics struct", value) - } - } else { + loadedKey, ok := key.(string) + if !ok { log.Debug("Failed to cast key to string", key) + return true + } + + loadedValue, ok := value.(PeerStatistics) + if !ok { + log.Debug("Failed to cast value to PeerStatistics struct", value) + return true } + stats[loadedKey] = loadedValue.Clone() return true }) @@ -112,24 +168,30 @@ func (p *PeerStats) GetPeers() map[string]*PeerStatistics { } func (p *PeerStats) GetPeerStatistics(peerID string) PeerStatistics { + p.mu.Lock() + defer p.mu.Unlock() + + return p.getPeerStatistics(peerID) +} + +func (p *PeerStats) getPeerStatistics(peerID string) PeerStatistics { if value, ok := p.peersInfo.Load(peerID); ok { if peerStats, ok := value.(PeerStatistics); ok { - return peerStats + return peerStats.Clone() } } return PeerStatistics{} } -func (p *PeerStats) GetLastUpdate(peerID string) time.Time { - if lastUpdate, ok := p.lastUpdateMap[peerID]; ok { - return lastUpdate - } +func (p *PeerStats) RemovePeer(peerID string) { + p.mu.Lock() + defer 
p.mu.Unlock() - return time.Time{} + p.removePeer(peerID) } -func (p *PeerStats) RemovePeer(peerID string) { +func (p *PeerStats) removePeer(peerID string) { p.peersInfo.Delete(peerID) p.recordsCount-- delete(p.lastUpdateMap, peerID) @@ -141,7 +203,13 @@ type PeerUpdTime struct { } func (p *PeerStats) GetOldestUpdatedPeersWithSize(size int) []PeerUpdTime { - timeArray := make([]PeerUpdTime, 0, p.GetPeersCount()) + p.mu.Lock() + defer p.mu.Unlock() + return p.getOldestUpdatedPeersWithSize(size) +} + +func (p *PeerStats) getOldestUpdatedPeersWithSize(size int) []PeerUpdTime { + timeArray := make([]PeerUpdTime, 0, p.getPeersCount()) for k, v := range p.lastUpdateMap { timeArray = append(timeArray, PeerUpdTime{k, v}) } @@ -158,11 +226,17 @@ func (p *PeerStats) GetOldestUpdatedPeersWithSize(size int) []PeerUpdTime { } func (p *PeerStats) RemovePeersWhichExceedLimit(limit int) { - peersToRemove := p.GetPeersCount() - limit + p.mu.Lock() + defer p.mu.Unlock() + p.removePeersWhichExceedLimit(limit) +} + +func (p *PeerStats) removePeersWhichExceedLimit(limit int) { + peersToRemove := p.getPeersCount() - limit if peersToRemove > 0 { - peers := p.GetOldestUpdatedPeersWithSize(peersToRemove) + peers := p.getOldestUpdatedPeersWithSize(peersToRemove) for _, peer := range peers { - p.RemovePeer(peer.PeerID) + p.removePeer(peer.PeerID) } } } @@ -188,6 +262,6 @@ func (d *DiagnosticClient) runCollectPeersStatistics(rootCtx context.Context) { }() } -func (d *DiagnosticClient) Peers() map[string]*PeerStatistics { +func (d *DiagnosticClient) Peers() map[string]PeerStatistics { return d.peersStats.GetPeers() } diff --git a/erigon-lib/diagnostics/network_test.go b/erigon-lib/diagnostics/network_test.go index 122c2e117e4..a2c121e339c 100644 --- a/erigon-lib/diagnostics/network_test.go +++ b/erigon-lib/diagnostics/network_test.go @@ -1,9 +1,24 @@ +// Copyright 2024 The Erigon Authors +// This file is part of Erigon. 
+// +// Erigon is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// Erigon is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with Erigon. If not, see . + package diagnostics_test import ( "strconv" "testing" - "time" "github.com/ledgerwatch/erigon-lib/diagnostics" "github.com/stretchr/testify/require" @@ -143,56 +158,45 @@ func TestGetPeers(t *testing.T) { peers := peerStats.GetPeers() require.Equal(t, 3, len(peers)) - require.Equal(t, &mockInboundPeerStats, peers["test1"]) + require.True(t, peers["test1"].Equal(mockInboundPeerStats)) } -func TestLastUpdated(t *testing.T) { - peerStats := diagnostics.NewPeerStats(1000) - - peerStats.AddOrUpdatePeer("test1", mockInboundUpdMsg) - require.NotEmpty(t, peerStats.GetLastUpdate("test1")) +func TestRemovePeersWhichExceedLimit(t *testing.T) { + limit := 100 + peerStats := diagnostics.NewPeerStats(limit) - for i := 1; i < 20; i++ { + for i := 1; i < 105; i++ { pid := "test" + strconv.Itoa(i) peerStats.AddOrUpdatePeer(pid, mockInboundUpdMsg) - //wait for 1 milisecond to make sure that the last update time is different - time.Sleep(10 * time.Millisecond) } + require.Equal(t, 100, peerStats.GetPeersCount()) - require.True(t, peerStats.GetLastUpdate("test2").After(peerStats.GetLastUpdate("test1"))) - - oldestPeers := peerStats.GetOldestUpdatedPeersWithSize(10) + peerStats.RemovePeersWhichExceedLimit(limit) - // we have 100 peers, but we should get only 10 oldest - require.Equal(t, len(oldestPeers), 10) - // the oldest peer should be test1 - require.Equal(t, 
"test1", oldestPeers[0].PeerID) + require.Equal(t, limit, peerStats.GetPeersCount()) - // update test1 to - peerStats.AddOrUpdatePeer("test1", mockInboundUpdMsg) - oldestPeers = peerStats.GetOldestUpdatedPeersWithSize(10) + limit = 1000 + peerStats.RemovePeersWhichExceedLimit(limit) - // the oldest peer should not be test1 - require.NotEqual(t, "test1", oldestPeers[0].PeerID) + require.Equal(t, 100, peerStats.GetPeersCount()) } -func TestRemovePeersWhichExceedLimit(t *testing.T) { - limit := 100 +func TestRemovePeer(t *testing.T) { + limit := 10 peerStats := diagnostics.NewPeerStats(limit) - for i := 1; i < 105; i++ { + for i := 1; i < 11; i++ { pid := "test" + strconv.Itoa(i) peerStats.AddOrUpdatePeer(pid, mockInboundUpdMsg) } + require.Equal(t, 10, peerStats.GetPeersCount()) - peerStats.RemovePeersWhichExceedLimit(limit) - - require.Equal(t, limit, peerStats.GetPeersCount()) + peerStats.RemovePeer("test1") - limit = 1000 - peerStats.RemovePeersWhichExceedLimit(limit) + require.Equal(t, limit-1, peerStats.GetPeersCount()) - require.Equal(t, 100, peerStats.GetPeersCount()) + firstPeerStats := peerStats.GetPeerStatistics("test1") + require.True(t, firstPeerStats.Equal(diagnostics.PeerStatistics{})) } func TestAddingPeersAboveTheLimit(t *testing.T) { diff --git a/erigon-lib/diagnostics/provider.go b/erigon-lib/diagnostics/provider.go index fe27c22f5d4..d278f0f1799 100644 --- a/erigon-lib/diagnostics/provider.go +++ b/erigon-lib/diagnostics/provider.go @@ -1,3 +1,19 @@ +// Copyright 2024 The Erigon Authors +// This file is part of Erigon. +// +// Erigon is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+// +// Erigon is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with Erigon. If not, see . + package diagnostics import ( diff --git a/erigon-lib/diagnostics/provider_test.go b/erigon-lib/diagnostics/provider_test.go index 5329e7de4ea..1d515085ea1 100644 --- a/erigon-lib/diagnostics/provider_test.go +++ b/erigon-lib/diagnostics/provider_test.go @@ -1,3 +1,19 @@ +// Copyright 2024 The Erigon Authors +// This file is part of Erigon. +// +// Erigon is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// Erigon is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with Erigon. If not, see . + package diagnostics_test import ( diff --git a/erigon-lib/diagnostics/resources_usage.go b/erigon-lib/diagnostics/resources_usage.go index 65ec98e442e..33300c105c6 100644 --- a/erigon-lib/diagnostics/resources_usage.go +++ b/erigon-lib/diagnostics/resources_usage.go @@ -1,7 +1,25 @@ +// Copyright 2024 The Erigon Authors +// This file is part of Erigon. +// +// Erigon is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+// +// Erigon is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with Erigon. If not, see . + package diagnostics import ( "context" + "encoding/json" + "io" "github.com/ledgerwatch/log/v3" ) @@ -10,13 +28,16 @@ func (d *DiagnosticClient) setupResourcesUsageDiagnostics(rootCtx context.Contex d.runMemoryStatsListener(rootCtx) } -func (d *DiagnosticClient) GetResourcesUsage() ResourcesUsage { +func (d *DiagnosticClient) ResourcesUsageJson(w io.Writer) { d.resourcesUsageMutex.Lock() defer d.resourcesUsageMutex.Unlock() returnObj := d.resourcesUsage d.resourcesUsage = ResourcesUsage{} - return returnObj + + if err := json.NewEncoder(w).Encode(returnObj); err != nil { + log.Debug("[diagnostics] ResourcesUsageJson", "err", err) + } } func (d *DiagnosticClient) runMemoryStatsListener(rootCtx context.Context) { diff --git a/erigon-lib/diagnostics/snapshots.go b/erigon-lib/diagnostics/snapshots.go index 4d374b26172..a86a1e09832 100644 --- a/erigon-lib/diagnostics/snapshots.go +++ b/erigon-lib/diagnostics/snapshots.go @@ -1,8 +1,26 @@ +// Copyright 2024 The Erigon Authors +// This file is part of Erigon. +// +// Erigon is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// Erigon is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. 
+// +// You should have received a copy of the GNU Lesser General Public License +// along with Erigon. If not, see . + package diagnostics import ( "context" + "encoding/json" "fmt" + "io" "time" "github.com/ledgerwatch/erigon-lib/common" @@ -19,325 +37,12 @@ var ( func (d *DiagnosticClient) setupSnapshotDiagnostics(rootCtx context.Context) { d.runSnapshotListener(rootCtx) d.runSegmentDownloadingListener(rootCtx) - d.runSegmentIndexingListener(rootCtx) - d.runSegmentIndexingFinishedListener(rootCtx) d.runSnapshotFilesListListener(rootCtx) + d.runSegmentIndexingListener(rootCtx) d.runFileDownloadedListener(rootCtx) d.runFillDBListener(rootCtx) } -func (d *DiagnosticClient) runSnapshotListener(rootCtx context.Context) { - go func() { - ctx, ch, closeChannel := Context[SnapshotDownloadStatistics](rootCtx, 1) - defer closeChannel() - - StartProviders(ctx, TypeOf(SnapshotDownloadStatistics{}), log.Root()) - for { - select { - case <-rootCtx.Done(): - return - case info := <-ch: - - d.mu.Lock() - d.syncStats.SnapshotDownload.Downloaded = info.Downloaded - d.syncStats.SnapshotDownload.Total = info.Total - d.syncStats.SnapshotDownload.TotalTime = info.TotalTime - d.syncStats.SnapshotDownload.DownloadRate = info.DownloadRate - d.syncStats.SnapshotDownload.UploadRate = info.UploadRate - d.syncStats.SnapshotDownload.Peers = info.Peers - d.syncStats.SnapshotDownload.Files = info.Files - d.syncStats.SnapshotDownload.Connections = info.Connections - d.syncStats.SnapshotDownload.Alloc = info.Alloc - d.syncStats.SnapshotDownload.Sys = info.Sys - d.syncStats.SnapshotDownload.DownloadFinished = info.DownloadFinished - d.syncStats.SnapshotDownload.TorrentMetadataReady = info.TorrentMetadataReady - d.mu.Unlock() - - downloadedPercent := getPercentDownloaded(info.Downloaded, info.Total) - remainingBytes := info.Total - info.Downloaded - downloadTimeLeft := CalculateTime(remainingBytes, info.DownloadRate) - totalDownloadTimeString := time.Duration(info.TotalTime) * time.Second - - 
d.updateSnapshotStageStats(SyncStageStats{ - TimeElapsed: totalDownloadTimeString.String(), - TimeLeft: downloadTimeLeft, - Progress: downloadedPercent, - }, "Downloading snapshots") - - if info.DownloadFinished { - d.SaveData() - return - } - } - } - }() -} - -func getPercentDownloaded(downloaded, total uint64) string { - percent := float32(downloaded) / float32(total/100) - - if percent > 100 { - percent = 100 - } - - return fmt.Sprintf("%.2f%%", percent) -} - -func (d *DiagnosticClient) updateSnapshotStageStats(stats SyncStageStats, subStageInfo string) { - d.mu.Lock() - defer d.mu.Unlock() - idxs := d.GetCurrentSyncIdxs() - if idxs.Stage == -1 || idxs.SubStage == -1 { - log.Debug("[Diagnostics] Can't find running stage or substage while updating Snapshots stage stats.", "stages:", d.syncStages, "stats:", stats, "subStageInfo:", subStageInfo) - return - } - - d.syncStages[idxs.Stage].SubStages[idxs.SubStage].Stats = stats -} - -func (d *DiagnosticClient) runSegmentDownloadingListener(rootCtx context.Context) { - go func() { - ctx, ch, closeChannel := Context[SegmentDownloadStatistics](rootCtx, 1) - defer closeChannel() - - StartProviders(ctx, TypeOf(SegmentDownloadStatistics{}), log.Root()) - for { - select { - case <-rootCtx.Done(): - return - case info := <-ch: - d.mu.Lock() - if d.syncStats.SnapshotDownload.SegmentsDownloading == nil { - d.syncStats.SnapshotDownload.SegmentsDownloading = map[string]SegmentDownloadStatistics{} - } - - if val, ok := d.syncStats.SnapshotDownload.SegmentsDownloading[info.Name]; ok { - val.TotalBytes = info.TotalBytes - val.DownloadedBytes = info.DownloadedBytes - val.Webseeds = info.Webseeds - val.Peers = info.Peers - - d.syncStats.SnapshotDownload.SegmentsDownloading[info.Name] = val - } else { - d.syncStats.SnapshotDownload.SegmentsDownloading[info.Name] = info - } - d.mu.Unlock() - } - } - }() -} - -func (d *DiagnosticClient) runSegmentIndexingListener(rootCtx context.Context) { - go func() { - ctx, ch, closeChannel := 
Context[SnapshotIndexingStatistics](rootCtx, 1) - defer closeChannel() - - StartProviders(ctx, TypeOf(SnapshotIndexingStatistics{}), log.Root()) - for { - select { - case <-rootCtx.Done(): - return - case info := <-ch: - d.addOrUpdateSegmentIndexingState(info) - d.updateIndexingStatus() - - if d.syncStats.SnapshotIndexing.IndexingFinished { - d.SaveData() - return - } - } - } - }() -} - -func (d *DiagnosticClient) runSegmentIndexingFinishedListener(rootCtx context.Context) { - go func() { - ctx, ch, closeChannel := Context[SnapshotSegmentIndexingFinishedUpdate](rootCtx, 1) - defer closeChannel() - - StartProviders(ctx, TypeOf(SnapshotSegmentIndexingFinishedUpdate{}), log.Root()) - for { - select { - case <-rootCtx.Done(): - return - case info := <-ch: - d.mu.Lock() - found := false - for i := range d.syncStats.SnapshotIndexing.Segments { - if d.syncStats.SnapshotIndexing.Segments[i].SegmentName == info.SegmentName { - found = true - d.syncStats.SnapshotIndexing.Segments[i].Percent = 100 - } - } - - if !found { - d.syncStats.SnapshotIndexing.Segments = append(d.syncStats.SnapshotIndexing.Segments, SnapshotSegmentIndexingStatistics{ - SegmentName: info.SegmentName, - Percent: 100, - Alloc: 0, - Sys: 0, - }) - } - - d.mu.Unlock() - - d.updateIndexingStatus() - } - } - }() -} - -func (d *DiagnosticClient) updateIndexingStatus() { - totalProgressPercent := 0 - for _, seg := range d.syncStats.SnapshotIndexing.Segments { - totalProgressPercent += seg.Percent - } - - totalProgress := totalProgressPercent / len(d.syncStats.SnapshotIndexing.Segments) - - d.updateSnapshotStageStats(SyncStageStats{ - TimeElapsed: SecondsToHHMMString(uint64(d.syncStats.SnapshotIndexing.TimeElapsed)), - TimeLeft: "unknown", - Progress: fmt.Sprintf("%d%%", totalProgress), - }, "Indexing snapshots") - - if totalProgress >= 100 { - d.syncStats.SnapshotIndexing.IndexingFinished = true - } -} - -func (d *DiagnosticClient) addOrUpdateSegmentIndexingState(upd SnapshotIndexingStatistics) { - d.mu.Lock() 
- defer d.mu.Unlock() - if d.syncStats.SnapshotIndexing.Segments == nil { - d.syncStats.SnapshotIndexing.Segments = []SnapshotSegmentIndexingStatistics{} - } - - for i := range upd.Segments { - found := false - for j := range d.syncStats.SnapshotIndexing.Segments { - if d.syncStats.SnapshotIndexing.Segments[j].SegmentName == upd.Segments[i].SegmentName { - d.syncStats.SnapshotIndexing.Segments[j].Percent = upd.Segments[i].Percent - d.syncStats.SnapshotIndexing.Segments[j].Alloc = upd.Segments[i].Alloc - d.syncStats.SnapshotIndexing.Segments[j].Sys = upd.Segments[i].Sys - found = true - break - } - } - - if !found { - d.syncStats.SnapshotIndexing.Segments = append(d.syncStats.SnapshotIndexing.Segments, upd.Segments[i]) - } - } - - d.syncStats.SnapshotIndexing.TimeElapsed = upd.TimeElapsed -} - -func (d *DiagnosticClient) runSnapshotFilesListListener(rootCtx context.Context) { - go func() { - ctx, ch, closeChannel := Context[SnapshoFilesList](rootCtx, 1) - defer closeChannel() - - StartProviders(ctx, TypeOf(SnapshoFilesList{}), log.Root()) - for { - select { - case <-rootCtx.Done(): - return - case info := <-ch: - d.mu.Lock() - d.snapshotFileList = info - d.mu.Unlock() - - if len(info.Files) > 0 { - return - } - } - } - }() -} - -func (d *DiagnosticClient) runFileDownloadedListener(rootCtx context.Context) { - go func() { - ctx, ch, closeChannel := Context[FileDownloadedStatisticsUpdate](rootCtx, 1) - defer closeChannel() - - StartProviders(ctx, TypeOf(FileDownloadedStatisticsUpdate{}), log.Root()) - for { - select { - case <-rootCtx.Done(): - return - case info := <-ch: - d.mu.Lock() - - if d.syncStats.SnapshotDownload.SegmentsDownloading == nil { - d.syncStats.SnapshotDownload.SegmentsDownloading = map[string]SegmentDownloadStatistics{} - } - - if val, ok := d.syncStats.SnapshotDownload.SegmentsDownloading[info.FileName]; ok { - val.DownloadedStats = FileDownloadedStatistics{ - TimeTook: info.TimeTook, - AverageRate: info.AverageRate, - } - - 
d.syncStats.SnapshotDownload.SegmentsDownloading[info.FileName] = val - } else { - d.syncStats.SnapshotDownload.SegmentsDownloading[info.FileName] = SegmentDownloadStatistics{ - Name: info.FileName, - TotalBytes: 0, - DownloadedBytes: 0, - Webseeds: nil, - Peers: nil, - DownloadedStats: FileDownloadedStatistics{ - TimeTook: info.TimeTook, - AverageRate: info.AverageRate, - }, - } - } - - d.mu.Unlock() - } - } - }() -} - -func (d *DiagnosticClient) UpdateFileDownloadedStatistics(downloadedInfo *FileDownloadedStatisticsUpdate, downloadingInfo *SegmentDownloadStatistics) { - if d.syncStats.SnapshotDownload.SegmentsDownloading == nil { - d.syncStats.SnapshotDownload.SegmentsDownloading = map[string]SegmentDownloadStatistics{} - } - - if downloadedInfo != nil { - dwStats := FileDownloadedStatistics{ - TimeTook: downloadedInfo.TimeTook, - AverageRate: downloadedInfo.AverageRate, - } - if val, ok := d.syncStats.SnapshotDownload.SegmentsDownloading[downloadedInfo.FileName]; ok { - val.DownloadedStats = dwStats - - d.syncStats.SnapshotDownload.SegmentsDownloading[downloadedInfo.FileName] = val - } else { - d.syncStats.SnapshotDownload.SegmentsDownloading[downloadedInfo.FileName] = SegmentDownloadStatistics{ - Name: downloadedInfo.FileName, - TotalBytes: 0, - DownloadedBytes: 0, - Webseeds: make([]SegmentPeer, 0), - Peers: make([]SegmentPeer, 0), - DownloadedStats: dwStats, - } - } - } else { - if val, ok := d.syncStats.SnapshotDownload.SegmentsDownloading[downloadingInfo.Name]; ok { - val.TotalBytes = downloadingInfo.TotalBytes - val.DownloadedBytes = downloadingInfo.DownloadedBytes - val.Webseeds = downloadingInfo.Webseeds - val.Peers = downloadingInfo.Peers - - d.syncStats.SnapshotDownload.SegmentsDownloading[downloadingInfo.Name] = val - } else { - d.syncStats.SnapshotDownload.SegmentsDownloading[downloadingInfo.Name] = *downloadingInfo - } - } -} - func (d *DiagnosticClient) runFillDBListener(rootCtx context.Context) { go func() { ctx, ch, closeChannel := 
Context[SnapshotFillDBStageUpdate](rootCtx, 1) @@ -353,31 +58,12 @@ func (d *DiagnosticClient) runFillDBListener(rootCtx context.Context) { totalTimeString := time.Duration(info.TimeElapsed) * time.Second - d.mu.Lock() - d.updateSnapshotStageStats(SyncStageStats{ + d.UpdateSnapshotStageStats(SyncStageStats{ TimeElapsed: totalTimeString.String(), TimeLeft: "unknown", Progress: fmt.Sprintf("%d%%", (info.Stage.Current*100)/info.Stage.Total), }, "Fill DB from snapshots") - - err := d.db.Update(d.ctx, func(tx kv.RwTx) error { - err := SnapshotFillDBUpdater(d.syncStats.SnapshotFillDB)(tx) - if err != nil { - return err - } - - err = StagesListUpdater(d.syncStages)(tx) - if err != nil { - return err - } - - return nil - }) - - if err != nil { - log.Warn("[Diagnostics] Failed to update snapshot download info", "err", err) - } - d.mu.Unlock() + d.SaveSnapshotStageStatsToDB() } } }() @@ -387,6 +73,10 @@ func (d *DiagnosticClient) SetFillDBInfo(info SnapshotFillDBStage) { d.mu.Lock() defer d.mu.Unlock() + d.setFillDBInfo(info) +} + +func (d *DiagnosticClient) setFillDBInfo(info SnapshotFillDBStage) { if d.syncStats.SnapshotFillDB.Stages == nil { d.syncStats.SnapshotFillDB.Stages = []SnapshotFillDBStage{info} } else { @@ -400,12 +90,59 @@ func (d *DiagnosticClient) SetFillDBInfo(info SnapshotFillDBStage) { } } +func (d *DiagnosticClient) SaveSnapshotStageStatsToDB() { + d.mu.Lock() + defer d.mu.Unlock() + d.saveSnapshotStageStatsToDB() +} + +func (d *DiagnosticClient) saveSnapshotStageStatsToDB() { + err := d.db.Update(d.ctx, func(tx kv.RwTx) error { + err := SnapshotFillDBUpdater(d.syncStats.SnapshotFillDB)(tx) + if err != nil { + return err + } + + err = StagesListUpdater(d.syncStages)(tx) + if err != nil { + return err + } + + return nil + }) + if err != nil { + log.Debug("[Diagnostics] Failed to update snapshot download info", "err", err) + } +} + +// Deprecated - it's not thread-safe and used only in tests. Need introduce another method or add special methods for Tests. 
func (d *DiagnosticClient) SyncStatistics() SyncStatistics { - return d.syncStats + var newStats SyncStatistics + statsBytes, err := json.Marshal(d.syncStats) + if err != nil { + return SyncStatistics{} + } + err = json.Unmarshal(statsBytes, &newStats) + if err != nil { + return SyncStatistics{} + } + return newStats +} + +func (d *DiagnosticClient) SyncStatsJson(w io.Writer) { + d.mu.Lock() + defer d.mu.Unlock() + if err := json.NewEncoder(w).Encode(d.syncStats); err != nil { + log.Debug("[diagnostics] SyncStatsJson", "err", err) + } } -func (d *DiagnosticClient) SnapshotFilesList() SnapshoFilesList { - return d.snapshotFileList +func (d *DiagnosticClient) SnapshotFilesListJson(w io.Writer) { + d.mu.Lock() + defer d.mu.Unlock() + if err := json.NewEncoder(w).Encode(d.snapshotFileList); err != nil { + log.Debug("[diagnostics] SnapshotFilesListJson", "err", err) + } } func SnapshotDownloadInfoFromTx(tx kv.Tx) ([]byte, error) { diff --git a/erigon-lib/diagnostics/snapshots_download.go b/erigon-lib/diagnostics/snapshots_download.go new file mode 100644 index 00000000000..693cb362602 --- /dev/null +++ b/erigon-lib/diagnostics/snapshots_download.go @@ -0,0 +1,215 @@ +// Copyright 2024 The Erigon Authors +// This file is part of Erigon. +// +// Erigon is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// Erigon is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with Erigon. If not, see . 
+ +package diagnostics + +import ( + "context" + + "github.com/ledgerwatch/log/v3" +) + +func (d *DiagnosticClient) runSnapshotListener(rootCtx context.Context) { + go func() { + ctx, ch, closeChannel := Context[SnapshotDownloadStatistics](rootCtx, 1) + defer closeChannel() + + StartProviders(ctx, TypeOf(SnapshotDownloadStatistics{}), log.Root()) + for { + select { + case <-rootCtx.Done(): + return + case info := <-ch: + d.SetSnapshotDownloadInfo(info) + d.UpdateSnapshotStageStats(CalculateSyncStageStats(info), "Downloading snapshots") + + if info.DownloadFinished { + d.SaveData() + return + } + } + } + }() +} + +func (d *DiagnosticClient) SetSnapshotDownloadInfo(info SnapshotDownloadStatistics) { + d.mu.Lock() + defer d.mu.Unlock() + d.setSnapshotDownloadInfo(info) +} + +func (d *DiagnosticClient) setSnapshotDownloadInfo(info SnapshotDownloadStatistics) { + d.syncStats.SnapshotDownload.Downloaded = info.Downloaded + d.syncStats.SnapshotDownload.Total = info.Total + d.syncStats.SnapshotDownload.TotalTime = info.TotalTime + d.syncStats.SnapshotDownload.DownloadRate = info.DownloadRate + d.syncStats.SnapshotDownload.UploadRate = info.UploadRate + d.syncStats.SnapshotDownload.Peers = info.Peers + d.syncStats.SnapshotDownload.Files = info.Files + d.syncStats.SnapshotDownload.Connections = info.Connections + d.syncStats.SnapshotDownload.Alloc = info.Alloc + d.syncStats.SnapshotDownload.Sys = info.Sys + d.syncStats.SnapshotDownload.DownloadFinished = info.DownloadFinished + d.syncStats.SnapshotDownload.TorrentMetadataReady = info.TorrentMetadataReady +} + +func (d *DiagnosticClient) runSegmentDownloadingListener(rootCtx context.Context) { + go func() { + ctx, ch, closeChannel := Context[SegmentDownloadStatistics](rootCtx, 1) + defer closeChannel() + + StartProviders(ctx, TypeOf(SegmentDownloadStatistics{}), log.Root()) + for { + select { + case <-rootCtx.Done(): + return + case info := <-ch: + d.SetDownloadSegments(info) + } + } + }() +} + +func (d *DiagnosticClient) 
SetDownloadSegments(info SegmentDownloadStatistics) { + d.mu.Lock() + defer d.mu.Unlock() + d.setDownloadSegments(info) +} + +func (d *DiagnosticClient) setDownloadSegments(info SegmentDownloadStatistics) { + if d.syncStats.SnapshotDownload.SegmentsDownloading == nil { + d.syncStats.SnapshotDownload.SegmentsDownloading = map[string]SegmentDownloadStatistics{} + } + + if val, ok := d.syncStats.SnapshotDownload.SegmentsDownloading[info.Name]; ok { + val.TotalBytes = info.TotalBytes + val.DownloadedBytes = info.DownloadedBytes + val.Webseeds = info.Webseeds + val.Peers = info.Peers + + d.syncStats.SnapshotDownload.SegmentsDownloading[info.Name] = val + } else { + d.syncStats.SnapshotDownload.SegmentsDownloading[info.Name] = info + } +} + +func (d *DiagnosticClient) runSnapshotFilesListListener(rootCtx context.Context) { + go func() { + ctx, ch, closeChannel := Context[SnapshoFilesList](rootCtx, 1) + defer closeChannel() + + StartProviders(ctx, TypeOf(SnapshoFilesList{}), log.Root()) + for { + select { + case <-rootCtx.Done(): + return + case info := <-ch: + d.SetSnapshotFilesList(info) + + if len(info.Files) > 0 { + return + } + } + } + }() +} + +func (d *DiagnosticClient) SetSnapshotFilesList(info SnapshoFilesList) { + d.mu.Lock() + defer d.mu.Unlock() + d.setSnapshotFilesList(info) +} + +func (d *DiagnosticClient) setSnapshotFilesList(info SnapshoFilesList) { + d.snapshotFileList = info +} + +func (d *DiagnosticClient) runFileDownloadedListener(rootCtx context.Context) { + go func() { + ctx, ch, closeChannel := Context[FileDownloadedStatisticsUpdate](rootCtx, 1) + defer closeChannel() + + StartProviders(ctx, TypeOf(FileDownloadedStatisticsUpdate{}), log.Root()) + for { + select { + case <-rootCtx.Done(): + return + case info := <-ch: + d.UpdateFileDownloadedStatistics(&info, nil) + } + } + }() +} + +func (d *DiagnosticClient) UpdateFileDownloadedStatistics(downloadedInfo *FileDownloadedStatisticsUpdate, downloadingInfo *SegmentDownloadStatistics) { + d.mu.Lock() + 
defer d.mu.Unlock() + d.updateFileDownloadedStatistics(downloadedInfo, downloadingInfo) +} + +func (d *DiagnosticClient) updateFileDownloadedStatistics(downloadedInfo *FileDownloadedStatisticsUpdate, downloadingInfo *SegmentDownloadStatistics) { + if d.syncStats.SnapshotDownload.SegmentsDownloading == nil { + d.syncStats.SnapshotDownload.SegmentsDownloading = map[string]SegmentDownloadStatistics{} + } + + if downloadedInfo != nil { + dwStats := FileDownloadedStatistics{ + TimeTook: downloadedInfo.TimeTook, + AverageRate: downloadedInfo.AverageRate, + } + if val, ok := d.syncStats.SnapshotDownload.SegmentsDownloading[downloadedInfo.FileName]; ok { + val.DownloadedStats = dwStats + + d.syncStats.SnapshotDownload.SegmentsDownloading[downloadedInfo.FileName] = val + } else { + d.syncStats.SnapshotDownload.SegmentsDownloading[downloadedInfo.FileName] = SegmentDownloadStatistics{ + Name: downloadedInfo.FileName, + TotalBytes: 0, + DownloadedBytes: 0, + Webseeds: make([]SegmentPeer, 0), + Peers: make([]SegmentPeer, 0), + DownloadedStats: dwStats, + } + } + } else { + if val, ok := d.syncStats.SnapshotDownload.SegmentsDownloading[downloadingInfo.Name]; ok { + val.TotalBytes = downloadingInfo.TotalBytes + val.DownloadedBytes = downloadingInfo.DownloadedBytes + val.Webseeds = downloadingInfo.Webseeds + val.Peers = downloadingInfo.Peers + + d.syncStats.SnapshotDownload.SegmentsDownloading[downloadingInfo.Name] = val + } else { + d.syncStats.SnapshotDownload.SegmentsDownloading[downloadingInfo.Name] = *downloadingInfo + } + } +} + +func (d *DiagnosticClient) UpdateSnapshotStageStats(stats SyncStageStats, subStageInfo string) { + d.mu.Lock() + defer d.mu.Unlock() + d.updateSnapshotStageStats(stats, subStageInfo) +} + +func (d *DiagnosticClient) updateSnapshotStageStats(stats SyncStageStats, subStageInfo string) { + idxs := d.getCurrentSyncIdxs() + if idxs.Stage == -1 || idxs.SubStage == -1 { + log.Debug("[Diagnostics] Can't find running stage or substage while updating 
Snapshots stage stats.", "stages:", d.syncStages, "stats:", stats, "subStageInfo:", subStageInfo) + return + } + + d.syncStages[idxs.Stage].SubStages[idxs.SubStage].Stats = stats +} diff --git a/erigon-lib/diagnostics/snapshots_indexing.go b/erigon-lib/diagnostics/snapshots_indexing.go new file mode 100644 index 00000000000..abb37b07186 --- /dev/null +++ b/erigon-lib/diagnostics/snapshots_indexing.go @@ -0,0 +1,108 @@ +// Copyright 2024 The Erigon Authors +// This file is part of Erigon. +// +// Erigon is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// Erigon is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with Erigon. If not, see . 
+ +package diagnostics + +import ( + "context" + "fmt" + + "github.com/ledgerwatch/log/v3" +) + +func (d *DiagnosticClient) runSegmentIndexingListener(rootCtx context.Context) { + go func() { + ctx, ch, closeChannel := Context[SnapshotIndexingStatistics](rootCtx, 1) + defer closeChannel() + + StartProviders(ctx, TypeOf(SnapshotIndexingStatistics{}), log.Root()) + for { + select { + case <-rootCtx.Done(): + return + case info := <-ch: + d.AddOrUpdateSegmentIndexingState(info) + indexingFinished := d.UpdateIndexingStatus() + if indexingFinished { + d.SaveData() + return + } + } + } + }() +} + +func (d *DiagnosticClient) AddOrUpdateSegmentIndexingState(upd SnapshotIndexingStatistics) { + d.mu.Lock() + defer d.mu.Unlock() + d.addOrUpdateSegmentIndexingState(upd) +} + +func (d *DiagnosticClient) addOrUpdateSegmentIndexingState(upd SnapshotIndexingStatistics) { + if d.syncStats.SnapshotIndexing.Segments == nil { + d.syncStats.SnapshotIndexing.Segments = []SnapshotSegmentIndexingStatistics{} + } + + for i := range upd.Segments { + found := false + for j := range d.syncStats.SnapshotIndexing.Segments { + if d.syncStats.SnapshotIndexing.Segments[j].SegmentName == upd.Segments[i].SegmentName { + d.syncStats.SnapshotIndexing.Segments[j].Percent = upd.Segments[i].Percent + d.syncStats.SnapshotIndexing.Segments[j].Alloc = upd.Segments[i].Alloc + d.syncStats.SnapshotIndexing.Segments[j].Sys = upd.Segments[i].Sys + found = true + break + } + } + + if !found { + d.syncStats.SnapshotIndexing.Segments = append(d.syncStats.SnapshotIndexing.Segments, upd.Segments[i]) + } + } + + // If elapsed time is equal to minus one it menas that indexing took less than main loop update and we should not update it + if upd.TimeElapsed != -1 { + d.syncStats.SnapshotIndexing.TimeElapsed = upd.TimeElapsed + } +} + +func (d *DiagnosticClient) UpdateIndexingStatus() (indexingFinished bool) { + d.mu.Lock() + defer d.mu.Unlock() + + return d.updateIndexingStatus() +} + +func (d *DiagnosticClient) 
updateIndexingStatus() (indexingFinished bool) { + totalProgressPercent := 0 + for _, seg := range d.syncStats.SnapshotIndexing.Segments { + totalProgressPercent += seg.Percent + } + + totalProgress := totalProgressPercent / len(d.syncStats.SnapshotIndexing.Segments) + + d.updateSnapshotStageStats(SyncStageStats{ + TimeElapsed: SecondsToHHMMString(uint64(d.syncStats.SnapshotIndexing.TimeElapsed)), + TimeLeft: "unknown", + Progress: fmt.Sprintf("%d%%", totalProgress), + }, "Indexing snapshots") + + if totalProgress >= 100 { + d.syncStats.SnapshotIndexing.IndexingFinished = true + } + + return d.syncStats.SnapshotIndexing.IndexingFinished +} diff --git a/erigon-lib/diagnostics/snapshots_test.go b/erigon-lib/diagnostics/snapshots_test.go index 3c8fef60d4c..f3d1ad3982a 100644 --- a/erigon-lib/diagnostics/snapshots_test.go +++ b/erigon-lib/diagnostics/snapshots_test.go @@ -1,3 +1,19 @@ +// Copyright 2024 The Erigon Authors +// This file is part of Erigon. +// +// Erigon is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// Erigon is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with Erigon. If not, see . 
+ package diagnostics_test import ( @@ -26,7 +42,9 @@ func TestUpdateFileDownloadingStats(t *testing.T) { d.UpdateFileDownloadedStatistics(&fileDownloadedUpdMock, nil) - require.Equal(t, sd["test"], diagnostics.SegmentDownloadStatistics{ + sd = d.SyncStatistics().SnapshotDownload.SegmentsDownloading + + toccompare := diagnostics.SegmentDownloadStatistics{ Name: "test", TotalBytes: 1, DownloadedBytes: 1, @@ -36,7 +54,8 @@ func TestUpdateFileDownloadingStats(t *testing.T) { TimeTook: 1.0, AverageRate: 1, }, - }) + } + require.Equal(t, sd["test"], toccompare) } var ( @@ -55,3 +74,111 @@ var ( DownloadedStats: diagnostics.FileDownloadedStatistics{}, } ) + +func TestPercentDiownloaded(t *testing.T) { + downloaded := uint64(10) + total := uint64(100) + files := int32(20) + torrentMetadataReady := int32(10) + + //Test metadata not ready + progress := diagnostics.GetShanpshotsPercentDownloaded(downloaded, total, torrentMetadataReady, files) + require.Equal(t, progress, "calculating...") + + //Test metadata ready + progress = diagnostics.GetShanpshotsPercentDownloaded(downloaded, total, files, files) + require.Equal(t, progress, "10%") + + //Test 100 % + progress = diagnostics.GetShanpshotsPercentDownloaded(total, total, files, files) + require.Equal(t, progress, "100%") + + //Test 0 % + progress = diagnostics.GetShanpshotsPercentDownloaded(0, total, files, files) + require.Equal(t, progress, "0%") + + //Test more than 100 % + progress = diagnostics.GetShanpshotsPercentDownloaded(total+1, total, files, files) + require.Equal(t, progress, "100%") +} + +func TestFillDBFromSnapshots(t *testing.T) { + d, err := NewTestDiagnosticClient() + require.NoError(t, err) + + d.SetFillDBInfo(diagnostics.SnapshotFillDBStage{StageName: "Headers", Current: 1, Total: 10}) + stats := d.SyncStatistics() + require.NotEmpty(t, stats.SnapshotFillDB.Stages) + require.Equal(t, stats.SnapshotFillDB.Stages[0], diagnostics.SnapshotFillDBStage{StageName: "Headers", Current: 1, Total: 10}) +} + +func 
TestAddOrUpdateSegmentIndexingState(t *testing.T) { + dts := []diagnostics.SnapshotSegmentIndexingStatistics{ + { + SegmentName: "test", + Percent: 50, + Alloc: 0, + Sys: 0, + }, + } + + d, err := NewTestDiagnosticClient() + require.NoError(t, err) + + d.AddOrUpdateSegmentIndexingState(diagnostics.SnapshotIndexingStatistics{ + Segments: dts, + TimeElapsed: -1, + }) + stats := d.SyncStatistics() + + require.NotEmpty(t, stats.SnapshotIndexing) + require.NotEmpty(t, stats.SnapshotIndexing.Segments) + require.Equal(t, stats.SnapshotIndexing.Segments[0], dts[0]) + require.True(t, stats.SnapshotIndexing.TimeElapsed == 0) + require.False(t, stats.SnapshotIndexing.IndexingFinished) + + dts = []diagnostics.SnapshotSegmentIndexingStatistics{ + { + SegmentName: "test", + Percent: 100, + Alloc: 0, + Sys: 0, + }, + { + SegmentName: "test2", + Percent: 10, + Alloc: 0, + Sys: 0, + }, + } + + d.AddOrUpdateSegmentIndexingState(diagnostics.SnapshotIndexingStatistics{ + Segments: dts, + TimeElapsed: 20, + }) + + stats = d.SyncStatistics() + require.Equal(t, stats.SnapshotIndexing.Segments[0].Percent, 100) + + finished := d.UpdateIndexingStatus() + require.False(t, finished) + + //test indexing finished + dts = []diagnostics.SnapshotSegmentIndexingStatistics{ + { + SegmentName: "test2", + Percent: 100, + Alloc: 0, + Sys: 0, + }, + } + d.AddOrUpdateSegmentIndexingState(diagnostics.SnapshotIndexingStatistics{ + Segments: dts, + TimeElapsed: 20, + }) + + finished = d.UpdateIndexingStatus() + require.True(t, finished) + stats = d.SyncStatistics() + require.True(t, stats.SnapshotIndexing.IndexingFinished) +} diff --git a/erigon-lib/diagnostics/speedtest.go b/erigon-lib/diagnostics/speedtest.go index 54fad6f161d..ffcbd72efdd 100644 --- a/erigon-lib/diagnostics/speedtest.go +++ b/erigon-lib/diagnostics/speedtest.go @@ -1,75 +1,83 @@ +// Copyright 2024 The Erigon Authors +// This file is part of Erigon. 
+// +// Erigon is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// Erigon is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with Erigon. If not, see . + package diagnostics import ( "context" + "encoding/json" + "io" + "net/http" "time" - "github.com/showwin/speedtest-go/speedtest" - "github.com/showwin/speedtest-go/speedtest/transport" + "github.com/erigontech/speedtest/speedtest" + "github.com/ledgerwatch/log/v3" ) +var cloudflareHeaders = http.Header{ + "lsjdjwcush6jbnjj3jnjscoscisoc5s": []string{"I%OSJDNFKE783DDHHJD873EFSIVNI7384R78SSJBJBCCJBC32JABBJCBJK45"}, +} + func (d *DiagnosticClient) setupSpeedtestDiagnostics(rootCtx context.Context) { go func() { if d.speedTest { d.networkSpeedMutex.Lock() + defer d.networkSpeedMutex.Unlock() d.networkSpeed = d.runSpeedTest(rootCtx) - d.networkSpeedMutex.Unlock() } }() } -var cacheServerList speedtest.Servers - func (d *DiagnosticClient) runSpeedTest(rootCtx context.Context) NetworkSpeedTestResult { - var speedtestClient = speedtest.New() - - serverList, err := speedtestClient.FetchServers() - // Ensure that the server list can rolled back to the previous cache. 
- if err == nil { - cacheServerList = serverList + result := NetworkSpeedTestResult{ + Latency: time.Duration(0), + DownloadSpeed: float64(0), + UploadSpeed: float64(0), + PacketLoss: float64(-1), } - targets, _ := cacheServerList.FindServer([]int{}) - - latency := time.Duration(0) - downloadSpeed := float64(0) - uploadSpeed := float64(0) - packetLoss := float64(-1) - - analyzer := speedtest.NewPacketLossAnalyzer(nil) - - if len(targets) > 0 { - s := targets[0] - err = s.PingTestContext(rootCtx, nil) - if err == nil { - latency = s.Latency - } - err = s.DownloadTestContext(rootCtx) - if err == nil { - downloadSpeed = s.DLSpeed.Mbps() - } - - err = s.UploadTestContext(rootCtx) - if err == nil { - uploadSpeed = s.ULSpeed.Mbps() - } + urlstr, err := speedtest.SelectSegmentFromWebseeds(d.webseedsList, cloudflareHeaders) + if err != nil { + log.Debug("[diagnostics] runSpeedTest", "err", err) + return result + } - ctx, cancel := context.WithTimeout(rootCtx, time.Second*15) + s, err := speedtest.CustomServer(urlstr) + if err != nil { + log.Debug("[diagnostics] runSpeedTest", "err", err) + return result + } - defer cancel() - _ = analyzer.RunWithContext(ctx, s.Host, func(pl *transport.PLoss) { - packetLoss = pl.Loss() - }) + err = s.PingTestContext(rootCtx, nil) + if err == nil { + result.Latency = s.Latency } - return NetworkSpeedTestResult{ - Latency: latency, - DownloadSpeed: downloadSpeed, - UploadSpeed: uploadSpeed, - PacketLoss: packetLoss, + err = s.DownloadTestContext(rootCtx) + if err == nil { + result.DownloadSpeed = s.DLSpeed.Mbps() } + + return result } -func (d *DiagnosticClient) GetNetworkSpeed() NetworkSpeedTestResult { - return d.networkSpeed +func (d *DiagnosticClient) NetworkSpeedJson(w io.Writer) { + d.networkSpeedMutex.Lock() + defer d.networkSpeedMutex.Unlock() + if err := json.NewEncoder(w).Encode(d.networkSpeed); err != nil { + log.Debug("[diagnostics] ResourcesUsageJson", "err", err) + } } diff --git a/erigon-lib/diagnostics/stages.go 
b/erigon-lib/diagnostics/stages.go index ac911fa0934..a38aa60e5e4 100644 --- a/erigon-lib/diagnostics/stages.go +++ b/erigon-lib/diagnostics/stages.go @@ -1,8 +1,26 @@ +// Copyright 2024 The Erigon Authors +// This file is part of Erigon. +// +// Erigon is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// Erigon is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with Erigon. If not, see . + package diagnostics import ( "context" + "encoding/json" "fmt" + "io" "github.com/ledgerwatch/erigon-lib/common" "github.com/ledgerwatch/erigon-lib/kv" @@ -159,6 +177,12 @@ func (d *DiagnosticClient) runSubStageListener(rootCtx context.Context) { } func (d *DiagnosticClient) GetCurrentSyncIdxs() CurrentSyncStagesIdxs { + d.mu.Lock() + defer d.mu.Unlock() + return d.getCurrentSyncIdxs() +} + +func (d *DiagnosticClient) getCurrentSyncIdxs() CurrentSyncStagesIdxs { currentIdxs := CurrentSyncStagesIdxs{ Stage: -1, SubStage: -1, @@ -267,7 +291,12 @@ func (d *DiagnosticClient) SetCurrentSyncSubStage(css CurrentSyncSubStage) { } } +// Deprecated - used only in tests. Non-thread-safe. 
func (d *DiagnosticClient) GetStageState(stageId string) (StageState, error) { + return d.getStageState(stageId) +} + +func (d *DiagnosticClient) getStageState(stageId string) (StageState, error) { for _, stage := range d.syncStages { if stage.ID == stageId { return stage.State, nil @@ -295,6 +324,15 @@ func StagesListUpdater(info []SyncStage) func(tx kv.RwTx) error { return PutDataToTable(kv.DiagSyncStages, StagesListKey, info) } +// Deprecated - not thread-safe method. Used only in tests. Need introduce more thread-safe method or something special for tests. func (d *DiagnosticClient) GetSyncStages() []SyncStage { return d.syncStages } + +func (d *DiagnosticClient) SyncStagesJson(w io.Writer) { + d.mu.Lock() + defer d.mu.Unlock() + if err := json.NewEncoder(w).Encode(d.syncStages); err != nil { + log.Debug("[diagnostics] HardwareInfoJson", "err", err) + } +} diff --git a/erigon-lib/diagnostics/sys_info.go b/erigon-lib/diagnostics/sys_info.go index b470eab2aba..97620910ff1 100644 --- a/erigon-lib/diagnostics/sys_info.go +++ b/erigon-lib/diagnostics/sys_info.go @@ -1,6 +1,25 @@ +// Copyright 2024 The Erigon Authors +// This file is part of Erigon. +// +// Erigon is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// Erigon is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with Erigon. If not, see . 
+ package diagnostics import ( + "encoding/json" + "io" + "github.com/shirou/gopsutil/v3/cpu" "github.com/shirou/gopsutil/v3/disk" "github.com/shirou/gopsutil/v3/mem" @@ -18,6 +37,9 @@ var ( ) func (d *DiagnosticClient) setupSysInfoDiagnostics() { + d.mu.Lock() + defer d.mu.Unlock() + sysInfo := GetSysInfo(d.dataDirPath) var funcs []func(tx kv.RwTx) error @@ -33,18 +55,18 @@ func (d *DiagnosticClient) setupSysInfoDiagnostics() { return nil }) - if err != nil { log.Warn("[Diagnostics] Failed to update system info", "err", err) } - - d.mu.Lock() d.hardwareInfo = sysInfo - d.mu.Unlock() } -func (d *DiagnosticClient) HardwareInfo() HardwareInfo { - return d.hardwareInfo +func (d *DiagnosticClient) HardwareInfoJson(w io.Writer) { + d.mu.Lock() + defer d.mu.Unlock() + if err := json.NewEncoder(w).Encode(d.hardwareInfo); err != nil { + log.Debug("[diagnostics] HardwareInfoJson", "err", err) + } } func findNodeDisk(dirPath string) string { @@ -68,25 +90,30 @@ func GetSysInfo(dirPath string) HardwareInfo { } func GetRAMInfo() RAMInfo { - totalRAM := uint64(0) - freeRAM := uint64(0) + rmi := RAMInfo{ + Total: 0, + Available: 0, + Used: 0, + UsedPercent: 0, + } vmStat, err := mem.VirtualMemory() if err == nil { - totalRAM = vmStat.Total - freeRAM = vmStat.Free + rmi.Total = vmStat.Total + rmi.Available = vmStat.Available + rmi.Used = vmStat.Used + rmi.UsedPercent = vmStat.UsedPercent } - return RAMInfo{ - Total: totalRAM, - Free: freeRAM, - } + return rmi } func GetDiskInfo(nodeDisk string) DiskInfo { fsType := "" total := uint64(0) free := uint64(0) + mountPoint := "/" + device := "/" partitions, err := disk.Partitions(false) @@ -98,6 +125,8 @@ func GetDiskInfo(nodeDisk string) DiskInfo { fsType = partition.Fstype total = iocounters.Total free = iocounters.Free + mountPoint = partition.Mountpoint + device = partition.Device break } @@ -105,34 +134,36 @@ func GetDiskInfo(nodeDisk string) DiskInfo { } } + diskDetails, err := diskutils.DiskInfo(device) + if err != nil { + 
log.Debug("[diagnostics] Failed to get disk info", "err", err) + } + return DiskInfo{ - FsType: fsType, - Total: total, - Free: free, + FsType: fsType, + Total: total, + Free: free, + MountPoint: mountPoint, + Device: device, + Details: diskDetails, } } -func GetCPUInfo() CPUInfo { - modelName := "" - cores := 0 - mhz := float64(0) +func GetCPUInfo() []CPUInfo { + cpuinfo := make([]CPUInfo, 0) cpuInfo, err := cpu.Info() if err == nil { for _, info := range cpuInfo { - modelName = info.ModelName - cores = int(info.Cores) - mhz = info.Mhz - - break + cpuinfo = append(cpuinfo, CPUInfo{ + ModelName: info.ModelName, + Cores: info.Cores, + Mhz: info.Mhz, + }) } } - return CPUInfo{ - ModelName: modelName, - Cores: cores, - Mhz: mhz, - } + return cpuinfo } func ReadRAMInfoFromTx(tx kv.Tx) ([]byte, error) { @@ -166,7 +197,7 @@ func RAMInfoUpdater(info RAMInfo) func(tx kv.RwTx) error { return PutDataToTable(kv.DiagSystemInfo, SystemRamInfoKey, info) } -func CPUInfoUpdater(info CPUInfo) func(tx kv.RwTx) error { +func CPUInfoUpdater(info []CPUInfo) func(tx kv.RwTx) error { return PutDataToTable(kv.DiagSystemInfo, SystemCpuInfoKey, info) } diff --git a/erigon-lib/diagnostics/utils.go b/erigon-lib/diagnostics/utils.go index fd6eef81fb7..f9c4e75d049 100644 --- a/erigon-lib/diagnostics/utils.go +++ b/erigon-lib/diagnostics/utils.go @@ -1,9 +1,26 @@ +// Copyright 2024 The Erigon Authors +// This file is part of Erigon. +// +// Erigon is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// Erigon is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. 
+// +// You should have received a copy of the GNU Lesser General Public License +// along with Erigon. If not, see . + package diagnostics import ( "encoding/json" "fmt" "reflect" + "time" "github.com/ledgerwatch/erigon-lib/kv" "github.com/ledgerwatch/log/v3" @@ -63,14 +80,21 @@ func InitSubStagesFromList(list []string) []SyncSubStage { func CalculateTime(amountLeft, rate uint64) string { if rate == 0 { - return "999hrs:99m" + return "999h:99m" } timeLeftInSeconds := amountLeft / rate hours := timeLeftInSeconds / 3600 minutes := (timeLeftInSeconds / 60) % 60 - return fmt.Sprintf("%dhrs:%dm", hours, minutes) + if hours == 0 && minutes == 0 { + return fmt.Sprintf("%ds", timeLeftInSeconds) + } else if hours == 0 { + //sec := timeLeftInSeconds % 60 + return fmt.Sprintf("%dm:%ds", minutes, timeLeftInSeconds%60) + } + + return fmt.Sprintf("%dh:%dm", hours, minutes) } func SecondsToHHMMString(seconds uint64) string { @@ -94,3 +118,47 @@ func ParseData(data []byte, v interface{}) { log.Warn("[Diagnostics] Failed to parse data", "data", string(data), "type", reflect.TypeOf(v)) } } + +func CalculateSyncStageStats(info SnapshotDownloadStatistics) SyncStageStats { + downloadedPercent := GetShanpshotsPercentDownloaded(info.Downloaded, info.Total, info.TorrentMetadataReady, info.Files) + remainingBytes := info.Total - info.Downloaded + downloadTimeLeft := CalculateTime(remainingBytes, info.DownloadRate) + totalDownloadTimeString := time.Duration(info.TotalTime) * time.Second + + return SyncStageStats{ + TimeElapsed: totalDownloadTimeString.String(), + TimeLeft: downloadTimeLeft, + Progress: downloadedPercent, + } +} + +func GetShanpshotsPercentDownloaded(downloaded uint64, total uint64, torrentMetadataReady int32, files int32) string { + if torrentMetadataReady < files { + return "calculating..." 
+ } + + if downloaded == 0 || total == 0 { + return "0%" + } + + fd := float32(downloaded) + t100 := float32(total) / 100 + ft := float32(t100) + percent := fd / ft + + if percent > 100 { + percent = 100 + } + + // return the percentage with 2 decimal places if it's not .00 + if percent == float32(int(percent)) { + return fmt.Sprintf("%.0f%%", percent) + } + + // return the percentage with 1 decimal places if it has only one decimal place like (50.5% or 23.7%) + if percent == float32(int(percent*10))/10 { + return fmt.Sprintf("%.1f%%", percent) + } + + return fmt.Sprintf("%.2f%%", percent) +} diff --git a/erigon-lib/diagnostics/utils_test.go b/erigon-lib/diagnostics/utils_test.go index 1ca8f11bc9d..1c5a6d439e7 100644 --- a/erigon-lib/diagnostics/utils_test.go +++ b/erigon-lib/diagnostics/utils_test.go @@ -15,8 +15,10 @@ func TestParseData(t *testing.T) { require.Equal(t, diagnostics.RAMInfo{}, v) newv := diagnostics.RAMInfo{ - Total: 1, - Free: 2, + Total: 1, + Available: 2, + Used: 3, + UsedPercent: 4, } data, err := json.Marshal(newv) @@ -25,3 +27,46 @@ func TestParseData(t *testing.T) { diagnostics.ParseData(data, &v) require.Equal(t, newv, v) } + +// Testing the function CalculateSyncStageStats +func TestCalculateSyncStageStats(t *testing.T) { + sds := diagnostics.SnapshotDownloadStatistics{ + Downloaded: 100, + Total: 200, + TorrentMetadataReady: 10, + Files: 10, + DownloadRate: 10, + TotalTime: 1000, + } + + expected := diagnostics.SyncStageStats{ + TimeElapsed: "16m40s", + TimeLeft: "10s", + Progress: "50%", + } + + require.Equal(t, expected, diagnostics.CalculateSyncStageStats(sds)) +} + +// Test CalculateTime function +func TestCalculateTime(t *testing.T) { + require.Equal(t, "999h:99m", diagnostics.CalculateTime(0, 0)) + require.Equal(t, "999h:99m", diagnostics.CalculateTime(1, 0)) + require.Equal(t, "1s", diagnostics.CalculateTime(1, 1)) + require.Equal(t, "10s", diagnostics.CalculateTime(10, 1)) + require.Equal(t, "2m:40s", 
diagnostics.CalculateTime(160, 1)) + require.Equal(t, "1h:40m", diagnostics.CalculateTime(6000, 1)) +} + +// Test GetShanpshotsPercentDownloaded function +func TestGetShanpshotsPercentDownloaded(t *testing.T) { + require.Equal(t, "0%", diagnostics.GetShanpshotsPercentDownloaded(0, 0, 0, 0)) + require.Equal(t, "0%", diagnostics.GetShanpshotsPercentDownloaded(0, 1, 0, 0)) + require.Equal(t, "100%", diagnostics.GetShanpshotsPercentDownloaded(1, 1, 1, 1)) + require.Equal(t, "50%", diagnostics.GetShanpshotsPercentDownloaded(1, 2, 1, 1)) + + require.Equal(t, "50.01%", diagnostics.GetShanpshotsPercentDownloaded(5001, 10000, 1, 1)) + require.Equal(t, "50.5%", diagnostics.GetShanpshotsPercentDownloaded(5050, 10000, 1, 1)) + + require.Equal(t, "calculating...", diagnostics.GetShanpshotsPercentDownloaded(10000, 10000, 0, 1)) +} diff --git a/erigon-lib/diskutils/diskutils.go b/erigon-lib/diskutils/diskutils.go index 8a908716af4..a668fcb7c76 100644 --- a/erigon-lib/diskutils/diskutils.go +++ b/erigon-lib/diskutils/diskutils.go @@ -10,3 +10,8 @@ func MountPointForDirPath(dirPath string) string { log.Debug("[diskutils] Implemented only for darwin") return "/" } + +func DiskInfo(disk string) (string, error) { + log.Debug("[diskutils] Implemented only for darwin") + return "", nil +} diff --git a/erigon-lib/diskutils/diskutils_darwin.go b/erigon-lib/diskutils/diskutils_darwin.go index a1383766d0a..60d161c215c 100644 --- a/erigon-lib/diskutils/diskutils_darwin.go +++ b/erigon-lib/diskutils/diskutils_darwin.go @@ -3,7 +3,9 @@ package diskutils import ( + "bytes" "os" + "os/exec" "syscall" "github.com/ledgerwatch/log/v3" @@ -49,3 +51,16 @@ func SmlinkForDirPath(dirPath string) string { return dirPath } } + +func DiskInfo(disk string) (string, error) { + cmd := exec.Command("diskutil", "info", disk) + var out bytes.Buffer + cmd.Stdout = &out + err := cmd.Run() + if err != nil { + return "", err + } + + output := out.String() + return output, nil +} diff --git a/erigon-lib/go.mod 
b/erigon-lib/go.mod index 6d9f7e0a638..b7eb155adf7 100644 --- a/erigon-lib/go.mod +++ b/erigon-lib/go.mod @@ -88,6 +88,7 @@ require ( github.com/davecgh/go-spew v1.1.1 // indirect github.com/docker/go-units v0.5.0 // indirect github.com/dustin/go-humanize v1.0.1 // indirect + github.com/erigontech/speedtest v0.0.2 github.com/go-llsqlite/adapter v0.0.0-20230927005056-7f5ce7f0c916 // indirect github.com/go-llsqlite/crawshaw v0.4.0 // indirect github.com/go-logr/logr v1.2.3 // indirect @@ -127,7 +128,6 @@ require ( github.com/prometheus/procfs v0.12.0 // indirect github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec // indirect github.com/shoenig/go-m1cpu v0.1.6 // indirect - github.com/showwin/speedtest-go v1.7.7 github.com/sirupsen/logrus v1.9.3 // indirect github.com/tklauser/go-sysconf v0.3.14 // indirect github.com/tklauser/numcpus v0.8.0 // indirect diff --git a/erigon-lib/go.sum b/erigon-lib/go.sum index fd52c78fac8..86205fd0fea 100644 --- a/erigon-lib/go.sum +++ b/erigon-lib/go.sum @@ -143,6 +143,8 @@ github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1m github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/erigontech/mdbx-go v0.27.24 h1:jNsRE/4jC1F3S5SpAbmgT5jrEkfrdFk2MKEL9toVPxo= github.com/erigontech/mdbx-go v0.27.24/go.mod h1:FAMxbOgqOnRDx51j8HjuJZIgznbDwjX7LItd+/UWyA4= +github.com/erigontech/speedtest v0.0.2 h1:W9Cvky/8AMUtUONwkLA/dZjeQ2XfkBdYfJzvhMZUO+U= +github.com/erigontech/speedtest v0.0.2/go.mod h1:vulsRNiM51BmSTbVtch4FWxKxx53pS2D35lZTtao0bw= github.com/erigontech/torrent v1.54.2-alpha-8 h1:MQobu6sUZCFbmWpsB7GqAh0IWs7VAZ370POaVxlApIk= github.com/erigontech/torrent v1.54.2-alpha-8/go.mod h1:nYNeuR4xPlEl4CturFD9/KRXBRJEcJGqjegDNWakwG4= github.com/frankban/quicktest v1.9.0/go.mod h1:ui7WezCLWMWxVWr1GETZY3smRy0G4KWq9vcPtJmFl7Y= @@ -415,8 +417,6 @@ github.com/shoenig/go-m1cpu v0.1.6 h1:nxdKQNcEB6vzgA2E2bvzKIYRuNj7XNJ4S/aRSwKzFt 
github.com/shoenig/go-m1cpu v0.1.6/go.mod h1:1JJMcUBvfNwpq05QDQVAnx3gUHr9IYF7GNg9SUEw2VQ= github.com/shoenig/test v0.6.4 h1:kVTaSd7WLz5WZ2IaoM0RSzRsUD+m8wRR+5qvntpn4LU= github.com/shoenig/test v0.6.4/go.mod h1:byHiCGXqrVaflBLAMq/srcZIHynQPQgeyvkvXnjqq0k= -github.com/showwin/speedtest-go v1.7.7 h1:VmK75SZOTKiuWjIVrs+mo7ZoKEw0utiGCvpnurS0olU= -github.com/showwin/speedtest-go v1.7.7/go.mod h1:uLgdWCNarXxlYsL2E5TOZpCIwpgSWnEANZp7gfHXHu0= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= diff --git a/go.mod b/go.mod index c56c2030818..fc440a32f43 100644 --- a/go.mod +++ b/go.mod @@ -159,6 +159,7 @@ require ( github.com/docker/go-units v0.5.0 // indirect github.com/dustin/go-humanize v1.0.1 // indirect github.com/elastic/gosigar v0.14.2 // indirect + github.com/erigontech/speedtest v0.0.2 // indirect github.com/flynn/noise v1.0.0 // indirect github.com/francoispqt/gojay v1.2.13 // indirect github.com/fsnotify/fsnotify v1.5.4 // indirect @@ -261,7 +262,6 @@ require ( github.com/shirou/gopsutil/v3 v3.24.3 // indirect github.com/shoenig/go-m1cpu v0.1.6 // indirect github.com/shopspring/decimal v1.2.0 // indirect - github.com/showwin/speedtest-go v1.7.7 // indirect github.com/sirupsen/logrus v1.9.3 // indirect github.com/sosodev/duration v1.1.0 // indirect github.com/spaolacci/murmur3 v1.1.0 // indirect diff --git a/go.sum b/go.sum index 9811d21cf7e..6e7a1602495 100644 --- a/go.sum +++ b/go.sum @@ -272,6 +272,8 @@ github.com/erigontech/mdbx-go v0.27.24 h1:jNsRE/4jC1F3S5SpAbmgT5jrEkfrdFk2MKEL9t github.com/erigontech/mdbx-go v0.27.24/go.mod h1:FAMxbOgqOnRDx51j8HjuJZIgznbDwjX7LItd+/UWyA4= github.com/erigontech/silkworm-go v0.18.0 h1:j56p61xZHBFhZGH1OixlGU8KcfjHzcw9pjAfjmVsOZA= github.com/erigontech/silkworm-go v0.18.0/go.mod h1:O50ux0apICEVEGyRWiE488K8qz8lc3PA/SXbQQAc8SU= 
+github.com/erigontech/speedtest v0.0.2 h1:W9Cvky/8AMUtUONwkLA/dZjeQ2XfkBdYfJzvhMZUO+U= +github.com/erigontech/speedtest v0.0.2/go.mod h1:vulsRNiM51BmSTbVtch4FWxKxx53pS2D35lZTtao0bw= github.com/erigontech/torrent v1.54.2-alpha-8 h1:MQobu6sUZCFbmWpsB7GqAh0IWs7VAZ370POaVxlApIk= github.com/erigontech/torrent v1.54.2-alpha-8/go.mod h1:nYNeuR4xPlEl4CturFD9/KRXBRJEcJGqjegDNWakwG4= github.com/fjl/gencodec v0.0.0-20220412091415-8bb9e558978c h1:CndMRAH4JIwxbW8KYq6Q+cGWcGHz0FjGR3QqcInWcW0= @@ -821,8 +823,6 @@ github.com/shoenig/test v0.6.4 h1:kVTaSd7WLz5WZ2IaoM0RSzRsUD+m8wRR+5qvntpn4LU= github.com/shoenig/test v0.6.4/go.mod h1:byHiCGXqrVaflBLAMq/srcZIHynQPQgeyvkvXnjqq0k= github.com/shopspring/decimal v1.2.0 h1:abSATXmQEYyShuxI4/vyW3tV1MrKAJzCZ/0zLUXYbsQ= github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= -github.com/showwin/speedtest-go v1.7.7 h1:VmK75SZOTKiuWjIVrs+mo7ZoKEw0utiGCvpnurS0olU= -github.com/showwin/speedtest-go v1.7.7/go.mod h1:uLgdWCNarXxlYsL2E5TOZpCIwpgSWnEANZp7gfHXHu0= github.com/shurcooL/component v0.0.0-20170202220835-f88ec8f54cc4/go.mod h1:XhFIlyj5a1fBNx5aJTbKoIq0mNaPvOagO+HjB3EtxrY= github.com/shurcooL/events v0.0.0-20181021180414-410e4ca65f48/go.mod h1:5u70Mqkb5O5cxEA8nxTsgrgLehJeAw6Oc4Ab1c/P1HM= github.com/shurcooL/github_flavored_markdown v0.0.0-20181002035957-2122de532470/go.mod h1:2dOwnU2uBioM+SGy2aZoq1f/Sd1l9OkAeAUvjSyvgU0= diff --git a/turbo/snapshotsync/freezeblocks/block_snapshots.go b/turbo/snapshotsync/freezeblocks/block_snapshots.go index 2c203945194..89e75e1e2a9 100644 --- a/turbo/snapshotsync/freezeblocks/block_snapshots.go +++ b/turbo/snapshotsync/freezeblocks/block_snapshots.go @@ -894,11 +894,18 @@ func buildIdx(ctx context.Context, sn snaptype.FileInfo, chainConfig *chain.Conf } func notifySegmentIndexingFinished(name string) { - diagnostics.Send( - diagnostics.SnapshotSegmentIndexingFinishedUpdate{ + dts := []diagnostics.SnapshotSegmentIndexingStatistics{ + 
diagnostics.SnapshotSegmentIndexingStatistics{ SegmentName: name, + Percent: 100, + Alloc: 0, + Sys: 0, }, - ) + } + diagnostics.Send(diagnostics.SnapshotIndexingStatistics{ + Segments: dts, + TimeElapsed: -1, + }) } func sendDiagnostics(startIndexingTime time.Time, indexPercent map[string]int, alloc uint64, sys uint64) { From 4cad1f957d64ed49a9ed3c0685d6681e96b90cc2 Mon Sep 17 00:00:00 2001 From: Alex Sharov Date: Tue, 6 Aug 2024 13:39:37 +0700 Subject: [PATCH 07/49] release e3 files to e2 (#11490) --- erigon-lib/go.mod | 2 +- erigon-lib/go.sum | 4 ++-- go.mod | 2 +- go.sum | 4 ++-- 4 files changed, 6 insertions(+), 6 deletions(-) diff --git a/erigon-lib/go.mod b/erigon-lib/go.mod index b7eb155adf7..badff03bf90 100644 --- a/erigon-lib/go.mod +++ b/erigon-lib/go.mod @@ -4,7 +4,7 @@ go 1.21 require ( github.com/erigontech/mdbx-go v0.27.24 - github.com/ledgerwatch/erigon-snapshot v1.3.1-0.20240717084354-f9b3622c55ba + github.com/ledgerwatch/erigon-snapshot v1.3.1-0.20240805114253-42da880260bb github.com/ledgerwatch/interfaces v0.0.0-20240320062914-b57f05746087 github.com/ledgerwatch/log/v3 v3.9.0 github.com/ledgerwatch/secp256k1 v1.0.0 diff --git a/erigon-lib/go.sum b/erigon-lib/go.sum index 86205fd0fea..bfcd00e3b31 100644 --- a/erigon-lib/go.sum +++ b/erigon-lib/go.sum @@ -269,8 +269,8 @@ github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/leanovate/gopter v0.2.9 h1:fQjYxZaynp97ozCzfOyOuAGOU4aU/z37zf/tOujFk7c= github.com/leanovate/gopter v0.2.9/go.mod h1:U2L/78B+KVFIx2VmW6onHJQzXtFb+p5y3y2Sh+Jxxv8= -github.com/ledgerwatch/erigon-snapshot v1.3.1-0.20240717084354-f9b3622c55ba h1:pG3hPilkaYxng8eldvsLtMAixdRN04Lu49jH6LQNvnM= -github.com/ledgerwatch/erigon-snapshot v1.3.1-0.20240717084354-f9b3622c55ba/go.mod h1:3AuPxZc85jkehh/HA9h8gabv5MSi3kb/ddtzBsTVJFo= +github.com/ledgerwatch/erigon-snapshot v1.3.1-0.20240805114253-42da880260bb 
h1:bsoVxjnQGxhOODRmkdrbkRTB9+sIduguoNMSZPRRoTI= +github.com/ledgerwatch/erigon-snapshot v1.3.1-0.20240805114253-42da880260bb/go.mod h1:3AuPxZc85jkehh/HA9h8gabv5MSi3kb/ddtzBsTVJFo= github.com/ledgerwatch/interfaces v0.0.0-20240320062914-b57f05746087 h1:Y59HUAT/+02Qbm6g7MuY7i8E0kUihPe7+ftDnR8oQzQ= github.com/ledgerwatch/interfaces v0.0.0-20240320062914-b57f05746087/go.mod h1:ugQv1QllJzBny3cKZKxUrSnykkjkBgm27eQM6dnGAcc= github.com/ledgerwatch/log/v3 v3.9.0 h1:iDwrXe0PVwBC68Dd94YSsHbMgQ3ufsgjzXtFNFVZFRk= diff --git a/go.mod b/go.mod index fc440a32f43..9efebeb59fd 100644 --- a/go.mod +++ b/go.mod @@ -186,7 +186,7 @@ require ( github.com/koron/go-ssdp v0.0.4 // indirect github.com/kr/pretty v0.3.1 // indirect github.com/kr/text v0.2.0 // indirect - github.com/ledgerwatch/erigon-snapshot v1.3.1-0.20240717084354-f9b3622c55ba // indirect + github.com/ledgerwatch/erigon-snapshot v1.3.1-0.20240805114253-42da880260bb // indirect github.com/libp2p/go-buffer-pool v0.1.0 // indirect github.com/libp2p/go-cidranger v1.1.0 // indirect github.com/libp2p/go-flow-metrics v0.1.0 // indirect diff --git a/go.sum b/go.sum index 6e7a1602495..a50321c625d 100644 --- a/go.sum +++ b/go.sum @@ -538,8 +538,8 @@ github.com/kylelemons/godebug v0.0.0-20170224010052-a616ab194758 h1:0D5M2HQSGD3P github.com/kylelemons/godebug v0.0.0-20170224010052-a616ab194758/go.mod h1:B69LEHPfb2qLo0BaaOLcbitczOKLWTsrBG9LczfCD4k= github.com/leanovate/gopter v0.2.9 h1:fQjYxZaynp97ozCzfOyOuAGOU4aU/z37zf/tOujFk7c= github.com/leanovate/gopter v0.2.9/go.mod h1:U2L/78B+KVFIx2VmW6onHJQzXtFb+p5y3y2Sh+Jxxv8= -github.com/ledgerwatch/erigon-snapshot v1.3.1-0.20240717084354-f9b3622c55ba h1:pG3hPilkaYxng8eldvsLtMAixdRN04Lu49jH6LQNvnM= -github.com/ledgerwatch/erigon-snapshot v1.3.1-0.20240717084354-f9b3622c55ba/go.mod h1:3AuPxZc85jkehh/HA9h8gabv5MSi3kb/ddtzBsTVJFo= +github.com/ledgerwatch/erigon-snapshot v1.3.1-0.20240805114253-42da880260bb h1:bsoVxjnQGxhOODRmkdrbkRTB9+sIduguoNMSZPRRoTI= +github.com/ledgerwatch/erigon-snapshot 
v1.3.1-0.20240805114253-42da880260bb/go.mod h1:3AuPxZc85jkehh/HA9h8gabv5MSi3kb/ddtzBsTVJFo= github.com/ledgerwatch/erigonwatch v0.1.2 h1:/jq0r3oFh61pYk65Rw10aeCJj2Mzs1E2AG6TXG668og= github.com/ledgerwatch/erigonwatch v0.1.2/go.mod h1:5K2cWaom0/rURye4dUUEQg2UyCH2A5zHVp86TDDMaA4= github.com/ledgerwatch/log/v3 v3.9.0 h1:iDwrXe0PVwBC68Dd94YSsHbMgQ3ufsgjzXtFNFVZFRk= From 9e9e143d4e4bd4cd8cb9d06a84b683251e566f36 Mon Sep 17 00:00:00 2001 From: Dmytro Vovk Date: Sun, 11 Aug 2024 13:57:04 +0100 Subject: [PATCH 08/49] don't use lfs for consensus spec tests (#11545) (#11560) Co-authored-by: Andrew Ashikhmin <34320705+yperbasis@users.noreply.github.com> --- cl/spectest/Makefile | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/cl/spectest/Makefile b/cl/spectest/Makefile index b507c4613b1..fda0b4893e6 100644 --- a/cl/spectest/Makefile +++ b/cl/spectest/Makefile @@ -2,11 +2,9 @@ tests: - GIT_LFS_SKIP_SMUDGE=1 GIT_CLONE_PROTECTION_ACTIVE=false git clone https://github.com/ethereum/consensus-spec-tests - cd consensus-spec-tests && git checkout 080c96fbbf3be58e75947debfeb9ba3b2b7c9748 && git lfs pull --exclude=tests/general,tests/minimal && cd .. - mv consensus-spec-tests/tests . 
- rm -rf consensus-spec-tests - rm -rf tests/minimal + wget https://github.com/ethereum/consensus-spec-tests/releases/download/v1.4.0-beta.5/mainnet.tar.gz + tar xf mainnet.tar.gz + rm mainnet.tar.gz # not needed for now rm -rf tests/mainnet/eip6110 clean: From e42dcfa409d5b05dcbff93e3bbea9c74ead25a2f Mon Sep 17 00:00:00 2001 From: Dmytro Vovk Date: Sun, 11 Aug 2024 16:03:18 +0100 Subject: [PATCH 09/49] turbo/snapshotsync: Fmt fix (#11493) (#11559) Removed unnecessary struct type Co-authored-by: Somnath --- turbo/snapshotsync/freezeblocks/block_snapshots.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/turbo/snapshotsync/freezeblocks/block_snapshots.go b/turbo/snapshotsync/freezeblocks/block_snapshots.go index 89e75e1e2a9..ee0449ea4a8 100644 --- a/turbo/snapshotsync/freezeblocks/block_snapshots.go +++ b/turbo/snapshotsync/freezeblocks/block_snapshots.go @@ -895,7 +895,7 @@ func buildIdx(ctx context.Context, sn snaptype.FileInfo, chainConfig *chain.Conf func notifySegmentIndexingFinished(name string) { dts := []diagnostics.SnapshotSegmentIndexingStatistics{ - diagnostics.SnapshotSegmentIndexingStatistics{ + { SegmentName: name, Percent: 100, Alloc: 0, From 0f8ad6c58ac1717ce366351af71d37eaafb01539 Mon Sep 17 00:00:00 2001 From: Dmytro Vovk Date: Sun, 11 Aug 2024 20:42:44 +0100 Subject: [PATCH 10/49] diagnostics: updated sys info to include CPU stats (#11497) (#11561) - Added CPU overall info to report file Example output: ![Screenshot 2024-08-06 at 09 07 51](https://github.com/user-attachments/assets/834c3f48-ec37-48ea-8688-ee6ea00186a4) --- cmd/diag/sysinfo/sysinfo.go | 144 ++++++++++++++++++++++++++++++++++++ cmd/diag/util/util.go | 33 +++++++++ 2 files changed, 177 insertions(+) create mode 100644 cmd/diag/sysinfo/sysinfo.go diff --git a/cmd/diag/sysinfo/sysinfo.go b/cmd/diag/sysinfo/sysinfo.go new file mode 100644 index 00000000000..6999afb5ad3 --- /dev/null +++ b/cmd/diag/sysinfo/sysinfo.go @@ -0,0 +1,144 @@ +// Copyright 2024 The Erigon 
Authors +// This file is part of Erigon. +// +// Erigon is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// Erigon is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with Erigon. If not, see . + +package sysinfo + +import ( + "fmt" + "strconv" + "strings" + + "github.com/urfave/cli/v2" + + "github.com/ledgerwatch/erigon-lib/diagnostics" + "github.com/ledgerwatch/erigon/cmd/diag/flags" + "github.com/ledgerwatch/erigon/cmd/diag/util" +) + +var ( + ExportPathFlag = cli.StringFlag{ + Name: "export.path", + Aliases: []string{"ep"}, + Usage: "Path to folder for export result", + Required: true, + Value: "", + } + + ExportFileNameFlag = cli.StringFlag{ + Name: "export.file", + Aliases: []string{"ef"}, + Usage: "File name to export result default is sysinfo.txt", + Required: false, + Value: "sysinfo.txt", + } +) + +var Command = cli.Command{ + Name: "sysinfo", + Aliases: []string{"sinfo"}, + ArgsUsage: "", + Action: collectInfo, + Flags: []cli.Flag{ + &flags.DebugURLFlag, + &ExportPathFlag, + &ExportFileNameFlag, + }, + Description: "Collect information about system and save it to file in order to provide to support person", +} + +func collectInfo(cliCtx *cli.Context) error { + data, err := getData(cliCtx) + if err != nil { + util.RenderError(err) + } + + var builder strings.Builder + builder.WriteString("Disk info:\n") + builder.WriteString(data.Disk.Details) + builder.WriteString("\n\n") + builder.WriteString("CPU info:\n") + writeCPUToStringBuilder(data.CPU, &builder) + + // Save data to file + err 
= util.SaveDataToFile(cliCtx.String(ExportPathFlag.Name), cliCtx.String(ExportFileNameFlag.Name), builder.String()) + if err != nil { + util.RenderError(err) + } + + return nil +} + +func writeCPUToStringBuilder(cpuInfo []diagnostics.CPUInfo, builder *strings.Builder) { + spacing := calculateSpacing([]string{"CPU", "VendorID", "Family", "Model", "Stepping", "PhysicalID", "CoreID", "Cores", "ModelName", "Mhz", "CacheSize", "Flags", "Microcode"}) + + for _, cpu := range cpuInfo { + writeStringToBuilder(builder, "CPU", strconv.Itoa(int(cpu.CPU)), spacing) + writeStringToBuilder(builder, "VendorID", cpu.VendorID, spacing) + writeStringToBuilder(builder, "Family", cpu.Family, spacing) + writeStringToBuilder(builder, "Model", cpu.Model, spacing) + writeStringToBuilder(builder, "Stepping", strconv.Itoa(int(cpu.Stepping)), spacing) + writeStringToBuilder(builder, "PhysicalID", cpu.PhysicalID, spacing) + writeStringToBuilder(builder, "CoreID", cpu.CoreID, spacing) + writeStringToBuilder(builder, "Cores", strconv.Itoa(int(cpu.Cores)), spacing) + writeStringToBuilder(builder, "ModelName", cpu.ModelName, spacing) + writeStringToBuilder(builder, "Mhz", fmt.Sprintf("%g", cpu.Mhz), spacing) + writeStringToBuilder(builder, "CacheSize", strconv.Itoa(int(cpu.CacheSize)), spacing) + writeStringToBuilder(builder, "Flags", strings.Join(cpu.Flags, ", "), spacing) + writeStringToBuilder(builder, "Microcode", cpu.Microcode, spacing) + } +} + +func calculateSpacing(keysArray []string) int { + max := 0 + for _, key := range keysArray { + if len(key) > max { + max = len(key) + } + } + + return max + 3 +} + +func writeStringToBuilder(result *strings.Builder, name string, value string, spacing int) { + marging := 3 + if value == "" { + value = "N/A" + } + + writeSpacesToBuilder(result, marging) + result.WriteString(name) + result.WriteString(":") + writeSpacesToBuilder(result, spacing-len(name)-1) + result.WriteString(value) + result.WriteString("\n") +} + +func writeSpacesToBuilder(result 
*strings.Builder, spaces int) { + result.WriteString(strings.Repeat(" ", spaces)) +} + +func getData(cliCtx *cli.Context) (diagnostics.HardwareInfo, error) { + var data diagnostics.HardwareInfo + url := "http://" + cliCtx.String(flags.DebugURLFlag.Name) + flags.ApiPath + "/hardware-info" + + err := util.MakeHttpGetCall(cliCtx.Context, url, &data) + + if err != nil { + return data, err + } + + return data, nil +} diff --git a/cmd/diag/util/util.go b/cmd/diag/util/util.go index 43cda2761a7..72d40f29bee 100644 --- a/cmd/diag/util/util.go +++ b/cmd/diag/util/util.go @@ -95,3 +95,36 @@ func RenderError(err error) { txt := text.Colors{text.FgWhite, text.BgRed} fmt.Printf("%s %s\n", txt.Sprint("[ERROR]"), err) } + +func SaveDataToFile(filePath string, fileName string, data string) error { + //check is folder exists + if _, err := os.Stat(filePath); os.IsNotExist(err) { + err := os.MkdirAll(filePath, 0755) + if err != nil { + return err + } + } + + fullPath := MakePath(filePath, fileName) + + file, err := os.Create(fullPath) + if err != nil { + return err + } + defer file.Close() + + _, err = file.WriteString(fmt.Sprintf("%v\n", data)) + if err != nil { + return err + } + + return nil +} + +func MakePath(filePath string, fileName string) string { + if filePath[len(filePath)-1] == '/' { + filePath = filePath[:len(filePath)-1] + } + + return fmt.Sprintf("%s/%s", filePath, fileName) +} From 1f6e0e72e4c750e458ef109ec8f8303b6e5fc98c Mon Sep 17 00:00:00 2001 From: Dmytro Vovk Date: Mon, 12 Aug 2024 01:59:53 +0100 Subject: [PATCH 11/49] Updated gopsutil version (#11507) (#11562) - Updated gopsutil version as it has improvements in getting processes and memory info. 
--- erigon-lib/common/disk/disk.go | 2 +- erigon-lib/common/disk/disk_linux.go | 3 +-- erigon-lib/common/mem/common.go | 6 +++--- erigon-lib/common/mem/mem.go | 2 +- erigon-lib/common/mem/mem_linux.go | 2 +- erigon-lib/diagnostics/sys_info.go | 6 +++--- erigon-lib/go.mod | 4 ++-- erigon-lib/go.sum | 13 ++++--------- go.mod | 4 ++-- go.sum | 11 ++++------- 10 files changed, 22 insertions(+), 31 deletions(-) diff --git a/erigon-lib/common/disk/disk.go b/erigon-lib/common/disk/disk.go index d5f7cd0975b..171c4da191c 100644 --- a/erigon-lib/common/disk/disk.go +++ b/erigon-lib/common/disk/disk.go @@ -6,7 +6,7 @@ import ( "os" "runtime" - "github.com/shirou/gopsutil/v3/process" + "github.com/shirou/gopsutil/v4/process" "github.com/ledgerwatch/erigon-lib/metrics" ) diff --git a/erigon-lib/common/disk/disk_linux.go b/erigon-lib/common/disk/disk_linux.go index 07062fa9c9a..96e5b0a9ff5 100644 --- a/erigon-lib/common/disk/disk_linux.go +++ b/erigon-lib/common/disk/disk_linux.go @@ -6,9 +6,8 @@ import ( "os" "runtime" - "github.com/shirou/gopsutil/v3/process" - "github.com/ledgerwatch/erigon-lib/metrics" + "github.com/shirou/gopsutil/v4/process" ) var ( diff --git a/erigon-lib/common/mem/common.go b/erigon-lib/common/mem/common.go index 0396da6a064..230198ddeb8 100644 --- a/erigon-lib/common/mem/common.go +++ b/erigon-lib/common/mem/common.go @@ -3,16 +3,16 @@ package mem import ( "context" "errors" + "reflect" "runtime" "time" - "github.com/ledgerwatch/log/v3" - "github.com/shirou/gopsutil/v3/process" - "github.com/ledgerwatch/erigon-lib/common" "github.com/ledgerwatch/erigon-lib/common/dbg" "github.com/ledgerwatch/erigon-lib/diagnostics" + "github.com/ledgerwatch/log/v3" + "github.com/shirou/gopsutil/v4/process" ) var ErrorUnsupportedPlatform = errors.New("unsupported platform") diff --git a/erigon-lib/common/mem/mem.go b/erigon-lib/common/mem/mem.go index 968872449cd..c6be2c7178a 100644 --- a/erigon-lib/common/mem/mem.go +++ b/erigon-lib/common/mem/mem.go @@ -5,7 +5,7 @@ 
package mem import ( "errors" - "github.com/shirou/gopsutil/v3/process" + "github.com/shirou/gopsutil/v4/process" ) func ReadVirtualMemStats() (process.MemoryMapsStat, error) { diff --git a/erigon-lib/common/mem/mem_linux.go b/erigon-lib/common/mem/mem_linux.go index 7bdbcf873df..843e3222e6e 100644 --- a/erigon-lib/common/mem/mem_linux.go +++ b/erigon-lib/common/mem/mem_linux.go @@ -6,7 +6,7 @@ import ( "os" "reflect" - "github.com/shirou/gopsutil/v3/process" + "github.com/shirou/gopsutil/v4/process" "github.com/ledgerwatch/erigon-lib/metrics" ) diff --git a/erigon-lib/diagnostics/sys_info.go b/erigon-lib/diagnostics/sys_info.go index 97620910ff1..dce234c007b 100644 --- a/erigon-lib/diagnostics/sys_info.go +++ b/erigon-lib/diagnostics/sys_info.go @@ -20,9 +20,9 @@ import ( "encoding/json" "io" - "github.com/shirou/gopsutil/v3/cpu" - "github.com/shirou/gopsutil/v3/disk" - "github.com/shirou/gopsutil/v3/mem" + "github.com/shirou/gopsutil/v4/cpu" + "github.com/shirou/gopsutil/v4/disk" + "github.com/shirou/gopsutil/v4/mem" "github.com/ledgerwatch/erigon-lib/common" "github.com/ledgerwatch/erigon-lib/diskutils" diff --git a/erigon-lib/go.mod b/erigon-lib/go.mod index badff03bf90..7ef9be1aa4a 100644 --- a/erigon-lib/go.mod +++ b/erigon-lib/go.mod @@ -33,7 +33,7 @@ require ( github.com/prometheus/client_golang v1.19.0 github.com/prometheus/client_model v0.6.1 github.com/quasilyte/go-ruleguard/dsl v0.3.22 - github.com/shirou/gopsutil/v3 v3.24.3 + github.com/shirou/gopsutil/v4 v4.24.7 github.com/spaolacci/murmur3 v1.1.0 github.com/stretchr/testify v1.9.0 github.com/tidwall/btree v1.6.0 @@ -41,7 +41,7 @@ require ( golang.org/x/crypto v0.22.0 golang.org/x/exp v0.0.0-20231226003508-02704c960a9b golang.org/x/sync v0.7.0 - golang.org/x/sys v0.19.0 + golang.org/x/sys v0.22.0 golang.org/x/time v0.5.0 google.golang.org/grpc v1.63.2 google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.3.0 diff --git a/erigon-lib/go.sum b/erigon-lib/go.sum index bfcd00e3b31..b50c4a05786 100644 --- 
a/erigon-lib/go.sum +++ b/erigon-lib/go.sum @@ -216,7 +216,6 @@ github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMyw github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= @@ -411,8 +410,8 @@ github.com/rs/dnscache v0.0.0-20211102005908-e0241e321417/go.mod h1:qe5TWALJ8/a1 github.com/ryszard/goskiplist v0.0.0-20150312221310-2dfbae5fcf46 h1:GHRpF1pTW19a8tTFrMLUcfWwyC0pnifVo2ClaLq+hP8= github.com/ryszard/goskiplist v0.0.0-20150312221310-2dfbae5fcf46/go.mod h1:uAQ5PCi+MFsC7HjREoAz1BU+Mq60+05gifQSsHSDG/8= github.com/sclevine/agouti v3.0.0+incompatible/go.mod h1:b4WX9W9L1sfQKXeJf1mUTLZKJ48R1S7H23Ji7oFO5Bw= -github.com/shirou/gopsutil/v3 v3.24.3 h1:eoUGJSmdfLzJ3mxIhmOAhgKEKgQkeOwKpz1NbhVnuPE= -github.com/shirou/gopsutil/v3 v3.24.3/go.mod h1:JpND7O217xa72ewWz9zN2eIIkPWsDN/3pl0H8Qt0uwg= +github.com/shirou/gopsutil/v4 v4.24.7 h1:V9UGTK4gQ8HvcnPKf6Zt3XHyQq/peaekfxpJ2HSocJk= +github.com/shirou/gopsutil/v4 v4.24.7/go.mod h1:0uW/073rP7FYLOkvxolUQM5rMOLTNmRXnFKafpb71rw= github.com/shoenig/go-m1cpu v0.1.6 h1:nxdKQNcEB6vzgA2E2bvzKIYRuNj7XNJ4S/aRSwKzFtM= github.com/shoenig/go-m1cpu v0.1.6/go.mod h1:1JJMcUBvfNwpq05QDQVAnx3gUHr9IYF7GNg9SUEw2VQ= github.com/shoenig/test v0.6.4 h1:kVTaSd7WLz5WZ2IaoM0RSzRsUD+m8wRR+5qvntpn4LU= @@ -451,10 +450,8 @@ github.com/tidwall/btree v1.6.0/go.mod h1:twD9XRA5jj9VUQGELzDO4HPQTNJsoWWfYEL+EU github.com/tinylib/msgp v1.0.2/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE= 
github.com/tinylib/msgp v1.1.0/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE= github.com/tinylib/msgp v1.1.2/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE= -github.com/tklauser/go-sysconf v0.3.12/go.mod h1:Ho14jnntGE1fpdOqQEEaiKRpvIavV0hSfmBq8nJbHYI= github.com/tklauser/go-sysconf v0.3.14 h1:g5vzr9iPFFz24v2KZXs/pvpvh8/V9Fw6vQK5ZZb78yU= github.com/tklauser/go-sysconf v0.3.14/go.mod h1:1ym4lWMLUOhuBOPGtRcJm7tEGX4SCYNEEEtghGG/8uY= -github.com/tklauser/numcpus v0.6.1/go.mod h1:1XfjsgE2zo8GVw7POkMbHENHzVg3GzmoZ9fESEdAacY= github.com/tklauser/numcpus v0.8.0 h1:Mx4Wwe/FjZLeQsK/6kt2EOepwwSl7SmJrK5bV/dXYgY= github.com/tklauser/numcpus v0.8.0/go.mod h1:ZJZlAY+dmR4eut8epnzf0u/VwodKmryxR8txiloSqBE= github.com/willf/bitset v1.1.9/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4= @@ -583,11 +580,9 @@ golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.19.0 h1:q5f1RH2jigJ1MoAWp2KTp3gm5zAGFUTarQZ5U386+4o= -golang.org/x/sys v0.19.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.22.0 h1:RI27ohtqKCnwULzJLqkv897zojh5/DwS/ENaMzUOaWI= +golang.org/x/sys v0.22.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= diff --git 
a/go.mod b/go.mod index 9efebeb59fd..28d5efbbbfa 100644 --- a/go.mod +++ b/go.mod @@ -98,7 +98,7 @@ require ( golang.org/x/exp v0.0.0-20231226003508-02704c960a9b golang.org/x/net v0.24.0 golang.org/x/sync v0.7.0 - golang.org/x/sys v0.19.0 + golang.org/x/sys v0.22.0 golang.org/x/time v0.5.0 google.golang.org/grpc v1.63.2 google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.3.0 @@ -259,7 +259,7 @@ require ( github.com/rogpeppe/go-internal v1.12.0 // indirect github.com/rs/dnscache v0.0.0-20211102005908-e0241e321417 // indirect github.com/russross/blackfriday/v2 v2.1.0 // indirect - github.com/shirou/gopsutil/v3 v3.24.3 // indirect + github.com/shirou/gopsutil/v4 v4.24.7 // indirect github.com/shoenig/go-m1cpu v0.1.6 // indirect github.com/shopspring/decimal v1.2.0 // indirect github.com/sirupsen/logrus v1.9.3 // indirect diff --git a/go.sum b/go.sum index a50321c625d..286fb740e09 100644 --- a/go.sum +++ b/go.sum @@ -815,8 +815,8 @@ github.com/sclevine/agouti v3.0.0+incompatible/go.mod h1:b4WX9W9L1sfQKXeJf1mUTLZ github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= github.com/sergi/go-diff v1.3.1 h1:xkr+Oxo4BOQKmkn/B9eMK0g5Kg/983T9DqqPHwYqD+8= github.com/sergi/go-diff v1.3.1/go.mod h1:aMJSSKb2lpPvRNec0+w3fl7LP9IOFzdc9Pa4NFbPK1I= -github.com/shirou/gopsutil/v3 v3.24.3 h1:eoUGJSmdfLzJ3mxIhmOAhgKEKgQkeOwKpz1NbhVnuPE= -github.com/shirou/gopsutil/v3 v3.24.3/go.mod h1:JpND7O217xa72ewWz9zN2eIIkPWsDN/3pl0H8Qt0uwg= +github.com/shirou/gopsutil/v4 v4.24.7 h1:V9UGTK4gQ8HvcnPKf6Zt3XHyQq/peaekfxpJ2HSocJk= +github.com/shirou/gopsutil/v4 v4.24.7/go.mod h1:0uW/073rP7FYLOkvxolUQM5rMOLTNmRXnFKafpb71rw= github.com/shoenig/go-m1cpu v0.1.6 h1:nxdKQNcEB6vzgA2E2bvzKIYRuNj7XNJ4S/aRSwKzFtM= github.com/shoenig/go-m1cpu v0.1.6/go.mod h1:1JJMcUBvfNwpq05QDQVAnx3gUHr9IYF7GNg9SUEw2VQ= github.com/shoenig/test v0.6.4 h1:kVTaSd7WLz5WZ2IaoM0RSzRsUD+m8wRR+5qvntpn4LU= @@ -900,10 +900,8 @@ github.com/tidwall/btree v1.6.0/go.mod h1:twD9XRA5jj9VUQGELzDO4HPQTNJsoWWfYEL+EU 
github.com/tinylib/msgp v1.0.2/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE= github.com/tinylib/msgp v1.1.0/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE= github.com/tinylib/msgp v1.1.2/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE= -github.com/tklauser/go-sysconf v0.3.12/go.mod h1:Ho14jnntGE1fpdOqQEEaiKRpvIavV0hSfmBq8nJbHYI= github.com/tklauser/go-sysconf v0.3.14 h1:g5vzr9iPFFz24v2KZXs/pvpvh8/V9Fw6vQK5ZZb78yU= github.com/tklauser/go-sysconf v0.3.14/go.mod h1:1ym4lWMLUOhuBOPGtRcJm7tEGX4SCYNEEEtghGG/8uY= -github.com/tklauser/numcpus v0.6.1/go.mod h1:1XfjsgE2zo8GVw7POkMbHENHzVg3GzmoZ9fESEdAacY= github.com/tklauser/numcpus v0.8.0 h1:Mx4Wwe/FjZLeQsK/6kt2EOepwwSl7SmJrK5bV/dXYgY= github.com/tklauser/numcpus v0.8.0/go.mod h1:ZJZlAY+dmR4eut8epnzf0u/VwodKmryxR8txiloSqBE= github.com/ugorji/go v1.1.13/go.mod h1:jxau1n+/wyTGLQoCkjok9r5zFa/FxT6eI5HiHKQszjc= @@ -1198,9 +1196,8 @@ golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.14.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.19.0 h1:q5f1RH2jigJ1MoAWp2KTp3gm5zAGFUTarQZ5U386+4o= -golang.org/x/sys v0.19.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.22.0 h1:RI27ohtqKCnwULzJLqkv897zojh5/DwS/ENaMzUOaWI= +golang.org/x/sys v0.22.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= From 83482a4df4ad2c1f47b1fd6122f4894936ba9511 Mon Sep 17 00:00:00 2001 From: milen 
<94537774+taratorio@users.noreply.github.com> Date: Mon, 12 Aug 2024 08:43:53 +0100 Subject: [PATCH 12/49] don't use lfs for consensus spec tests (#11545) (#11552) cherry-pick 391fc4b94c6f110bad11246f3e5a7ba948a0087e for E2 needed to unblock PR https://github.com/erigontech/erigon/pull/11551 Co-authored-by: Andrew Ashikhmin <34320705+yperbasis@users.noreply.github.com> From a1e7362c7cef1e9a29dd4ef45cb4e7b611192b84 Mon Sep 17 00:00:00 2001 From: milen <94537774+taratorio@users.noreply.github.com> Date: Mon, 12 Aug 2024 12:03:14 +0100 Subject: [PATCH 13/49] stagedsync: add dbg.SaveHeapProfileNearOOM to headers stage (#11549) (#11551) cherry-pick 2a98f6aa53ccd558543bc95ffe9bf0fad4ef278f for E2 relates to: https://github.com/erigontech/erigon/issues/10734 https://github.com/erigontech/erigon/issues/11387 restart Erigon with `SAVE_HEAP_PROFILE = true` env variable wait until we reach 45% or more alloc in stage_headers when "noProgressCounter >= 5" or "Rejected header marked as bad" --- erigon-lib/common/dbg/experiments.go | 29 +++++++++++++++++++++ eth/stagedsync/stage_headers.go | 1 + turbo/stages/headerdownload/header_algos.go | 7 +++-- 3 files changed, 35 insertions(+), 2 deletions(-) diff --git a/erigon-lib/common/dbg/experiments.go b/erigon-lib/common/dbg/experiments.go index 56a115ab441..787fd57ca11 100644 --- a/erigon-lib/common/dbg/experiments.go +++ b/erigon-lib/common/dbg/experiments.go @@ -18,17 +18,22 @@ package dbg import ( "os" + "path/filepath" "runtime" + "runtime/pprof" "strconv" "sync" "time" "github.com/ledgerwatch/log/v3" + + "github.com/ledgerwatch/erigon-lib/mmap" ) var ( // force skipping of any non-Erigon2 .torrent files DownloaderOnlyBlocks = EnvBool("DOWNLOADER_ONLY_BLOCKS", false) + saveHeapProfile = EnvBool("SAVE_HEAP_PROFILE", false) ) var StagesOnlyBlocks = EnvBool("STAGES_ONLY_BLOCKS", false) @@ -320,3 +325,27 @@ func LogHashMismatchReason() bool { }) return logHashMismatchReason } + +func SaveHeapProfileNearOOM() { + if !saveHeapProfile { 
+ return + } + + var m runtime.MemStats + ReadMemStats(&m) + if m.Alloc < (mmap.TotalMemory()/100)*45 { + return + } + + // above 45% + filePath := filepath.Join(os.TempDir(), "erigon-mem.prof") + log.Info("[Experiment] saving heap profile as near OOM", "alloc", m.Alloc, "filePath", filePath) + + f, _ := os.Create(filePath) + defer func() { + _ = f.Close() + }() + + runtime.GC() + _ = pprof.WriteHeapProfile(f) +} diff --git a/eth/stagedsync/stage_headers.go b/eth/stagedsync/stage_headers.go index 59e382b4b7e..cef3cafcc99 100644 --- a/eth/stagedsync/stage_headers.go +++ b/eth/stagedsync/stage_headers.go @@ -307,6 +307,7 @@ Loop: logger.Info("Req/resp stats", "req", stats.Requests, "reqMin", stats.ReqMinBlock, "reqMax", stats.ReqMaxBlock, "skel", stats.SkeletonRequests, "skelMin", stats.SkeletonReqMinBlock, "skelMax", stats.SkeletonReqMaxBlock, "resp", stats.Responses, "respMin", stats.RespMinBlock, "respMax", stats.RespMaxBlock, "dups", stats.Duplicates) + dbg.SaveHeapProfileNearOOM() cfg.hd.LogAnchorState() if wasProgress { logger.Warn("Looks like chain is not progressing, moving to the next stage") diff --git a/turbo/stages/headerdownload/header_algos.go b/turbo/stages/headerdownload/header_algos.go index c7c0184b8e5..739c5257d95 100644 --- a/turbo/stages/headerdownload/header_algos.go +++ b/turbo/stages/headerdownload/header_algos.go @@ -15,6 +15,7 @@ import ( "strings" "time" + "github.com/ledgerwatch/erigon-lib/common/dbg" "github.com/ledgerwatch/erigon-lib/common/metrics" "github.com/ledgerwatch/erigon-lib/kv/dbutils" @@ -104,7 +105,8 @@ func (hd *HeaderDownload) SingleHeaderAsSegment(headerRaw []byte, header *types. 
headerHash := types.RawRlpHash(headerRaw) if _, bad := hd.badHeaders[headerHash]; bad { hd.stats.RejectedBadHeaders++ - hd.logger.Warn("[downloader] Rejected header marked as bad", "hash", headerHash, "height", header.Number.Uint64()) + dbg.SaveHeapProfileNearOOM() + hd.logger.Warn("[downloader] SingleHeaderAsSegment: Rejected header marked as bad", "hash", headerHash, "height", header.Number.Uint64()) return nil, BadBlockPenalty, nil } if penalizePoSBlocks && header.Difficulty.Sign() == 0 { @@ -517,7 +519,8 @@ func (hd *HeaderDownload) InsertHeader(hf FeedHeaderFunc, terminalTotalDifficult hd.removeUpwards(link) dataflow.HeaderDownloadStates.AddChange(link.blockHeight, dataflow.HeaderBad) hd.stats.RejectedBadHeaders++ - hd.logger.Warn("[downloader] Rejected header marked as bad", "hash", link.hash, "height", link.blockHeight) + dbg.SaveHeapProfileNearOOM() + hd.logger.Warn("[downloader] InsertHeader: Rejected header marked as bad", "hash", link.hash, "height", link.blockHeight) return true, false, 0, lastTime, nil } if !link.verified { From 250c70f250e86e64efd9d8502feac0c4989db77d Mon Sep 17 00:00:00 2001 From: Dmytro Vovk Date: Mon, 12 Aug 2024 14:43:35 +0100 Subject: [PATCH 14/49] diagnostics: export system info (#11567) Changes: - Collecting CPU and Memory usage info about all processes running on the machine - Running loop 5 times with 2 seconds delay and to calculate average - Sort by CPU usage - Write result to report file - Display totals for CPU and Memory usage to processes table - Display CPU usage by cores - Print CPU details to table Cherry pick from: - #11516 - #11526 - #11537 - #11544 --- cmd/diag/downloader/diag_downloader.go | 21 ++- cmd/diag/stages/stages.go | 3 +- cmd/diag/sysinfo/sysinfo.go | 128 ++++++++++----- cmd/diag/util/util.go | 36 ++++- erigon-lib/sysutils/sysutils.go | 212 +++++++++++++++++++++++++ erigon-lib/sysutils/sysutils_test.go | 68 ++++++++ 6 files changed, 412 insertions(+), 56 deletions(-) create mode 100644 
erigon-lib/sysutils/sysutils.go create mode 100644 erigon-lib/sysutils/sysutils_test.go diff --git a/cmd/diag/downloader/diag_downloader.go b/cmd/diag/downloader/diag_downloader.go index 4988f719ad7..671ff96906e 100644 --- a/cmd/diag/downloader/diag_downloader.go +++ b/cmd/diag/downloader/diag_downloader.go @@ -74,10 +74,11 @@ func printDownloadStatus(cliCtx *cli.Context) error { util.RenderJson(snapshotDownloadStatus) case "text": - util.RenderTableWithHeader( + util.PrintTable( "Snapshot download info:", table.Row{"Status", "Progress", "Downloaded", "Total", "Time Left", "Total Time", "Download Rate", "Upload Rate", "Peers", "Files", "Connections", "Alloc", "Sys"}, []table.Row{snapshotDownloadStatus}, + nil, ) } @@ -115,17 +116,19 @@ func printFiles(cliCtx *cli.Context) error { util.RenderJson(filteredRows) case "text": //Print overall status - util.RenderTableWithHeader( + util.PrintTable( "Snapshot download info:", table.Row{"Status", "Progress", "Downloaded", "Total", "Time Left", "Total Time", "Download Rate", "Upload Rate", "Peers", "Files", "Connections", "Alloc", "Sys"}, []table.Row{snapshotDownloadStatus}, + nil, ) //Print files status - util.RenderTableWithHeader( + util.PrintTable( "Files download info:", table.Row{"File", "Progress", "Total", "Downloaded", "Peers", "Peers Download Rate", "Webseeds", "Webseeds Download Rate", "Time Left", "Active"}, filteredRows, + nil, ) } @@ -150,10 +153,11 @@ func printFile(cliCtx *cli.Context) error { util.RenderJson(fileRow) case "text": //Print file status - util.RenderTableWithHeader( + util.PrintTable( "File download info:", table.Row{"File", "Size", "Average Download Rate", "Time Took"}, []table.Row{fileRow}, + nil, ) } } else { @@ -168,23 +172,26 @@ func printFile(cliCtx *cli.Context) error { util.RenderJson(fileWebseeds) case "text": //Print file status - util.RenderTableWithHeader( + util.PrintTable( "file download info:", table.Row{"File", "Progress", "Total", "Downloaded", "Peers", "Peers Download Rate", 
"Webseeds", "Webseeds Download Rate", "Time Left", "Active"}, []table.Row{fileRow}, + nil, ) //Print peers and webseeds status - util.RenderTableWithHeader( + util.PrintTable( "", table.Row{"Peer", "Download Rate"}, filePeers, + nil, ) - util.RenderTableWithHeader( + util.PrintTable( "", table.Row{"Webseed", "Download Rate"}, fileWebseeds, + nil, ) } } diff --git a/cmd/diag/stages/stages.go b/cmd/diag/stages/stages.go index a2b74468b09..f73b2959f79 100644 --- a/cmd/diag/stages/stages.go +++ b/cmd/diag/stages/stages.go @@ -133,10 +133,11 @@ func printData(cliCtx *cli.Context, data []table.Row) { util.RenderJson(data) case "text": - util.RenderTableWithHeader( + util.PrintTable( "", table.Row{"Stage", "SubStage", "Status", "Time Elapsed", "Progress"}, data, + nil, ) } } diff --git a/cmd/diag/sysinfo/sysinfo.go b/cmd/diag/sysinfo/sysinfo.go index 6999afb5ad3..94105d3af60 100644 --- a/cmd/diag/sysinfo/sysinfo.go +++ b/cmd/diag/sysinfo/sysinfo.go @@ -18,12 +18,14 @@ package sysinfo import ( "fmt" - "strconv" + "sort" "strings" + "github.com/jedib0t/go-pretty/v6/table" "github.com/urfave/cli/v2" "github.com/ledgerwatch/erigon-lib/diagnostics" + "github.com/ledgerwatch/erigon-lib/sysutils" "github.com/ledgerwatch/erigon/cmd/diag/flags" "github.com/ledgerwatch/erigon/cmd/diag/util" ) @@ -59,18 +61,30 @@ var Command = cli.Command{ Description: "Collect information about system and save it to file in order to provide to support person", } +type SortType int + +const ( + SortByCPU SortType = iota + SortByMemory + SortByPID +) + func collectInfo(cliCtx *cli.Context) error { data, err := getData(cliCtx) if err != nil { util.RenderError(err) } + cpuusage := sysutils.CPUUsage() + processes := sysutils.GetProcessesInfo() + totalMemory := sysutils.TotalMemoryUsage() + var builder strings.Builder - builder.WriteString("Disk info:\n") - builder.WriteString(data.Disk.Details) - builder.WriteString("\n\n") - builder.WriteString("CPU info:\n") - writeCPUToStringBuilder(data.CPU, 
&builder) + + writeDiskInfoToStringBuilder(data.Disk, &builder) + writeCPUInfoToStringBuilder(data.CPU, cpuusage, &builder) + + writeProcessesToStringBuilder(processes, cpuusage.Total, totalMemory, &builder) // Save data to file err = util.SaveDataToFile(cliCtx.String(ExportPathFlag.Name), cliCtx.String(ExportFileNameFlag.Name), builder.String()) @@ -81,53 +95,87 @@ func collectInfo(cliCtx *cli.Context) error { return nil } -func writeCPUToStringBuilder(cpuInfo []diagnostics.CPUInfo, builder *strings.Builder) { - spacing := calculateSpacing([]string{"CPU", "VendorID", "Family", "Model", "Stepping", "PhysicalID", "CoreID", "Cores", "ModelName", "Mhz", "CacheSize", "Flags", "Microcode"}) +func writeDiskInfoToStringBuilder(diskInfo diagnostics.DiskInfo, builder *strings.Builder) { + builder.WriteString("Disk info:\n") + builder.WriteString(diskInfo.Details) + builder.WriteString("\n\n") +} +func writeCPUInfoToStringBuilder(cpuInfo []diagnostics.CPUInfo, cpuusage sysutils.CPUUsageInfo, builder *strings.Builder) { + writeOweralCPUInfoToStringBuilder(cpuInfo, builder) + writeCPUUsageToStringBuilder(cpuusage.Cores, builder) +} + +func writeOweralCPUInfoToStringBuilder(cpuInfo []diagnostics.CPUInfo, builder *strings.Builder) { + builder.WriteString("CPU info:\n") + header := table.Row{"CPU", "VendorID", "Family", "Model", "Stepping", "PhysicalID", "CoreID", "Cores", "ModelName", "Mhz", "CacheSize", "Flags", "Microcode"} + rows := make([]table.Row, 0, len(cpuInfo)) for _, cpu := range cpuInfo { - writeStringToBuilder(builder, "CPU", strconv.Itoa(int(cpu.CPU)), spacing) - writeStringToBuilder(builder, "VendorID", cpu.VendorID, spacing) - writeStringToBuilder(builder, "Family", cpu.Family, spacing) - writeStringToBuilder(builder, "Model", cpu.Model, spacing) - writeStringToBuilder(builder, "Stepping", strconv.Itoa(int(cpu.Stepping)), spacing) - writeStringToBuilder(builder, "PhysicalID", cpu.PhysicalID, spacing) - writeStringToBuilder(builder, "CoreID", cpu.CoreID, spacing) - 
writeStringToBuilder(builder, "Cores", strconv.Itoa(int(cpu.Cores)), spacing) - writeStringToBuilder(builder, "ModelName", cpu.ModelName, spacing) - writeStringToBuilder(builder, "Mhz", fmt.Sprintf("%g", cpu.Mhz), spacing) - writeStringToBuilder(builder, "CacheSize", strconv.Itoa(int(cpu.CacheSize)), spacing) - writeStringToBuilder(builder, "Flags", strings.Join(cpu.Flags, ", "), spacing) - writeStringToBuilder(builder, "Microcode", cpu.Microcode, spacing) + rows = append(rows, table.Row{cpu.CPU, cpu.VendorID, cpu.Family, cpu.Model, cpu.Stepping, cpu.PhysicalID, cpu.CoreID, cpu.Cores, cpu.ModelName, cpu.Mhz, cpu.CacheSize, strings.Join(cpu.Flags, ", "), cpu.Microcode}) } + + cpuDataTable := util.ExportTable(header, rows, nil) + builder.WriteString(cpuDataTable) + builder.WriteString("\n\n") } -func calculateSpacing(keysArray []string) int { - max := 0 - for _, key := range keysArray { - if len(key) > max { - max = len(key) - } +func writeCPUUsageToStringBuilder(cpuUsage []float64, builder *strings.Builder) { + builder.WriteString("CPU usage:\n") + header := table.Row{"Core #", "% CPU"} + rows := make([]table.Row, 0, len(cpuUsage)) + for idx, core := range cpuUsage { + rows = append(rows, table.Row{idx + 1, fmt.Sprintf("%.2f", core)}) } - return max + 3 + cpuUsageDataTable := util.ExportTable(header, rows, nil) + builder.WriteString(cpuUsageDataTable) } -func writeStringToBuilder(result *strings.Builder, name string, value string, spacing int) { - marging := 3 - if value == "" { - value = "N/A" +func writeProcessesToStringBuilder(prcInfo []*sysutils.ProcessInfo, cpuUsage float64, totalMemory float64, builder *strings.Builder) { + builder.WriteString("\n\nProcesses info:\n") + + prcInfo = sortProcessesByCPU(prcInfo) + rows := make([]table.Row, 0) + header := table.Row{"PID", "Name", "% CPU", "% Memory"} + + for _, process := range prcInfo { + cpu := fmt.Sprintf("%.2f", process.CPUUsage) + memory := fmt.Sprintf("%.2f", process.Memory) + rows = append(rows, 
table.Row{process.Pid, process.Name, cpu, memory}) } - writeSpacesToBuilder(result, marging) - result.WriteString(name) - result.WriteString(":") - writeSpacesToBuilder(result, spacing-len(name)-1) - result.WriteString(value) - result.WriteString("\n") + footer := table.Row{"Totals", "", fmt.Sprintf("%.2f", cpuUsage), fmt.Sprintf("%.2f", totalMemory)} + + processesTable := util.ExportTable(header, rows, footer) + builder.WriteString(processesTable) +} + +func sortProcesses(prcInfo []*sysutils.ProcessInfo, sorting SortType) []*sysutils.ProcessInfo { + sort.Slice(prcInfo, func(i, j int) bool { + switch sorting { + case SortByCPU: + return prcInfo[i].CPUUsage > prcInfo[j].CPUUsage + case SortByMemory: + return prcInfo[i].Memory > prcInfo[j].Memory + default: + return prcInfo[i].Pid < prcInfo[j].Pid + } + + }) + + return prcInfo +} + +func sortProcessesByCPU(prcInfo []*sysutils.ProcessInfo) []*sysutils.ProcessInfo { + return sortProcesses(prcInfo, SortByCPU) +} + +func sortProcessesByMemory(prcInfo []*sysutils.ProcessInfo) []*sysutils.ProcessInfo { + return sortProcesses(prcInfo, SortByMemory) } -func writeSpacesToBuilder(result *strings.Builder, spaces int) { - result.WriteString(strings.Repeat(" ", spaces)) +func sortProcessesByPID(prcInfo []*sysutils.ProcessInfo) []*sysutils.ProcessInfo { + return sortProcesses(prcInfo, SortByPID) } func getData(cliCtx *cli.Context) (diagnostics.HardwareInfo, error) { diff --git a/cmd/diag/util/util.go b/cmd/diag/util/util.go index 72d40f29bee..daf0acae2d1 100644 --- a/cmd/diag/util/util.go +++ b/cmd/diag/util/util.go @@ -59,7 +59,16 @@ func RenderJson(data interface{}) { } } -func RenderTableWithHeader(title string, header table.Row, rows []table.Row) { +func ExportTable(header table.Row, rows []table.Row, footer table.Row) string { + if len(rows) > 0 { + t := CreateTable(header, rows, footer) + return t.Render() + } + + return "" +} + +func PrintTable(title string, header table.Row, rows []table.Row, footer table.Row) { if title 
!= "" { txt := text.Colors{text.FgBlue, text.Bold} fmt.Println(txt.Sprint(title)) @@ -71,19 +80,30 @@ func RenderTableWithHeader(title string, header table.Row, rows []table.Row) { } if len(rows) > 0 { - t := table.NewWriter() + t := CreateTable(header, rows, footer) t.SetOutputMirror(os.Stdout) + t.Render() + } + + fmt.Print("\n") +} + +func CreateTable(header table.Row, rows []table.Row, footer table.Row) table.Writer { + t := table.NewWriter() + if header != nil { t.AppendHeader(header) - if len(rows) > 0 { - t.AppendRows(rows) - } + } - t.AppendSeparator() - t.Render() + if len(rows) > 0 { + t.AppendRows(rows) } - fmt.Print("\n") + if footer != nil { + t.AppendFooter(footer) + } + + return t } func RenderUseDiagUI() { diff --git a/erigon-lib/sysutils/sysutils.go b/erigon-lib/sysutils/sysutils.go new file mode 100644 index 00000000000..72011ff6967 --- /dev/null +++ b/erigon-lib/sysutils/sysutils.go @@ -0,0 +1,212 @@ +// Copyright 2024 The Erigon Authors +// This file is part of Erigon. +// +// Erigon is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// Erigon is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with Erigon. If not, see . 
+ +package sysutils + +import ( + "time" + + "github.com/ledgerwatch/log/v3" + "github.com/shirou/gopsutil/v4/cpu" + "github.com/shirou/gopsutil/v4/mem" + "github.com/shirou/gopsutil/v4/process" +) + +type ProcessInfo struct { + Pid int32 + Name string + CPUUsage float64 + Memory float32 +} + +type ProcessMerge struct { + CPUUsage float64 + Memory float32 + Times int + Name string +} + +type CPUUsageInfo struct { + Total float64 + Cores []float64 +} + +const ( + iterations = 5 + sleepSeconds = 2 + usageThreshold = 0.05 +) + +func GetProcessesInfo() []*ProcessInfo { + procs, err := process.Processes() + if err != nil { + log.Debug("[Sysutil] Error retrieving processes: %v", err) + } + + return averageProceses(procs) +} + +func AverageProceses(procs []*process.Process) []*ProcessInfo { + return averageProceses(procs) +} + +func averageProceses(procs []*process.Process) []*ProcessInfo { + // Collect processes and calculate average stats. + allProcsRepeats := make([][]*ProcessInfo, 0, iterations) + + // Collect all processes N times with a delay of N seconds to calculate average stats. + for i := 0; i < iterations; i++ { + processes := allProcesses(procs) + allProcsRepeats = append(allProcsRepeats, processes) + time.Sleep(sleepSeconds * time.Second) + } + + // Calculate average stats. 
+ averageProcs := mergeProcesses(allProcsRepeats) + averageProcs = removeProcessesBelowThreshold(averageProcs, usageThreshold) + + return averageProcs +} + +func RemoveProcessesBelowThreshold(processes []*ProcessInfo, treshold float64) []*ProcessInfo { + return removeProcessesBelowThreshold(processes, treshold) +} + +func removeProcessesBelowThreshold(processes []*ProcessInfo, treshold float64) []*ProcessInfo { + // remove processes with CPU or Memory usage less than threshold + filtered := make([]*ProcessInfo, 0, len(processes)) + for _, p := range processes { + if p.CPUUsage >= treshold || p.Memory >= float32(treshold) { + filtered = append(filtered, p) + } + } + + return filtered +} + +func MergeProcesses(allProcsRepeats [][]*ProcessInfo) []*ProcessInfo { + return mergeProcesses(allProcsRepeats) +} + +func mergeProcesses(allProcsRepeats [][]*ProcessInfo) []*ProcessInfo { + if len(allProcsRepeats) == 0 || len(allProcsRepeats[0]) == 0 { + return nil + } + + repeats := len(allProcsRepeats) + if repeats == 1 { + return allProcsRepeats[0] + } + + prcmap := make(map[int32]*ProcessMerge) + + for _, procList := range allProcsRepeats { + for _, proc := range procList { + if prc, exists := prcmap[proc.Pid]; exists { + prc.CPUUsage += proc.CPUUsage + prc.Memory += proc.Memory + prc.Times++ + } else { + prcmap[proc.Pid] = &ProcessMerge{ + CPUUsage: proc.CPUUsage, + Memory: proc.Memory, + Times: 1, + Name: proc.Name, + } + } + } + } + + resultArray := make([]*ProcessInfo, 0, len(prcmap)) + + for pid, prc := range prcmap { + resultArray = append(resultArray, &ProcessInfo{ + Pid: pid, + Name: prc.Name, + CPUUsage: prc.CPUUsage / float64(prc.Times), + Memory: prc.Memory / float32(prc.Times), + }) + } + + return resultArray +} + +func allProcesses(procs []*process.Process) []*ProcessInfo { + processes := make([]*ProcessInfo, 0) + + for _, proc := range procs { + pid := proc.Pid + name, err := proc.Name() + if err != nil { + name = "Unknown" + } + + //remove gopls process as it 
is what we use to get info + if name == "gopls" { + continue + } + + cpuPercent, err := proc.CPUPercent() + if err != nil { + log.Trace("[Sysutil] Error retrieving CPU percent for PID %d: %v Name: %s", pid, err, name) + continue + } + + memPercent, err := proc.MemoryPercent() + if err != nil { + log.Trace("[Sysutil] Error retrieving memory percent for PID %d: %v Name: %s", pid, err, name) + continue + } + + processes = append(processes, &ProcessInfo{Pid: pid, Name: name, CPUUsage: cpuPercent, Memory: memPercent}) + } + + return processes +} + +func TotalCPUUsage() float64 { + totalCPUPercent, err := cpu.Percent(time.Second, false) + if err != nil { + log.Debug("[Sysutil] Error retrieving total CPU usage: %v", err) + } + + return float64(totalCPUPercent[0]) +} + +func CPUUsageByCores() []float64 { + cpuPercent, err := cpu.Percent(time.Second, true) + if err != nil { + log.Debug("[Sysutil] Error retrieving CPU usage by cores: %v", err) + } + + return cpuPercent +} + +func CPUUsage() CPUUsageInfo { + cpuPercent := CPUUsageByCores() + totalCPU := TotalCPUUsage() + + return CPUUsageInfo{Total: totalCPU, Cores: cpuPercent} +} + +func TotalMemoryUsage() float64 { + totalMemory, err := mem.VirtualMemory() + if err != nil { + log.Debug("[Sysutil] Error retrieving total memory usage: %v", err) + } + + return float64(totalMemory.UsedPercent) +} diff --git a/erigon-lib/sysutils/sysutils_test.go b/erigon-lib/sysutils/sysutils_test.go new file mode 100644 index 00000000000..3bea1076295 --- /dev/null +++ b/erigon-lib/sysutils/sysutils_test.go @@ -0,0 +1,68 @@ +package sysutils_test + +import ( + "testing" + + "github.com/ledgerwatch/erigon-lib/sysutils" + "github.com/stretchr/testify/require" +) + +func TestMergeProcesses(t *testing.T) { + initaldata := [][]*sysutils.ProcessInfo{ + { + {Pid: 1, Name: "test1", CPUUsage: 1.0, Memory: 1.0}, + {Pid: 2, Name: "test2", CPUUsage: 2.0, Memory: 2.0}, + {Pid: 3, Name: "test3", CPUUsage: 3.0, Memory: 3.0}, + {Pid: 31, Name: "test31", 
CPUUsage: 3.0, Memory: 3.0}, + }, + { + {Pid: 1, Name: "test1", CPUUsage: 1.0, Memory: 1.0}, + {Pid: 2, Name: "test2", CPUUsage: 1.0, Memory: 1.0}, + {Pid: 22, Name: "test4", CPUUsage: 1.0, Memory: 1.0}, + }, + } + + expected := []*sysutils.ProcessInfo{ + {Pid: 1, Name: "test1", CPUUsage: 1.0, Memory: 1.0}, + {Pid: 2, Name: "test2", CPUUsage: 1.5, Memory: 1.5}, + {Pid: 3, Name: "test3", CPUUsage: 3.0, Memory: 3.0}, + {Pid: 31, Name: "test31", CPUUsage: 3.0, Memory: 3.0}, + {Pid: 22, Name: "test4", CPUUsage: 1.0, Memory: 1.0}, + } + + result := sysutils.MergeProcesses(initaldata) + for _, proc := range result { + require.Contains(t, expected, proc) + } +} + +func TestRemoveProcessesBelowThreshold(t *testing.T) { + initaldata := [][]*sysutils.ProcessInfo{ + { + {Pid: 1, Name: "test1", CPUUsage: 1.0, Memory: 1.0}, + {Pid: 2, Name: "test2", CPUUsage: 2.0, Memory: 2.0}, + {Pid: 3, Name: "test3", CPUUsage: 3.0, Memory: 3.0}, + {Pid: 12, Name: "test5", CPUUsage: 0.001, Memory: 1.0}, + {Pid: 45, Name: "test8", CPUUsage: 0.001, Memory: 0.0}, + }, + { + {Pid: 1, Name: "test1", CPUUsage: 1.0, Memory: 1.0}, + {Pid: 2, Name: "test2", CPUUsage: 1.0, Memory: 1.0}, + {Pid: 22, Name: "test4", CPUUsage: 1.0, Memory: 0.001}, + }, + } + + expected := []*sysutils.ProcessInfo{ + {Pid: 1, Name: "test1", CPUUsage: 1.0, Memory: 1.0}, + {Pid: 2, Name: "test2", CPUUsage: 1.5, Memory: 1.5}, + {Pid: 3, Name: "test3", CPUUsage: 3.0, Memory: 3.0}, + {Pid: 22, Name: "test4", CPUUsage: 1.0, Memory: 0.001}, + {Pid: 12, Name: "test5", CPUUsage: 0.001, Memory: 1.0}, + } + + result := sysutils.MergeProcesses(initaldata) + result = sysutils.RemoveProcessesBelowThreshold(result, 0.01) + for _, proc := range result { + require.Contains(t, expected, proc) + } +} From f0e60131b2a4c817e9e6ba1b03744948a6412226 Mon Sep 17 00:00:00 2001 From: Dmytro Vovk Date: Mon, 12 Aug 2024 17:48:47 +0100 Subject: [PATCH 15/49] diagnostics: added flags to report (#11548) (#11570) - added flags which was applied to run 
command to report --- cmd/diag/sysinfo/sysinfo.go | 52 +++++++++++++++++++++++++++++++++++++ 1 file changed, 52 insertions(+) diff --git a/cmd/diag/sysinfo/sysinfo.go b/cmd/diag/sysinfo/sysinfo.go index 94105d3af60..862b9a195cb 100644 --- a/cmd/diag/sysinfo/sysinfo.go +++ b/cmd/diag/sysinfo/sysinfo.go @@ -61,6 +61,13 @@ var Command = cli.Command{ Description: "Collect information about system and save it to file in order to provide to support person", } +type Flag struct { + Name string `json:"name"` + Value interface{} `json:"value"` + Usage string `json:"usage"` + Default bool `json:"default"` +} + type SortType int const ( @@ -75,11 +82,19 @@ func collectInfo(cliCtx *cli.Context) error { util.RenderError(err) } + flagsData, err := getFlagsData(cliCtx) + if err != nil { + util.RenderError(err) + } + cpuusage := sysutils.CPUUsage() processes := sysutils.GetProcessesInfo() totalMemory := sysutils.TotalMemoryUsage() var builder strings.Builder + builder.WriteString("Flags applied by launch command:\n") + writeFlagsInfoToStringBuilder(flagsData, &builder) + builder.WriteString("\n\n") writeDiskInfoToStringBuilder(data.Disk, &builder) writeCPUInfoToStringBuilder(data.CPU, cpuusage, &builder) @@ -95,6 +110,15 @@ func collectInfo(cliCtx *cli.Context) error { return nil } +func writeFlagsInfoToStringBuilder(flags []Flag, builder *strings.Builder) { + flagsRows := make([]table.Row, 0, len(flags)) + for _, flag := range flags { + flagsRows = append(flagsRows, table.Row{flag.Name, flag.Value}) + } + flagsTableData := util.ExportTable(table.Row{"Flag", "Value"}, flagsRows, nil) + builder.WriteString(flagsTableData) +} + func writeDiskInfoToStringBuilder(diskInfo diagnostics.DiskInfo, builder *strings.Builder) { builder.WriteString("Disk info:\n") builder.WriteString(diskInfo.Details) @@ -190,3 +214,31 @@ func getData(cliCtx *cli.Context) (diagnostics.HardwareInfo, error) { return data, nil } + +func getFlagsData(cliCtx *cli.Context) ([]Flag, error) { + var rawData 
map[string]map[string]interface{} + url := "http://" + cliCtx.String(flags.DebugURLFlag.Name) + flags.ApiPath + "/flags" + + err := util.MakeHttpGetCall(cliCtx.Context, url, &rawData) + + if err != nil { + return nil, err + } + + flagItems := make([]Flag, 0, len(rawData)) + for name, item := range rawData { + if item["default"].(bool) { + continue + } + + flagItem := Flag{ + Name: name, + Value: item["value"], + Usage: item["usage"].(string), + Default: item["default"].(bool), + } + flagItems = append(flagItems, flagItem) + } + + return flagItems, nil +} From 9b19cd542008d4de3eb267df3c606b2203284ed6 Mon Sep 17 00:00:00 2001 From: milen <94537774+taratorio@users.noreply.github.com> Date: Mon, 12 Aug 2024 17:59:37 +0100 Subject: [PATCH 16/49] dbg: add save heap options for logger and memstats inputs (#11576) cherry-pick bf388394b0acb09d3ae0f941e680d1234164bcc3 in E2 --- erigon-lib/common/dbg/experiments.go | 71 ++++++++++++++++++--- eth/stagedsync/stage_headers.go | 2 +- turbo/stages/headerdownload/header_algos.go | 4 +- 3 files changed, 66 insertions(+), 11 deletions(-) diff --git a/erigon-lib/common/dbg/experiments.go b/erigon-lib/common/dbg/experiments.go index 787fd57ca11..7dfe8008cbf 100644 --- a/erigon-lib/common/dbg/experiments.go +++ b/erigon-lib/common/dbg/experiments.go @@ -27,6 +27,7 @@ import ( "github.com/ledgerwatch/log/v3" + libcommon "github.com/ledgerwatch/erigon-lib/common" "github.com/ledgerwatch/erigon-lib/mmap" ) @@ -326,26 +327,80 @@ func LogHashMismatchReason() bool { return logHashMismatchReason } -func SaveHeapProfileNearOOM() { +type saveHeapOptions struct { + memStats *runtime.MemStats + logger *log.Logger +} + +type SaveHeapOption func(options *saveHeapOptions) + +func SaveHeapWithMemStats(memStats *runtime.MemStats) SaveHeapOption { + return func(options *saveHeapOptions) { + options.memStats = memStats + } +} + +func SaveHeapWithLogger(logger *log.Logger) SaveHeapOption { + return func(options *saveHeapOptions) { + options.logger = 
logger + } +} + +func SaveHeapProfileNearOOM(opts ...SaveHeapOption) { if !saveHeapProfile { return } - var m runtime.MemStats - ReadMemStats(&m) - if m.Alloc < (mmap.TotalMemory()/100)*45 { + var options saveHeapOptions + for _, opt := range opts { + opt(&options) + } + + var logger log.Logger + if options.logger != nil { + logger = *options.logger + } + + var memStats runtime.MemStats + if options.memStats != nil { + memStats = *options.memStats + } else { + ReadMemStats(&memStats) + } + + totalMemory := mmap.TotalMemory() + if logger != nil { + logger.Info( + "[Experiment] heap profile threshold check", + "alloc", libcommon.ByteCount(memStats.Alloc), + "total", libcommon.ByteCount(totalMemory), + ) + } + if memStats.Alloc < (totalMemory/100)*45 { return } // above 45% filePath := filepath.Join(os.TempDir(), "erigon-mem.prof") - log.Info("[Experiment] saving heap profile as near OOM", "alloc", m.Alloc, "filePath", filePath) + if logger != nil { + logger.Info("[Experiment] saving heap profile as near OOM", "filePath", filePath) + } + + f, err := os.Create(filePath) + if err != nil && logger != nil { + logger.Warn("[Experiment] could not create heap profile file", "err", err) + } - f, _ := os.Create(filePath) defer func() { - _ = f.Close() + err := f.Close() + if err != nil && logger != nil { + logger.Warn("[Experiment] could not close heap profile file", "err", err) + } }() runtime.GC() - _ = pprof.WriteHeapProfile(f) + err = pprof.WriteHeapProfile(f) + if err != nil && logger != nil { + logger.Warn("[Experiment] could not write heap profile file", "err", err) + } } diff --git a/eth/stagedsync/stage_headers.go b/eth/stagedsync/stage_headers.go index cef3cafcc99..f29bf2f4275 100644 --- a/eth/stagedsync/stage_headers.go +++ b/eth/stagedsync/stage_headers.go @@ -307,7 +307,7 @@ Loop: logger.Info("Req/resp stats", "req", stats.Requests, "reqMin", stats.ReqMinBlock, "reqMax", stats.ReqMaxBlock, "skel", stats.SkeletonRequests, "skelMin", stats.SkeletonReqMinBlock, 
"skelMax", stats.SkeletonReqMaxBlock, "resp", stats.Responses, "respMin", stats.RespMinBlock, "respMax", stats.RespMaxBlock, "dups", stats.Duplicates) - dbg.SaveHeapProfileNearOOM() + dbg.SaveHeapProfileNearOOM(dbg.SaveHeapWithLogger(&logger)) cfg.hd.LogAnchorState() if wasProgress { logger.Warn("Looks like chain is not progressing, moving to the next stage") diff --git a/turbo/stages/headerdownload/header_algos.go b/turbo/stages/headerdownload/header_algos.go index 739c5257d95..4604ee360bc 100644 --- a/turbo/stages/headerdownload/header_algos.go +++ b/turbo/stages/headerdownload/header_algos.go @@ -105,7 +105,7 @@ func (hd *HeaderDownload) SingleHeaderAsSegment(headerRaw []byte, header *types. headerHash := types.RawRlpHash(headerRaw) if _, bad := hd.badHeaders[headerHash]; bad { hd.stats.RejectedBadHeaders++ - dbg.SaveHeapProfileNearOOM() + dbg.SaveHeapProfileNearOOM(dbg.SaveHeapWithLogger(&hd.logger)) hd.logger.Warn("[downloader] SingleHeaderAsSegment: Rejected header marked as bad", "hash", headerHash, "height", header.Number.Uint64()) return nil, BadBlockPenalty, nil } @@ -519,7 +519,7 @@ func (hd *HeaderDownload) InsertHeader(hf FeedHeaderFunc, terminalTotalDifficult hd.removeUpwards(link) dataflow.HeaderDownloadStates.AddChange(link.blockHeight, dataflow.HeaderBad) hd.stats.RejectedBadHeaders++ - dbg.SaveHeapProfileNearOOM() + dbg.SaveHeapProfileNearOOM(dbg.SaveHeapWithLogger(&hd.logger)) hd.logger.Warn("[downloader] InsertHeader: Rejected header marked as bad", "hash", link.hash, "height", link.blockHeight) return true, false, 0, lastTime, nil } From aa6c7e6da15e7522093900487ee0cfdb556e89e1 Mon Sep 17 00:00:00 2001 From: Giulio rebuffo Date: Mon, 12 Aug 2024 22:18:47 +0200 Subject: [PATCH 17/49] Cherry-picked essential caplin stuff (#11569) Co-authored-by: Kewei --- cl/antiquary/antiquary.go | 6 +++++- cl/phase1/stages/stage_history_download.go | 20 ++++++++++++-------- cmd/utils/flags.go | 2 +- 3 files changed, 18 insertions(+), 10 deletions(-) diff --git 
a/cl/antiquary/antiquary.go b/cl/antiquary/antiquary.go index 61a03ccd130..91f363c55c1 100644 --- a/cl/antiquary/antiquary.go +++ b/cl/antiquary/antiquary.go @@ -20,7 +20,7 @@ import ( "github.com/ledgerwatch/log/v3" ) -const safetyMargin = 2_000 // We retire snapshots 2k blocks after the finalized head +const safetyMargin = 10_000 // We retire snapshots 10k blocks after the finalized head // Antiquary is where the snapshots go, aka old history, it is what keep track of the oldest records. type Antiquary struct { @@ -304,6 +304,10 @@ func (a *Antiquary) antiquateBlobs() error { defer roTx.Rollback() // perform blob antiquation if it is time to. currentBlobsProgress := a.sn.FrozenBlobs() + // We should NEVER get ahead of the block snapshots. + if currentBlobsProgress >= a.sn.BlocksAvailable() { + return nil + } minimunBlobsProgress := ((a.cfg.DenebForkEpoch * a.cfg.SlotsPerEpoch) / snaptype.Erigon2MergeLimit) * snaptype.Erigon2MergeLimit currentBlobsProgress = utils.Max64(currentBlobsProgress, minimunBlobsProgress) // read the finalized head diff --git a/cl/phase1/stages/stage_history_download.go b/cl/phase1/stages/stage_history_download.go index 44985789950..73013fdf3f9 100644 --- a/cl/phase1/stages/stage_history_download.go +++ b/cl/phase1/stages/stage_history_download.go @@ -204,18 +204,18 @@ func SpawnStageHistoryDownload(cfg StageHistoryReconstructionCfg, ctx context.Co close(finishCh) if cfg.blobsBackfilling { go func() { - if err := downloadBlobHistoryWorker(cfg, ctx, logger); err != nil { + if err := downloadBlobHistoryWorker(cfg, ctx, true, logger); err != nil { logger.Error("Error downloading blobs", "err", err) } - // set a timer every 1 hour as a failsafe - ticker := time.NewTicker(time.Hour) + // set a timer every 15 minutes as a failsafe + ticker := time.NewTicker(15 * time.Minute) defer ticker.Stop() for { select { case <-ctx.Done(): return case <-ticker.C: - if err := downloadBlobHistoryWorker(cfg, ctx, logger); err != nil { + if err := 
downloadBlobHistoryWorker(cfg, ctx, false, logger); err != nil { logger.Error("Error downloading blobs", "err", err) } } @@ -249,7 +249,7 @@ func SpawnStageHistoryDownload(cfg StageHistoryReconstructionCfg, ctx context.Co } // downloadBlobHistoryWorker is a worker that downloads the blob history by using the already downloaded beacon blocks -func downloadBlobHistoryWorker(cfg StageHistoryReconstructionCfg, ctx context.Context, logger log.Logger) error { +func downloadBlobHistoryWorker(cfg StageHistoryReconstructionCfg, ctx context.Context, shouldLog bool, logger log.Logger) error { currentSlot := cfg.startingSlot + 1 blocksBatchSize := uint64(8) // requests 8 blocks worth of blobs at a time tx, err := cfg.indiciesDB.BeginRo(ctx) @@ -263,7 +263,7 @@ func downloadBlobHistoryWorker(cfg StageHistoryReconstructionCfg, ctx context.Co prevLogSlot := currentSlot prevTime := time.Now() targetSlot := cfg.beaconCfg.DenebForkEpoch * cfg.beaconCfg.SlotsPerEpoch - cfg.logger.Info("Downloading blobs backwards", "from", currentSlot, "to", targetSlot) + for currentSlot >= targetSlot { if currentSlot <= cfg.sn.FrozenBlobs() { break @@ -312,7 +312,9 @@ func downloadBlobHistoryWorker(cfg StageHistoryReconstructionCfg, ctx context.Co case <-ctx.Done(): return ctx.Err() case <-logInterval.C: - + if !shouldLog { + continue + } blkSec := float64(prevLogSlot-currentSlot) / time.Since(prevTime).Seconds() blkSecStr := fmt.Sprintf("%.1f", blkSec) // round to 1 decimal place and convert to string @@ -353,7 +355,9 @@ func downloadBlobHistoryWorker(cfg StageHistoryReconstructionCfg, ctx context.Co continue } } - log.Info("Blob history download finished successfully") + if shouldLog { + logger.Info("Blob history download finished successfully") + } cfg.antiquary.NotifyBlobBackfilled() return nil } diff --git a/cmd/utils/flags.go b/cmd/utils/flags.go index 7cf4b19a16f..68f4e36227a 100644 --- a/cmd/utils/flags.go +++ b/cmd/utils/flags.go @@ -926,7 +926,7 @@ var ( BeaconAPIFlag = 
cli.StringSliceFlag{ Name: "beacon.api", - Usage: "Enable beacon API (avaiable endpoints: beacon, builder, config, debug, events, node, validator, rewards, lighthouse)", + Usage: "Enable beacon API (avaiable endpoints: beacon, builder, config, debug, events, node, validator, lighthouse)", } BeaconApiProtocolFlag = cli.StringFlag{ Name: "beacon.api.protocol", From 2661ad347a994a7d4cb5269704eb792a182fad57 Mon Sep 17 00:00:00 2001 From: Kewei Date: Tue, 13 Aug 2024 21:37:06 +0900 Subject: [PATCH 18/49] Fix panic in caplin api get validator (#11419) (#11583) Fix https://github.com/erigontech/erigon/issues/11414 root cause: empty validator set --- cl/beacon/handler/validators.go | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/cl/beacon/handler/validators.go b/cl/beacon/handler/validators.go index 7ea0746f398..7b7d332fb25 100644 --- a/cl/beacon/handler/validators.go +++ b/cl/beacon/handler/validators.go @@ -404,12 +404,19 @@ func (a *ApiHandler) GetEthV1BeaconStatesValidator(w http.ResponseWriter, r *htt if err != nil { return nil, err } + if validatorSet == nil { + return nil, beaconhttp.NewEndpointError(http.StatusNotFound, errors.New("validators not found")) + } balances, err := a.stateReader.ReadValidatorsBalances(tx, *slot) if err != nil { return nil, err } + if balances == nil { + return nil, beaconhttp.NewEndpointError(http.StatusNotFound, errors.New("balances not found")) + } return responseValidator(validatorIndex, stateEpoch, balances, validatorSet, true) } + balances, err := a.forkchoiceStore.GetBalances(blockRoot) if err != nil { return nil, err From 60971ea2f825b68230fceb35ebde8c02b26c061e Mon Sep 17 00:00:00 2001 From: Dmytro Vovk Date: Tue, 13 Aug 2024 13:49:23 +0100 Subject: [PATCH 19/49] diagnostics: added api to get sys info data (#11589) (#11592) --- diagnostics/sysinfo.go | 35 +++++++++++++++++++++++++++++++++++ 1 file changed, 35 insertions(+) diff --git a/diagnostics/sysinfo.go b/diagnostics/sysinfo.go index 586692fa3ca..bf1d9498ecb 100644 
--- a/diagnostics/sysinfo.go +++ b/diagnostics/sysinfo.go @@ -17,9 +17,11 @@ package diagnostics import ( + "encoding/json" "net/http" diaglib "github.com/ledgerwatch/erigon-lib/diagnostics" + "github.com/ledgerwatch/erigon-lib/sysutils" ) func SetupSysInfoAccess(metricsMux *http.ServeMux, diag *diaglib.DiagnosticClient) { @@ -32,8 +34,41 @@ func SetupSysInfoAccess(metricsMux *http.ServeMux, diag *diaglib.DiagnosticClien w.Header().Set("Content-Type", "application/json") writeHardwareInfo(w, diag) }) + + metricsMux.HandleFunc("/cpu-usage", func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Access-Control-Allow-Origin", "*") + w.Header().Set("Content-Type", "application/json") + writeCPUUsage(w) + }) + + metricsMux.HandleFunc("/processes-info", func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Access-Control-Allow-Origin", "*") + w.Header().Set("Content-Type", "application/json") + writeProcessesInfo(w) + }) + + metricsMux.HandleFunc("/memory-info", func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Access-Control-Allow-Origin", "*") + w.Header().Set("Content-Type", "application/json") + writeMemoryInfo(w) + }) } func writeHardwareInfo(w http.ResponseWriter, diag *diaglib.DiagnosticClient) { diag.HardwareInfoJson(w) } + +func writeCPUUsage(w http.ResponseWriter) { + cpuusage := sysutils.CPUUsage() + json.NewEncoder(w).Encode(cpuusage) +} + +func writeProcessesInfo(w http.ResponseWriter) { + processes := sysutils.GetProcessesInfo() + json.NewEncoder(w).Encode(processes) +} + +func writeMemoryInfo(w http.ResponseWriter) { + totalMemory := sysutils.TotalMemoryUsage() + json.NewEncoder(w).Encode(totalMemory) +} From d24e5d45755d7b23075c507ad9216e1d60ad03de Mon Sep 17 00:00:00 2001 From: VBulikov Date: Wed, 14 Aug 2024 10:58:39 +0200 Subject: [PATCH 20/49] Bump version to 2.60.6 (#11607) --- params/version.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/params/version.go b/params/version.go index 
84287fbbf53..947146d3602 100644 --- a/params/version.go +++ b/params/version.go @@ -33,7 +33,7 @@ var ( const ( VersionMajor = 2 // Major version component of the current release VersionMinor = 60 // Minor version component of the current release - VersionMicro = 5 // Patch version component of the current release + VersionMicro = 6 // Patch version component of the current release VersionModifier = "" // Modifier component of the current release VersionKeyCreated = "ErigonVersionCreated" VersionKeyFinished = "ErigonVersionFinished" From 617308b5fceeed5a6e151cf2722223682546ea9b Mon Sep 17 00:00:00 2001 From: Kewei Date: Thu, 15 Aug 2024 21:29:41 +0900 Subject: [PATCH 21/49] fix panic issue in initializeStateAntiquaryIfNeeded (#11608) (#11624) related to https://github.com/erigontech/erigon/issues/11482 --- cl/antiquary/state_antiquary.go | 12 +++++++++--- .../historical_states_reader.go | 5 +++++ 2 files changed, 14 insertions(+), 3 deletions(-) diff --git a/cl/antiquary/state_antiquary.go b/cl/antiquary/state_antiquary.go index dcd23df2ea7..94087884745 100644 --- a/cl/antiquary/state_antiquary.go +++ b/cl/antiquary/state_antiquary.go @@ -423,8 +423,9 @@ func (s *Antiquary) initializeStateAntiquaryIfNeeded(ctx context.Context, tx kv. return err } // We want to backoff by some slots until we get a correct state from DB. - // we start from 1 * clparams.SlotsPerDump. - backoffStep := uint64(10) + // we start from 10 * clparams.SlotsPerDump. + backoffStrides := uint64(10) + backoffStep := backoffStrides historicalReader := historical_states_reader.NewHistoricalStatesReader(s.cfg, s.snReader, s.validatorsTable, s.genesisState) @@ -447,6 +448,11 @@ func (s *Antiquary) initializeStateAntiquaryIfNeeded(ctx context.Context, tx kv. 
if err != nil { return fmt.Errorf("failed to read historical state at slot %d: %w", attempt, err) } + if s.currentState == nil { + log.Warn("historical state not found, backoff more and try again", "slot", attempt) + backoffStep += backoffStrides + continue + } computedBlockRoot, err := s.currentState.BlockRoot() if err != nil { @@ -459,7 +465,7 @@ func (s *Antiquary) initializeStateAntiquaryIfNeeded(ctx context.Context, tx kv. if computedBlockRoot != expectedBlockRoot { log.Debug("Block root mismatch, trying again", "slot", attempt, "expected", expectedBlockRoot) // backoff more - backoffStep += 10 + backoffStep += backoffStrides continue } break diff --git a/cl/persistence/state/historical_states_reader/historical_states_reader.go b/cl/persistence/state/historical_states_reader/historical_states_reader.go index 85d1258c95b..79bf32fd38c 100644 --- a/cl/persistence/state/historical_states_reader/historical_states_reader.go +++ b/cl/persistence/state/historical_states_reader/historical_states_reader.go @@ -19,6 +19,7 @@ import ( "github.com/ledgerwatch/erigon/cl/phase1/core/state" "github.com/ledgerwatch/erigon/cl/phase1/core/state/lru" "github.com/ledgerwatch/erigon/turbo/snapshotsync/freezeblocks" + "github.com/ledgerwatch/log/v3" libcommon "github.com/ledgerwatch/erigon-lib/common" ) @@ -62,6 +63,7 @@ func (r *HistoricalStatesReader) ReadHistoricalState(ctx context.Context, tx kv. // If this happens, we need to update our static tables if slot > latestProcessedState || slot > r.validatorTable.Slot() { + log.Warn("slot is ahead of the latest processed state", "slot", slot, "latestProcessedState", latestProcessedState, "validatorTableSlot", r.validatorTable.Slot()) return nil, nil } @@ -74,6 +76,7 @@ func (r *HistoricalStatesReader) ReadHistoricalState(ctx context.Context, tx kv. 
return nil, err } if block == nil { + log.Warn("block not found", "slot", slot) return nil, nil } blockHeader := block.SignedBeaconBlockHeader().Header @@ -84,6 +87,7 @@ func (r *HistoricalStatesReader) ReadHistoricalState(ctx context.Context, tx kv. return nil, err } if slotData == nil { + log.Warn("slot data not found", "slot", slot) return nil, nil } roundedSlot := r.cfg.RoundSlotToEpoch(slot) @@ -93,6 +97,7 @@ func (r *HistoricalStatesReader) ReadHistoricalState(ctx context.Context, tx kv. return nil, fmt.Errorf("failed to read epoch data: %w", err) } if epochData == nil { + log.Warn("epoch data not found", "slot", slot, "roundedSlot", roundedSlot) return nil, nil } From d4647a13b699e29d4605cde72c67efa79be14fac Mon Sep 17 00:00:00 2001 From: Dmytro Vovk Date: Fri, 16 Aug 2024 07:51:11 +0100 Subject: [PATCH 22/49] diagnostics: fix setup (#11633) (#11639) Fixed issue with setup diagnostics client on erigon start --- diagnostics/setup.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/diagnostics/setup.go b/diagnostics/setup.go index e792ccef6ab..38ee64d1cf6 100644 --- a/diagnostics/setup.go +++ b/diagnostics/setup.go @@ -61,7 +61,7 @@ func Setup(ctx *cli.Context, node *node.ErigonNode, metricsMux *http.ServeMux, p pprofPort := ctx.Int(pprofPortFlag) pprofAddress := fmt.Sprintf("%s:%d", pprofHost, pprofPort) - if diagAddress == metricsAddress { + if diagAddress == metricsAddress && metricsMux != nil { diagMux = SetupDiagnosticsEndpoint(metricsMux, diagAddress) } else if diagAddress == pprofAddress && pprofMux != nil { diagMux = SetupDiagnosticsEndpoint(pprofMux, diagAddress) From 96fd184f9c5d3835e16255b8d06b97beb56a6827 Mon Sep 17 00:00:00 2001 From: Somnath Date: Sat, 17 Aug 2024 05:53:52 +0400 Subject: [PATCH 23/49] Add blob fee to prestate tracer (#11650) --- core/state_transition.go | 1 + core/vm/evmtypes/evmtypes.go | 1 + eth/tracers/native/prestate.go | 1 + 3 files changed, 3 insertions(+) diff --git a/core/state_transition.go 
b/core/state_transition.go index 1dd73ed42ba..bd4d9af1160 100644 --- a/core/state_transition.go +++ b/core/state_transition.go @@ -256,6 +256,7 @@ func (st *StateTransition) buyGas(gasBailout bool) error { } st.gasRemaining += st.msg.Gas() st.initialGas = st.msg.Gas() + st.evm.BlobFee = blobGasVal if subBalance { st.state.SubBalance(st.msg.From(), gasVal) diff --git a/core/vm/evmtypes/evmtypes.go b/core/vm/evmtypes/evmtypes.go index 4b919f6b3e3..9d1e4c9d92e 100644 --- a/core/vm/evmtypes/evmtypes.go +++ b/core/vm/evmtypes/evmtypes.go @@ -42,6 +42,7 @@ type TxContext struct { TxHash common.Hash Origin common.Address // Provides information for ORIGIN GasPrice *uint256.Int // Provides information for GASPRICE + BlobFee *uint256.Int // The fee for blobs(blobGas * blobGasPrice) incurred in the txn BlobHashes []common.Hash // Provides versioned blob hashes for BLOBHASH } diff --git a/eth/tracers/native/prestate.go b/eth/tracers/native/prestate.go index 16386a67b3b..94779c52cbf 100644 --- a/eth/tracers/native/prestate.go +++ b/eth/tracers/native/prestate.go @@ -116,6 +116,7 @@ func (t *prestateTracer) CaptureStart(env *vm.EVM, from libcommon.Address, to li // The sender balance is after reducing: value. // We need to re-add it to get the pre-tx balance. fromBal.Add(fromBal, valueBig) + fromBal.Add(fromBal, env.BlobFee.ToBig()) // Nonce has been incremented before reaching here // when txn is not a "create". From 3d7c090448ba2435d8778952892272c93c0c300d Mon Sep 17 00:00:00 2001 From: lupin012 <58134934+lupin012@users.noreply.github.com> Date: Tue, 20 Aug 2024 07:39:46 +0200 Subject: [PATCH 24/49] fix: debug_accountRange(): increase block_number on storage walk (e2) (#11669) I have done specific integration test(on e2) and compare the result with etherscan (block number 0x3D08FF: 3999999). 
etherscan link: https://etherscan.io/tx/0x535bd65cfee8655a1ffd8a28e065b47bcf557b10641491a541ab1dd08496709e#statechange for account 0xEce701C76bD00D1C3f96410a0C69eA8Dfcf5f34E if use block_number + 1 the debug_accountRange() returns correctly for storage 0x0000000000000000000000000000000000000000000000000000000000000004 the new value: 0x000000000000000000000000000000000000000000000064a66fad67042ceb4a; using current software returns the old value (0x000000000000000000000000000000000000000000000064a3a922764918eb4b). debug_accountRange() returns : "storage": { ...... "0x0000000000000000000000000000000000000000000000000000000000000004": "64a66fad67042ceb4a", ---- } If approved I will make change for e3 --- core/state/dump.go | 5 ++--- turbo/jsonrpc/debug_api_test.go | 2 +- 2 files changed, 3 insertions(+), 4 deletions(-) diff --git a/core/state/dump.go b/core/state/dump.go index fff70b3ddb6..90c255b53db 100644 --- a/core/state/dump.go +++ b/core/state/dump.go @@ -152,12 +152,11 @@ func (d *Dumper) DumpToCollector(c DumpCollector, excludeCode, excludeStorage bo if d.historyV3 { ttx := d.db.(kv.TemporalTx) var err error - // Why only account does +1? 
txNum, err = rawdbv3.TxNums.Min(ttx, d.blockNumber+1) if err != nil { return nil, err } - txNumForStorage, err = rawdbv3.TxNums.Min(ttx, d.blockNumber) + txNumForStorage, err = rawdbv3.TxNums.Min(ttx, d.blockNumber+1) if err != nil { return nil, err } @@ -281,7 +280,7 @@ func (d *Dumper) DumpToCollector(c DumpCollector, excludeCode, excludeStorage bo addr, incarnation, libcommon.Hash{}, /* startLocation */ - d.blockNumber, + d.blockNumber+1, func(_, loc, vs []byte) (bool, error) { account.Storage[libcommon.BytesToHash(loc).String()] = common.Bytes2Hex(vs) h, _ := libcommon.HashData(loc) diff --git a/turbo/jsonrpc/debug_api_test.go b/turbo/jsonrpc/debug_api_test.go index b59d037fae7..2fc69cf8f38 100644 --- a/turbo/jsonrpc/debug_api_test.go +++ b/turbo/jsonrpc/debug_api_test.go @@ -326,7 +326,7 @@ func TestAccountRange(t *testing.T) { n = rpc.BlockNumber(7) result, err = api.AccountRange(m.Ctx, rpc.BlockNumberOrHash{BlockNumber: &n}, addr[:], 1, false, false) require.NoError(t, err) - require.Equal(t, 0, len(result.Accounts[addr].Storage)) + require.Equal(t, 35, len(result.Accounts[addr].Storage)) n = rpc.BlockNumber(10) result, err = api.AccountRange(m.Ctx, rpc.BlockNumberOrHash{BlockNumber: &n}, addr[:], 1, false, false) From f30c4c43deda14446b9ba2c9b0c36ba8b0aa474e Mon Sep 17 00:00:00 2001 From: Dmytro Vovk Date: Sat, 24 Aug 2024 09:07:14 +0100 Subject: [PATCH 25/49] diagnostics: added func to grab heap profile (#11643) (#11649) - Added functionality to grab the heap profile by calling the diagnostics endpoint - Added support to pass heap profile file to diagnostics UI through WebSocket --- diagnostics/sysinfo.go | 16 ++++++++++++++++ turbo/app/support_cmd.go | 31 +++++++++++++++++++++++++++++++ 2 files changed, 47 insertions(+) diff --git a/diagnostics/sysinfo.go b/diagnostics/sysinfo.go index bf1d9498ecb..aae54428ed7 100644 --- a/diagnostics/sysinfo.go +++ b/diagnostics/sysinfo.go @@ -18,7 +18,9 @@ package diagnostics import ( "encoding/json" + "fmt" "net/http" 
+ "runtime/pprof" diaglib "github.com/ledgerwatch/erigon-lib/diagnostics" "github.com/ledgerwatch/erigon-lib/sysutils" @@ -52,6 +54,20 @@ func SetupSysInfoAccess(metricsMux *http.ServeMux, diag *diaglib.DiagnosticClien w.Header().Set("Content-Type", "application/json") writeMemoryInfo(w) }) + + metricsMux.HandleFunc("/heap-profile", func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Access-Control-Allow-Origin", "*") + w.Header().Set("Content-Type", "aplication/profile") + writeHeapProfile(w) + }) +} + +func writeHeapProfile(w http.ResponseWriter) { + err := pprof.Lookup("heap").WriteTo(w, 0) + if err != nil { + http.Error(w, fmt.Sprintf("Failed to write profile: %v", err), http.StatusInternalServerError) + return + } } func writeHardwareInfo(w http.ResponseWriter, diag *diaglib.DiagnosticClient) { diff --git a/turbo/app/support_cmd.go b/turbo/app/support_cmd.go index 7801e92ce67..35e2c482d7c 100644 --- a/turbo/app/support_cmd.go +++ b/turbo/app/support_cmd.go @@ -408,6 +408,37 @@ func tunnel(ctx context.Context, cancel context.CancelFunc, sigs chan os.Signal, }) } + case "aplication/profile": + if _, err := io.Copy(buffer, debugResponse.Body); err != nil { + return codec.WriteJSON(ctx1, &nodeResponse{ + Id: requestId, + Error: &responseError{ + Code: http.StatusInternalServerError, + Message: fmt.Sprintf("Request for metrics method [%s] failed: %v", debugURL, err), + }, + Last: true, + }) + } + + data, err := json.Marshal(struct { + Data []byte `json:"chunk"` + }{ + Data: buffer.Bytes(), + }) + + buffer = bytes.NewBuffer(data) + + if err != nil { + return codec.WriteJSON(ctx1, &nodeResponse{ + Id: requestId, + Error: &responseError{ + Code: int64(http.StatusInternalServerError), + Message: fmt.Sprintf("Can't copy metrics response for [%s]: %s", debugURL, err), + }, + Last: true, + }) + } + default: return codec.WriteJSON(ctx1, &nodeResponse{ Id: requestId, From 9746e6a27019cc46baa6ea14466a3f20b21392c3 Mon Sep 17 00:00:00 2001 From: lystopad Date: 
Thu, 29 Aug 2024 11:39:31 +0200 Subject: [PATCH 26/49] Backport changes in the release workflow. (#11791) --- .github/workflows/release.yml | 239 +++++-- .goreleaser.yml | 1115 +++++++++++++++++++++++++++++++-- Dockerfile.release | 54 +- 3 files changed, 1302 insertions(+), 106 deletions(-) diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 9b61c414e24..926812c3e3a 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -1,65 +1,218 @@ name: Release -# Uncomment the following to let goreleaser automatically -# create a GitHub release when a tag is pushed. -# permissions: -# contents: write +env: + APPLICATION: "erigon" + BUILDER_IMAGE: "ghcr.io/goreleaser/goreleaser-cross:v1.21.13" + DOCKER_BASE_IMAGE: "alpine:3.20.2" + APP_REPO: "erigontech/erigon" + PACKAGE: "github.com/erigontech/erigon" + DOCKERHUB_REPOSITORY: "erigontech/erigon" + DOCKERFILE_PATH: "./Dockerfile.release" + GITHUB_AUTOMATION_EMAIL: "github-automation@erigon.tech" + GITHUB_AUTOMATION_NAME: "Erigon Github Automation" + LABEL_DESCRIPTION: "Erigon is an implementation of Ethereum (execution layer with embeddable consensus layer), on the efficiency frontier. Archive Node by default." on: push: branches-ignore: - '**' - tags: - - 'v*.*.*' - # to be used by fork patch-releases ^^ - - 'v*.*.*-*' + #branches: + # - 'master' + #tags: + ## only trigger on release tags: + #- 'v*.*.*' + #- 'v*.*.*-*' workflow_dispatch: + inputs: + checkout_ref: + required: true + type: string + default: 'main' + description: 'The branch to checkout and build artifacts from. By default "main".' + release_version: + required: true + type: string + description: 'Release version number (Pattern - #.#.# , f.e. 2.41.3 or 3.0.0 or 3.0.0-alpha1 for pre-releases. 
Do not prefix it with "v".)' + perform_release: + required: false + type: boolean + default: false + description: 'perform_release: when set then all artifacts will be published and the DRAFT of the release + notes will be created.' + publish_latest_tag: + required: false + type: boolean + default: false + description: 'publish_latest_tag: when set then docker image with tag :latest will be also published' jobs: - goreleaser: - runs-on: ubuntu-latest - steps: - - name: Maximize build space - uses: AdityaGarg8/remove-unwanted-software@v1 - with: - remove-dotnet: 'true' - remove-android: 'true' - remove-haskell: 'true' - - name: Checkout - uses: actions/checkout@v3 + build-release: + ## runs-on: ubuntu-22.04 + runs-on: ubuntu-latest-devops-xxlarge + timeout-minutes: 30 + name: Build Artifacts and multi-platform Docker image, publish draft of the Release Notes + + steps: + - name: Checkout git repository ${{ env.APP_REPO }} + uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 ## 4.1.7 release with: + repository: ${{ env.APP_REPO }} fetch-depth: 0 + ref: ${{ inputs.checkout_ref }} - - name: dockerhub-login - uses: docker/login-action@v2 - with: - username: ${{ secrets.DOCKERHUB }} - password: ${{ secrets.DOCKERHUB_KEY }} - - name: ghcr-login - uses: docker/login-action@v2 - with: - registry: ghcr.io - username: ${{ github.repository_owner }} - password: ${{ secrets.GITHUB_TOKEN }} + - name: Check if tag ${{ inputs.release_version }} already exists in case perform_release is set. + if: ${{ (inputs.perform_release) && (inputs.release_version != '') }} + run: | + if git ls-remote --exit-code --quiet --tags origin '${{ inputs.release_version }}'; then + echo "ERROR: tag ${{ inputs.release_version }} exists and workflow is performing release. Exit." + exit 1 + else + echo "OK: tag ${{ inputs.release_version }} does not exists. Proceeding." 
+ fi - - name: Prepare - id: prepare + - name: Get commit id + id: getCommitId run: | - TAG=${GITHUB_REF#refs/tags/} - echo "tag_name=${TAG}" >> $GITHUB_OUTPUT + echo "id=$(git rev-parse HEAD)" >> $GITHUB_OUTPUT + echo "short_commit_id=$(git rev-parse --short=7 HEAD)" >> $GITHUB_OUTPUT + + - name: Login to Docker Hub + uses: docker/login-action@9780b0c442fbb1117ed29e0efdff1e18412f7567 ## v3.3.0 + with: + username: ${{ secrets.ORG_DOCKERHUB_ERIGONTECH_USERNAME }} + password: ${{ secrets.ORG_DOCKERHUB_ERIGONTECH_TOKEN }} - name: Set up QEMU - uses: docker/setup-qemu-action@v2 + uses: docker/setup-qemu-action@49b3bc8e6bdd4a60e6116a5414239cba5943d3cf ## v3.2.0 - - run: echo ${{ steps.prepare.outputs.tag_name }} + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@988b5a0280414f521da01fcc63a27aeeb4b104db ## v3.6.1 - - name: Run GoReleaser + - name: Build binaries with goreleaser + env: + BUILD_VERSION: ${{ inputs.release_version }} + DOCKER_URL: ${{ env.DOCKERHUB_REPOSITORY }} run: | - make release - docker images + docker run --rm \ + -w /${{ env.APPLICATION }}/ \ + -e BUILD_VERSION=${{ env.BUILD_VERSION }} \ + -e GIT_COMMIT=${{ steps.getCommitId.outputs.id }} \ + -e GIT_BRANCH=${{ inputs.checkout_ref }} \ + -e GIT_TAG=${{ inputs.release_version }} \ + -e PACKAGE=${{ env.PACKAGE }} \ + -e APPLICATION=${{ env.APPLICATION }} \ + -v $(pwd):/${{ env.APPLICATION}} \ + -v /var/run/docker.sock:/var/run/docker.sock \ + ${{ env.BUILDER_IMAGE }} release --clean --skip=validate,announce,publish + echo "DEBUG: ls -lao in the working directory" + ls -lao + echo "DEBUG: content of the dist/ directory" + find dist/ -ls + + - name: Build and push multi-platform docker images (${{ env.BUILD_VERSION }} and latest) in case perform_release is true + if: ${{ inputs.perform_release }} + env: + BUILD_VERSION: ${{ inputs.release_version }} + DOCKER_URL: ${{ env.DOCKERHUB_REPOSITORY }} + DOCKER_PUBLISH_LATEST_CONDITION: ${{ inputs.publish_latest_tag && format('--tag {0}:latest 
',env.DOCKERHUB_REPOSITORY) || '' }} + run: | + docker buildx build \ + --file ${{ env.DOCKERFILE_PATH }} \ + --build-arg DOCKER_BASE_IMAGE=${{ env.DOCKER_BASE_IMAGE }} \ + --build-arg VERSION=${{ env.BUILD_VERSION }} \ + --build-arg APPLICATION=${{ env.APPLICATION }} \ + --tag ${{ env.DOCKER_URL }}:${{ env.BUILD_VERSION }} \ + ${{ env.DOCKER_PUBLISH_LATEST_CONDITION }} \ + --label org.opencontainers.image.created=$(date -u +'%Y-%m-%dT%H:%M:%SZ') \ + --label org.opencontainers.image.authors="https://github.com/erigontech/erigon/graphs/contributors" \ + --label org.opencontainers.image.url="https://github.com/erigontech/erigon/blob/main/Dockerfile" \ + --label org.opencontainers.image.documentation="https://github.com/erigontech/erigon/blob/main/Dockerfile" \ + --label org.opencontainers.image.source="https://github.com/erigontech/erigon/blob/main/Dockerfile" \ + --label org.opencontainers.image.version=${{ inputs.release_version }} \ + --label org.opencontainers.image.revision=${{ steps.getCommitId.outputs.id }} \ + --label org.opencontainers.image.vcs-ref-short=${{ steps.getCommitId.outputs.short_commit_id }} \ + --label org.opencontainers.image.vendor="${{ github.repository_owner }}" \ + --label org.opencontainers.image.description="${{ env.LABEL_DESCRIPTION }}" \ + --label org.opencontainers.image.base.name="${{ env.DOCKER_BASE_IMAGE }}" \ + --push \ + --platform linux/amd64/v2,linux/arm64 . 
+ + - name: Upload artifact -- linux/arm64 + uses: actions/upload-artifact@834a144ee995460fba8ed112a2fc961b36a5ec5a ## v4.3.6 + with: + name: ${{ env.APPLICATION }}_${{ inputs.release_version }}_linux_arm64.tar.gz + path: ./dist/${{ env.APPLICATION }}_${{ inputs.release_version }}_linux_arm64.tar.gz + retention-days: 1 + compression-level: 0 + if-no-files-found: error + + - name: Upload artifact -- linux/amd64 + uses: actions/upload-artifact@834a144ee995460fba8ed112a2fc961b36a5ec5a ## v4.3.6 + with: + name: ${{ env.APPLICATION }}_${{ inputs.release_version }}_linux_amd64.tar.gz + path: ./dist/${{ env.APPLICATION }}_${{ inputs.release_version }}_linux_amd64.tar.gz + retention-days: 1 + compression-level: 0 + if-no-files-found: error + + - name: Upload artifact -- darwin/arm64 + uses: actions/upload-artifact@834a144ee995460fba8ed112a2fc961b36a5ec5a ## v4.3.6 + with: + name: ${{ env.APPLICATION }}_${{ inputs.release_version }}_darwin_arm64.tar.gz + path: ./dist/${{ env.APPLICATION }}_${{ inputs.release_version }}_darwin_arm64.tar.gz + retention-days: 1 + compression-level: 0 + if-no-files-found: error + + - name: Upload artifact -- darwin/amd64 + uses: actions/upload-artifact@834a144ee995460fba8ed112a2fc961b36a5ec5a ## v4.3.6 + with: + name: ${{ env.APPLICATION }}_${{ inputs.release_version }}_darwin_amd64.tar.gz + path: ./dist/${{ env.APPLICATION }}_${{ inputs.release_version }}_darwin_amd64.tar.gz + retention-days: 1 + compression-level: 0 + if-no-files-found: error + + - name: Upload artifact -- windows/amd64 + uses: actions/upload-artifact@834a144ee995460fba8ed112a2fc961b36a5ec5a ## v4.3.6 + with: + name: ${{ env.APPLICATION }}_${{ inputs.release_version }}_windows_amd64.zip + path: ./dist/${{ env.APPLICATION }}_${{ inputs.release_version }}_windows_amd64.zip + retention-days: 1 + compression-level: 0 + if-no-files-found: error + + - name: Upload artifact -- checksum + uses: actions/upload-artifact@834a144ee995460fba8ed112a2fc961b36a5ec5a ## v4.3.6 + with: + name: 
${{ env.APPLICATION }}_${{ inputs.release_version }}_checksums.txt + path: ./dist/${{ env.APPLICATION }}_${{ inputs.release_version }}_checksums.txt + retention-days: 1 + compression-level: 0 + if-no-files-found: error + +## not required for now -- commented: +# - name: Create and push a git tag for the released version in case perform_release is set +# if: ${{ inputs.perform_release }} +# run: | +# git config --global user.email ${{ env.GITHUB_AUTOMATION_EMAIL }} +# git config --global user.name "${{ env.GITHUB_AUTOMATION_NAME }}" +# git tag -a ${{ inputs.release_version }} -m "Release ${{ inputs.release_version }}" +# git push origin ${{ inputs.release_version }} + + - name: Publish draft of the Release notes with assets (without windows .zip) in case perform_release is set + if: ${{ inputs.perform_release }} env: - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - VERSION: ${{ steps.prepare.outputs.tag_name }} - DOCKER_USERNAME: ${{ secrets.DOCKERHUB }} - DOCKER_PASSWORD: ${{ secrets.DOCKERHUB_KEY }} \ No newline at end of file + GH_TOKEN: ${{ github.token }} + GH_REPO: ${{ github.repository }} + DOCKER_TAGS: ${{ env.DOCKERHUB_REPOSITORY }}:${{ inputs.release_version }} + GITHUB_RELEASE_TARGET: ${{ inputs.checkout_ref }} + run: | + cd dist + gh release create ${{ inputs.release_version }} *.tar.gz *_checksums.txt \ + --generate-notes \ + --target ${GITHUB_RELEASE_TARGET} \ + --draft=true \ + --title "${{ inputs.release_version }}" \ + --notes "**Improvements:**
- ...coming soon

**Bugfixes:**

- ...coming soon

**Docker images:**

Docker image released:
${{ env.DOCKER_TAGS }}

... coming soon
" \ No newline at end of file diff --git a/.goreleaser.yml b/.goreleaser.yml index 5fb729043f8..4df8d0a5385 100644 --- a/.goreleaser.yml +++ b/.goreleaser.yml @@ -1,23 +1,229 @@ + +#version: 2 + project_name: erigon release: - disable: false + disable: true draft: true prerelease: auto +env: + - CGO_ENABLED=1 + - GOPRIVATE=github.com/erigontech/silkworm-go + - BUILD_VERSION={{ .Env.BUILD_VERSION }} + - CGO_CFLAGS_DEFAULT=-DMDBX_ENV_CHECKPID=0 -DMDBX_DISABLE_VALIDATION=0 -DMDBX_FORCE_ASSERTIONS=0 -Wno-unknown-warning-option -Wno-enum-int-mismatch -Wno-strict-prototypes -Wno-unused-but-set-variable -O3 + - CGO_CFLAGS_WINDOWS=-Wno-unknown-warning-option -Wno-enum-int-mismatch -Wno-strict-prototypes -Wno-unused-but-set-variable -g -O2 -D__BLST_PORTABLE__ + - CGO_LDFLAGS_DEFAULT=-O3 -g + - CGO_LDFLAGS_DEFAULT_DARWIN=-O3 -g + builds: - - id: darwin-amd64 + +## Darwin AMD64: + - id: darwin-amd64-erigon main: ./cmd/erigon binary: erigon goos: [ darwin ] goarch: [ amd64 ] + goamd64: + - v2 env: - CC=o64-clang - CXX=o64-clang++ + - CGO_LDFLAGS={{ .Env.CGO_LDFLAGS_DEFAULT_DARWIN }} + - CGO_CFLAGS={{ .Env.CGO_CFLAGS_DEFAULT }} tags: [ nosqlite, noboltdb, nosilkworm ] - ldflags: -s -w + flags: + - -trimpath + - -buildvcs=false + ldflags: + - -s -w + - -X {{ .Env.PACKAGE }}/params.GitCommit={{ .Env.GIT_COMMIT }} -X {{ .Env.PACKAGE }}/params.GitBranch={{ .Env.GIT_BRANCH }} -X {{ .Env.PACKAGE }}/params.GitTag={{ .Env.GIT_TAG }} - - id: darwin-arm64 + - id: darwin-amd64-downloader + main: ./cmd/downloader + binary: downloader + goos: [ darwin ] + goarch: [ amd64 ] + goamd64: + - v2 + env: + - CC=o64-clang + - CXX=o64-clang++ + - CGO_LDFLAGS={{ .Env.CGO_LDFLAGS_DEFAULT_DARWIN }} + - CGO_CFLAGS={{ .Env.CGO_CFLAGS_DEFAULT }} + tags: [ nosqlite, noboltdb, nosilkworm ] + flags: + - -trimpath + - -buildvcs=false + ldflags: + - -s -w + - -X {{ .Env.PACKAGE }}/params.GitCommit={{ .Env.GIT_COMMIT }} -X {{ .Env.PACKAGE }}/params.GitBranch={{ .Env.GIT_BRANCH }} -X {{ .Env.PACKAGE 
}}/params.GitTag={{ .Env.GIT_TAG }} + + - id: darwin-amd64-devnet + main: ./cmd/devnet + binary: devnet + goos: [ darwin ] + goarch: [ amd64 ] + goamd64: + - v2 + env: + - CC=o64-clang + - CXX=o64-clang++ + - CGO_LDFLAGS={{ .Env.CGO_LDFLAGS_DEFAULT_DARWIN }} + - CGO_CFLAGS={{ .Env.CGO_CFLAGS_DEFAULT }} + tags: [ nosqlite, noboltdb, nosilkworm ] + flags: + - -trimpath + - -buildvcs=false + ldflags: + - -s -w + - -X {{ .Env.PACKAGE }}/params.GitCommit={{ .Env.GIT_COMMIT }} -X {{ .Env.PACKAGE }}/params.GitBranch={{ .Env.GIT_BRANCH }} -X {{ .Env.PACKAGE }}/params.GitTag={{ .Env.GIT_TAG }} + + - id: darwin-amd64-evm + main: ./cmd/evm + binary: evm + goos: [ darwin ] + goarch: [ amd64 ] + goamd64: + - v2 + env: + - CC=o64-clang + - CXX=o64-clang++ + - CGO_LDFLAGS={{ .Env.CGO_LDFLAGS_DEFAULT_DARWIN }} + - CGO_CFLAGS={{ .Env.CGO_CFLAGS_DEFAULT }} + tags: [ nosqlite, noboltdb, nosilkworm ] + flags: + - -trimpath + - -buildvcs=false + ldflags: + - -s -w + - -X {{ .Env.PACKAGE }}/params.GitCommit={{ .Env.GIT_COMMIT }} -X {{ .Env.PACKAGE }}/params.GitBranch={{ .Env.GIT_BRANCH }} -X {{ .Env.PACKAGE }}/params.GitTag={{ .Env.GIT_TAG }} + + - id: darwin-amd64-caplin + main: ./cmd/caplin + binary: caplin + goos: [ darwin ] + goarch: [ amd64 ] + goamd64: + - v2 + env: + - CC=o64-clang + - CXX=o64-clang++ + - CGO_LDFLAGS={{ .Env.CGO_LDFLAGS_DEFAULT_DARWIN }} + - CGO_CFLAGS={{ .Env.CGO_CFLAGS_DEFAULT }} + tags: [ nosqlite, noboltdb, nosilkworm ] + flags: + - -trimpath + - -buildvcs=false + ldflags: + - -s -w + - -X {{ .Env.PACKAGE }}/params.GitCommit={{ .Env.GIT_COMMIT }} -X {{ .Env.PACKAGE }}/params.GitBranch={{ .Env.GIT_BRANCH }} -X {{ .Env.PACKAGE }}/params.GitTag={{ .Env.GIT_TAG }} + + - id: darwin-amd64-diag + main: ./cmd/diag + binary: diag + goos: [ darwin ] + goarch: [ amd64 ] + goamd64: + - v2 + env: + - CC=o64-clang + - CXX=o64-clang++ + - CGO_LDFLAGS={{ .Env.CGO_LDFLAGS_DEFAULT_DARWIN }} + - CGO_CFLAGS={{ .Env.CGO_CFLAGS_DEFAULT }} + tags: [ nosqlite, noboltdb, nosilkworm ] 
+ flags: + - -trimpath + - -buildvcs=false + ldflags: + - -s -w + - -X {{ .Env.PACKAGE }}/params.GitCommit={{ .Env.GIT_COMMIT }} -X {{ .Env.PACKAGE }}/params.GitBranch={{ .Env.GIT_BRANCH }} -X {{ .Env.PACKAGE }}/params.GitTag={{ .Env.GIT_TAG }} + + - id: darwin-amd64-integration + main: ./cmd/integration + binary: integration + goos: [ darwin ] + goarch: [ amd64 ] + goamd64: + - v2 + env: + - CC=o64-clang + - CXX=o64-clang++ + - CGO_LDFLAGS={{ .Env.CGO_LDFLAGS_DEFAULT_DARWIN }} + - CGO_CFLAGS={{ .Env.CGO_CFLAGS_DEFAULT }} + tags: [ nosqlite, noboltdb, nosilkworm ] + flags: + - -trimpath + - -buildvcs=false + ldflags: + - -s -w + - -X {{ .Env.PACKAGE }}/params.GitCommit={{ .Env.GIT_COMMIT }} -X {{ .Env.PACKAGE }}/params.GitBranch={{ .Env.GIT_BRANCH }} -X {{ .Env.PACKAGE }}/params.GitTag={{ .Env.GIT_TAG }} + + - id: darwin-amd64-rpcdaemon + main: ./cmd/rpcdaemon + binary: rpcdaemon + goos: [ darwin ] + goarch: [ amd64 ] + goamd64: + - v2 + env: + - CC=o64-clang + - CXX=o64-clang++ + - CGO_LDFLAGS={{ .Env.CGO_LDFLAGS_DEFAULT_DARWIN }} + - CGO_CFLAGS={{ .Env.CGO_CFLAGS_DEFAULT }} + tags: [ nosqlite, noboltdb, nosilkworm ] + flags: + - -trimpath + - -buildvcs=false + ldflags: + - -s -w + - -X {{ .Env.PACKAGE }}/params.GitCommit={{ .Env.GIT_COMMIT }} -X {{ .Env.PACKAGE }}/params.GitBranch={{ .Env.GIT_BRANCH }} -X {{ .Env.PACKAGE }}/params.GitTag={{ .Env.GIT_TAG }} + + - id: darwin-amd64-sentry + main: ./cmd/sentry + binary: sentry + goos: [ darwin ] + goarch: [ amd64 ] + goamd64: + - v2 + env: + - CC=o64-clang + - CXX=o64-clang++ + - CGO_LDFLAGS={{ .Env.CGO_LDFLAGS_DEFAULT_DARWIN }} + - CGO_CFLAGS={{ .Env.CGO_CFLAGS_DEFAULT }} + tags: [ nosqlite, noboltdb, nosilkworm ] + flags: + - -trimpath + - -buildvcs=false + ldflags: + - -s -w + - -X {{ .Env.PACKAGE }}/params.GitCommit={{ .Env.GIT_COMMIT }} -X {{ .Env.PACKAGE }}/params.GitBranch={{ .Env.GIT_BRANCH }} -X {{ .Env.PACKAGE }}/params.GitTag={{ .Env.GIT_TAG }} + + - id: darwin-amd64-txpool + main: ./cmd/txpool + binary: 
txpool + goos: [ darwin ] + goarch: [ amd64 ] + goamd64: + - v2 + env: + - CC=o64-clang + - CXX=o64-clang++ + - CGO_LDFLAGS={{ .Env.CGO_LDFLAGS_DEFAULT_DARWIN }} + - CGO_CFLAGS={{ .Env.CGO_CFLAGS_DEFAULT }} + tags: [ nosqlite, noboltdb, nosilkworm ] + flags: + - -trimpath + - -buildvcs=false + ldflags: + - -s -w + - -X {{ .Env.PACKAGE }}/params.GitCommit={{ .Env.GIT_COMMIT }} -X {{ .Env.PACKAGE }}/params.GitBranch={{ .Env.GIT_BRANCH }} -X {{ .Env.PACKAGE }}/params.GitTag={{ .Env.GIT_TAG }} +## End Darwin AMD64 + + +## Darwin ARM64: + - id: darwin-arm64-erigon main: ./cmd/erigon binary: erigon goos: [ darwin ] @@ -25,21 +231,385 @@ builds: env: - CC=oa64-clang - CXX=oa64-clang++ + - CGO_LDFLAGS={{ .Env.CGO_LDFLAGS_DEFAULT_DARWIN }} + - CGO_CFLAGS={{ .Env.CGO_CFLAGS_DEFAULT }} -D__BLST_PORTABLE__ tags: [ nosqlite, noboltdb, nosilkworm ] - ldflags: -s -w + flags: + - -trimpath + - -buildvcs=false + ldflags: + - -s -w + - -X {{ .Env.PACKAGE }}/params.GitCommit={{ .Env.GIT_COMMIT }} -X {{ .Env.PACKAGE }}/params.GitBranch={{ .Env.GIT_BRANCH }} -X {{ .Env.PACKAGE }}/params.GitTag={{ .Env.GIT_TAG }} - - id: linux-amd64 + - id: darwin-arm64-downloader + main: ./cmd/downloader + binary: downloader + goos: [ darwin ] + goarch: [ arm64 ] + env: + - CC=oa64-clang + - CXX=oa64-clang++ + - CGO_LDFLAGS={{ .Env.CGO_LDFLAGS_DEFAULT_DARWIN }} + - CGO_CFLAGS={{ .Env.CGO_CFLAGS_DEFAULT }} -D__BLST_PORTABLE__ + tags: [ nosqlite, noboltdb, nosilkworm ] + flags: + - -trimpath + - -buildvcs=false + ldflags: + - -s -w + - -X {{ .Env.PACKAGE }}/params.GitCommit={{ .Env.GIT_COMMIT }} -X {{ .Env.PACKAGE }}/params.GitBranch={{ .Env.GIT_BRANCH }} -X {{ .Env.PACKAGE }}/params.GitTag={{ .Env.GIT_TAG }} + + - id: darwin-arm64-devnet + main: ./cmd/devnet + binary: devnet + goos: [ darwin ] + goarch: [ arm64 ] + env: + - CC=oa64-clang + - CXX=oa64-clang++ + - CGO_LDFLAGS={{ .Env.CGO_LDFLAGS_DEFAULT_DARWIN }} + - CGO_CFLAGS={{ .Env.CGO_CFLAGS_DEFAULT }} -D__BLST_PORTABLE__ + tags: [ nosqlite, 
noboltdb, nosilkworm ] + flags: + - -trimpath + - -buildvcs=false + ldflags: + - -s -w + - -X {{ .Env.PACKAGE }}/params.GitCommit={{ .Env.GIT_COMMIT }} -X {{ .Env.PACKAGE }}/params.GitBranch={{ .Env.GIT_BRANCH }} -X {{ .Env.PACKAGE }}/params.GitTag={{ .Env.GIT_TAG }} + + - id: darwin-arm64-evm + main: ./cmd/evm + binary: evm + goos: [ darwin ] + goarch: [ arm64 ] + env: + - CC=oa64-clang + - CXX=oa64-clang++ + - CGO_LDFLAGS={{ .Env.CGO_LDFLAGS_DEFAULT_DARWIN }} + - CGO_CFLAGS={{ .Env.CGO_CFLAGS_DEFAULT }} -D__BLST_PORTABLE__ + tags: [ nosqlite, noboltdb, nosilkworm ] + flags: + - -trimpath + - -buildvcs=false + ldflags: + - -s -w + - -X {{ .Env.PACKAGE }}/params.GitCommit={{ .Env.GIT_COMMIT }} -X {{ .Env.PACKAGE }}/params.GitBranch={{ .Env.GIT_BRANCH }} -X {{ .Env.PACKAGE }}/params.GitTag={{ .Env.GIT_TAG }} + + - id: darwin-arm64-caplin + main: ./cmd/caplin + binary: caplin + goos: [ darwin ] + goarch: [ arm64 ] + env: + - CC=oa64-clang + - CXX=oa64-clang++ + - CGO_LDFLAGS={{ .Env.CGO_LDFLAGS_DEFAULT_DARWIN }} + - CGO_CFLAGS={{ .Env.CGO_CFLAGS_DEFAULT }} -D__BLST_PORTABLE__ + tags: [ nosqlite, noboltdb, nosilkworm ] + flags: + - -trimpath + - -buildvcs=false + ldflags: + - -s -w + - -X {{ .Env.PACKAGE }}/params.GitCommit={{ .Env.GIT_COMMIT }} -X {{ .Env.PACKAGE }}/params.GitBranch={{ .Env.GIT_BRANCH }} -X {{ .Env.PACKAGE }}/params.GitTag={{ .Env.GIT_TAG }} + + - id: darwin-arm64-diag + main: ./cmd/diag + binary: diag + goos: [ darwin ] + goarch: [ arm64 ] + env: + - CC=oa64-clang + - CXX=oa64-clang++ + - CGO_LDFLAGS={{ .Env.CGO_LDFLAGS_DEFAULT_DARWIN }} + - CGO_CFLAGS={{ .Env.CGO_CFLAGS_DEFAULT }} -D__BLST_PORTABLE__ + tags: [ nosqlite, noboltdb, nosilkworm ] + flags: + - -trimpath + - -buildvcs=false + ldflags: + - -s -w + - -X {{ .Env.PACKAGE }}/params.GitCommit={{ .Env.GIT_COMMIT }} -X {{ .Env.PACKAGE }}/params.GitBranch={{ .Env.GIT_BRANCH }} -X {{ .Env.PACKAGE }}/params.GitTag={{ .Env.GIT_TAG }} + + - id: darwin-arm64-integration + main: ./cmd/integration + 
binary: integration + goos: [ darwin ] + goarch: [ arm64 ] + env: + - CC=oa64-clang + - CXX=oa64-clang++ + - CGO_LDFLAGS={{ .Env.CGO_LDFLAGS_DEFAULT_DARWIN }} + - CGO_CFLAGS={{ .Env.CGO_CFLAGS_DEFAULT }} -D__BLST_PORTABLE__ + tags: [ nosqlite, noboltdb, nosilkworm ] + flags: + - -trimpath + - -buildvcs=false + ldflags: + - -s -w + - -X {{ .Env.PACKAGE }}/params.GitCommit={{ .Env.GIT_COMMIT }} -X {{ .Env.PACKAGE }}/params.GitBranch={{ .Env.GIT_BRANCH }} -X {{ .Env.PACKAGE }}/params.GitTag={{ .Env.GIT_TAG }} + + - id: darwin-arm64-rpcdaemon + main: ./cmd/rpcdaemon + binary: rpcdaemon + goos: [ darwin ] + goarch: [ arm64 ] + env: + - CC=oa64-clang + - CXX=oa64-clang++ + - CGO_LDFLAGS={{ .Env.CGO_LDFLAGS_DEFAULT_DARWIN }} + - CGO_CFLAGS={{ .Env.CGO_CFLAGS_DEFAULT }} -D__BLST_PORTABLE__ + tags: [ nosqlite, noboltdb, nosilkworm ] + flags: + - -trimpath + - -buildvcs=false + ldflags: + - -s -w + - -X {{ .Env.PACKAGE }}/params.GitCommit={{ .Env.GIT_COMMIT }} -X {{ .Env.PACKAGE }}/params.GitBranch={{ .Env.GIT_BRANCH }} -X {{ .Env.PACKAGE }}/params.GitTag={{ .Env.GIT_TAG }} + + - id: darwin-arm64-sentry + main: ./cmd/sentry + binary: sentry + goos: [ darwin ] + goarch: [ arm64 ] + env: + - CC=oa64-clang + - CXX=oa64-clang++ + - CGO_LDFLAGS={{ .Env.CGO_LDFLAGS_DEFAULT_DARWIN }} + - CGO_CFLAGS={{ .Env.CGO_CFLAGS_DEFAULT }} -D__BLST_PORTABLE__ + tags: [ nosqlite, noboltdb, nosilkworm ] + flags: + - -trimpath + - -buildvcs=false + ldflags: + - -s -w + - -X {{ .Env.PACKAGE }}/params.GitCommit={{ .Env.GIT_COMMIT }} -X {{ .Env.PACKAGE }}/params.GitBranch={{ .Env.GIT_BRANCH }} -X {{ .Env.PACKAGE }}/params.GitTag={{ .Env.GIT_TAG }} + + - id: darwin-arm64-txpool + main: ./cmd/txpool + binary: txpool + goos: [ darwin ] + goarch: [ arm64 ] + env: + - CC=oa64-clang + - CXX=oa64-clang++ + - CGO_LDFLAGS={{ .Env.CGO_LDFLAGS_DEFAULT_DARWIN }} + - CGO_CFLAGS={{ .Env.CGO_CFLAGS_DEFAULT }} -D__BLST_PORTABLE__ + tags: [ nosqlite, noboltdb, nosilkworm ] + flags: + - -trimpath + - -buildvcs=false 
+ ldflags: + - -s -w + - -X {{ .Env.PACKAGE }}/params.GitCommit={{ .Env.GIT_COMMIT }} -X {{ .Env.PACKAGE }}/params.GitBranch={{ .Env.GIT_BRANCH }} -X {{ .Env.PACKAGE }}/params.GitTag={{ .Env.GIT_TAG }} +## End Darwin ARM64 + + +## Linux AMD64: + - id: linux-amd64-erigon main: ./cmd/erigon binary: erigon goos: [ linux ] goarch: [ amd64 ] + goamd64: + - v2 env: - CC=x86_64-linux-gnu-gcc - CXX=x86_64-linux-gnu-g++ + - CGO_CFLAGS={{ .Env.CGO_CFLAGS_DEFAULT }} + - CGO_LDFLAGS={{ .Env.CGO_LDFLAGS_DEFAULT }} tags: [ nosqlite, noboltdb, nosilkworm ] - ldflags: -s -w -extldflags "-static" # We need to build a static binary because we are building in a glibc based system and running in a musl container + flags: + - -trimpath + - -buildvcs=false + ldflags: + - -s -w -extldflags "-static" + - -X {{ .Env.PACKAGE }}/params.GitCommit={{ .Env.GIT_COMMIT }} -X {{ .Env.PACKAGE }}/params.GitBranch={{ .Env.GIT_BRANCH }} -X {{ .Env.PACKAGE }}/params.GitTag={{ .Env.GIT_TAG }} - - id: linux-arm64 + - id: linux-amd64-downloader + main: ./cmd/downloader + binary: downloader + goos: [ linux ] + goarch: [ amd64 ] + goamd64: + - v2 + env: + - CC=x86_64-linux-gnu-gcc + - CXX=x86_64-linux-gnu-g++ + - CGO_CFLAGS={{ .Env.CGO_CFLAGS_DEFAULT }} + - CGO_LDFLAGS={{ .Env.CGO_LDFLAGS_DEFAULT }} + tags: [ nosqlite, noboltdb, nosilkworm ] + flags: + - -trimpath + - -buildvcs=false + ldflags: + - -s -w -extldflags "-static" + - -X {{ .Env.PACKAGE }}/params.GitCommit={{ .Env.GIT_COMMIT }} -X {{ .Env.PACKAGE }}/params.GitBranch={{ .Env.GIT_BRANCH }} -X {{ .Env.PACKAGE }}/params.GitTag={{ .Env.GIT_TAG }} + + - id: linux-amd64-devnet + main: ./cmd/devnet + binary: devnet + goos: [ linux ] + goarch: [ amd64 ] + goamd64: + - v2 + env: + - CC=x86_64-linux-gnu-gcc + - CXX=x86_64-linux-gnu-g++ + - CGO_CFLAGS={{ .Env.CGO_CFLAGS_DEFAULT }} + - CGO_LDFLAGS={{ .Env.CGO_LDFLAGS_DEFAULT }} + tags: [ nosqlite, noboltdb, nosilkworm ] + flags: + - -trimpath + - -buildvcs=false + ldflags: + - -s -w -extldflags "-static" + - 
-X {{ .Env.PACKAGE }}/params.GitCommit={{ .Env.GIT_COMMIT }} -X {{ .Env.PACKAGE }}/params.GitBranch={{ .Env.GIT_BRANCH }} -X {{ .Env.PACKAGE }}/params.GitTag={{ .Env.GIT_TAG }} + + - id: linux-amd64-evm + main: ./cmd/evm + binary: evm + goos: [ linux ] + goarch: [ amd64 ] + goamd64: + - v2 + env: + - CC=x86_64-linux-gnu-gcc + - CXX=x86_64-linux-gnu-g++ + - CGO_CFLAGS={{ .Env.CGO_CFLAGS_DEFAULT }} + - CGO_LDFLAGS={{ .Env.CGO_LDFLAGS_DEFAULT }} + tags: [ nosqlite, noboltdb, nosilkworm ] + flags: + - -trimpath + - -buildvcs=false + ldflags: + - -s -w -extldflags "-static" + - -X {{ .Env.PACKAGE }}/params.GitCommit={{ .Env.GIT_COMMIT }} -X {{ .Env.PACKAGE }}/params.GitBranch={{ .Env.GIT_BRANCH }} -X {{ .Env.PACKAGE }}/params.GitTag={{ .Env.GIT_TAG }} + + - id: linux-amd64-caplin + main: ./cmd/caplin + binary: caplin + goos: [ linux ] + goarch: [ amd64 ] + goamd64: + - v2 + env: + - CC=x86_64-linux-gnu-gcc + - CXX=x86_64-linux-gnu-g++ + - CGO_CFLAGS={{ .Env.CGO_CFLAGS_DEFAULT }} + - CGO_LDFLAGS={{ .Env.CGO_LDFLAGS_DEFAULT }} + tags: [ nosqlite, noboltdb, nosilkworm ] + flags: + - -trimpath + - -buildvcs=false + ldflags: + - -s -w -extldflags "-static" + - -X {{ .Env.PACKAGE }}/params.GitCommit={{ .Env.GIT_COMMIT }} -X {{ .Env.PACKAGE }}/params.GitBranch={{ .Env.GIT_BRANCH }} -X {{ .Env.PACKAGE }}/params.GitTag={{ .Env.GIT_TAG }} + + - id: linux-amd64-diag + main: ./cmd/diag + binary: diag + goos: [ linux ] + goarch: [ amd64 ] + goamd64: + - v2 + env: + - CC=x86_64-linux-gnu-gcc + - CXX=x86_64-linux-gnu-g++ + - CGO_CFLAGS={{ .Env.CGO_CFLAGS_DEFAULT }} + - CGO_LDFLAGS={{ .Env.CGO_LDFLAGS_DEFAULT }} + tags: [ nosqlite, noboltdb, nosilkworm ] + flags: + - -trimpath + - -buildvcs=false + ldflags: + - -s -w -extldflags "-static" + - -X {{ .Env.PACKAGE }}/params.GitCommit={{ .Env.GIT_COMMIT }} -X {{ .Env.PACKAGE }}/params.GitBranch={{ .Env.GIT_BRANCH }} -X {{ .Env.PACKAGE }}/params.GitTag={{ .Env.GIT_TAG }} + + - id: linux-amd64-integration + main: ./cmd/integration + binary: 
integration + goos: [ linux ] + goarch: [ amd64 ] + goamd64: + - v2 + env: + - CC=x86_64-linux-gnu-gcc + - CXX=x86_64-linux-gnu-g++ + - CGO_CFLAGS={{ .Env.CGO_CFLAGS_DEFAULT }} + - CGO_LDFLAGS={{ .Env.CGO_LDFLAGS_DEFAULT }} + tags: [ nosqlite, noboltdb, nosilkworm ] + flags: + - -trimpath + - -buildvcs=false + ldflags: + - -s -w -extldflags "-static" + - -X {{ .Env.PACKAGE }}/params.GitCommit={{ .Env.GIT_COMMIT }} -X {{ .Env.PACKAGE }}/params.GitBranch={{ .Env.GIT_BRANCH }} -X {{ .Env.PACKAGE }}/params.GitTag={{ .Env.GIT_TAG }} + + - id: linux-amd64-rpcdaemon + main: ./cmd/rpcdaemon + binary: rpcdaemon + goos: [ linux ] + goarch: [ amd64 ] + goamd64: + - v2 + env: + - CC=x86_64-linux-gnu-gcc + - CXX=x86_64-linux-gnu-g++ + - CGO_CFLAGS={{ .Env.CGO_CFLAGS_DEFAULT }} + - CGO_LDFLAGS={{ .Env.CGO_LDFLAGS_DEFAULT }} + tags: [ nosqlite, noboltdb, nosilkworm ] + flags: + - -trimpath + - -buildvcs=false + ldflags: + - -s -w -extldflags "-static" + - -X {{ .Env.PACKAGE }}/params.GitCommit={{ .Env.GIT_COMMIT }} -X {{ .Env.PACKAGE }}/params.GitBranch={{ .Env.GIT_BRANCH }} -X {{ .Env.PACKAGE }}/params.GitTag={{ .Env.GIT_TAG }} + + - id: linux-amd64-sentry + main: ./cmd/sentry + binary: sentry + goos: [ linux ] + goarch: [ amd64 ] + goamd64: + - v2 + env: + - CC=x86_64-linux-gnu-gcc + - CXX=x86_64-linux-gnu-g++ + - CGO_CFLAGS={{ .Env.CGO_CFLAGS_DEFAULT }} + - CGO_LDFLAGS={{ .Env.CGO_LDFLAGS_DEFAULT }} + tags: [ nosqlite, noboltdb, nosilkworm ] + flags: + - -trimpath + - -buildvcs=false + ldflags: + - -s -w -extldflags "-static" + - -X {{ .Env.PACKAGE }}/params.GitCommit={{ .Env.GIT_COMMIT }} -X {{ .Env.PACKAGE }}/params.GitBranch={{ .Env.GIT_BRANCH }} -X {{ .Env.PACKAGE }}/params.GitTag={{ .Env.GIT_TAG }} + + - id: linux-amd64-txpool + main: ./cmd/txpool + binary: txpool + goos: [ linux ] + goarch: [ amd64 ] + goamd64: + - v2 + env: + - CC=x86_64-linux-gnu-gcc + - CXX=x86_64-linux-gnu-g++ + - CGO_CFLAGS={{ .Env.CGO_CFLAGS_DEFAULT }} + - CGO_LDFLAGS={{ .Env.CGO_LDFLAGS_DEFAULT }} 
+ tags: [ nosqlite, noboltdb, nosilkworm ] + flags: + - -trimpath + - -buildvcs=false + ldflags: + - -s -w -extldflags "-static" + - -X {{ .Env.PACKAGE }}/params.GitCommit={{ .Env.GIT_COMMIT }} -X {{ .Env.PACKAGE }}/params.GitBranch={{ .Env.GIT_BRANCH }} -X {{ .Env.PACKAGE }}/params.GitTag={{ .Env.GIT_TAG }} +## End of Linux AMD64 + + +## Linux ARM64 + - id: linux-arm64-erigon main: ./cmd/erigon binary: erigon goos: [ linux ] @@ -47,10 +617,182 @@ builds: env: - CC=aarch64-linux-gnu-gcc - CXX=aarch64-linux-gnu-g++ + - CGO_CFLAGS={{ .Env.CGO_CFLAGS_DEFAULT }} -D__BLST_PORTABLE__ + - CGO_LDFLAGS={{ .Env.CGO_LDFLAGS_DEFAULT }} tags: [ nosqlite, noboltdb, nosilkworm ] - ldflags: -s -w -extldflags "-static" # We need to build a static binary because we are building in a glibc based system and running in a musl container + flags: + - -trimpath + - -buildvcs=false + ldflags: + - -s -w -extldflags "-static" + - -X {{ .Env.PACKAGE }}/params.GitCommit={{ .Env.GIT_COMMIT }} -X {{ .Env.PACKAGE }}/params.GitBranch={{ .Env.GIT_BRANCH }} -X {{ .Env.PACKAGE }}/params.GitTag={{ .Env.GIT_TAG }} - - id: windows-amd64 + - id: linux-arm64-downloader + main: ./cmd/downloader + binary: downloader + goos: [ linux ] + goarch: [ arm64 ] + env: + - CC=aarch64-linux-gnu-gcc + - CXX=aarch64-linux-gnu-g++ + - CGO_CFLAGS={{ .Env.CGO_CFLAGS_DEFAULT }} -D__BLST_PORTABLE__ + - CGO_LDFLAGS={{ .Env.CGO_LDFLAGS_DEFAULT }} + tags: [ nosqlite, noboltdb, nosilkworm ] + flags: + - -trimpath + - -buildvcs=false + ldflags: + - -s -w -extldflags "-static" + - -X {{ .Env.PACKAGE }}/params.GitCommit={{ .Env.GIT_COMMIT }} -X {{ .Env.PACKAGE }}/params.GitBranch={{ .Env.GIT_BRANCH }} -X {{ .Env.PACKAGE }}/params.GitTag={{ .Env.GIT_TAG }} + + - id: linux-arm64-devnet + main: ./cmd/devnet + binary: devnet + goos: [ linux ] + goarch: [ arm64 ] + env: + - CC=aarch64-linux-gnu-gcc + - CXX=aarch64-linux-gnu-g++ + - CGO_CFLAGS={{ .Env.CGO_CFLAGS_DEFAULT }} -D__BLST_PORTABLE__ + - CGO_LDFLAGS={{ .Env.CGO_LDFLAGS_DEFAULT 
}} + tags: [ nosqlite, noboltdb, nosilkworm ] + flags: + - -trimpath + - -buildvcs=false + ldflags: + - -s -w -extldflags "-static" + - -X {{ .Env.PACKAGE }}/params.GitCommit={{ .Env.GIT_COMMIT }} -X {{ .Env.PACKAGE }}/params.GitBranch={{ .Env.GIT_BRANCH }} -X {{ .Env.PACKAGE }}/params.GitTag={{ .Env.GIT_TAG }} + + - id: linux-arm64-evm + main: ./cmd/evm + binary: evm + goos: [ linux ] + goarch: [ arm64 ] + env: + - CC=aarch64-linux-gnu-gcc + - CXX=aarch64-linux-gnu-g++ + - CGO_CFLAGS={{ .Env.CGO_CFLAGS_DEFAULT }} -D__BLST_PORTABLE__ + - CGO_LDFLAGS={{ .Env.CGO_LDFLAGS_DEFAULT }} + tags: [ nosqlite, noboltdb, nosilkworm ] + flags: + - -trimpath + - -buildvcs=false + ldflags: + - -s -w -extldflags "-static" + - -X {{ .Env.PACKAGE }}/params.GitCommit={{ .Env.GIT_COMMIT }} -X {{ .Env.PACKAGE }}/params.GitBranch={{ .Env.GIT_BRANCH }} -X {{ .Env.PACKAGE }}/params.GitTag={{ .Env.GIT_TAG }} + + - id: linux-arm64-caplin + main: ./cmd/caplin + binary: caplin + goos: [ linux ] + goarch: [ arm64 ] + env: + - CC=aarch64-linux-gnu-gcc + - CXX=aarch64-linux-gnu-g++ + - CGO_CFLAGS={{ .Env.CGO_CFLAGS_DEFAULT }} -D__BLST_PORTABLE__ + - CGO_LDFLAGS={{ .Env.CGO_LDFLAGS_DEFAULT }} + tags: [ nosqlite, noboltdb, nosilkworm ] + flags: + - -trimpath + - -buildvcs=false + ldflags: + - -s -w -extldflags "-static" + - -X {{ .Env.PACKAGE }}/params.GitCommit={{ .Env.GIT_COMMIT }} -X {{ .Env.PACKAGE }}/params.GitBranch={{ .Env.GIT_BRANCH }} -X {{ .Env.PACKAGE }}/params.GitTag={{ .Env.GIT_TAG }} + + - id: linux-arm64-diag + main: ./cmd/diag + binary: diag + goos: [ linux ] + goarch: [ arm64 ] + env: + - CC=aarch64-linux-gnu-gcc + - CXX=aarch64-linux-gnu-g++ + - CGO_CFLAGS={{ .Env.CGO_CFLAGS_DEFAULT }} -D__BLST_PORTABLE__ + - CGO_LDFLAGS={{ .Env.CGO_LDFLAGS_DEFAULT }} + tags: [ nosqlite, noboltdb, nosilkworm ] + flags: + - -trimpath + - -buildvcs=false + ldflags: + - -s -w -extldflags "-static" + - -X {{ .Env.PACKAGE }}/params.GitCommit={{ .Env.GIT_COMMIT }} -X {{ .Env.PACKAGE 
}}/params.GitBranch={{ .Env.GIT_BRANCH }} -X {{ .Env.PACKAGE }}/params.GitTag={{ .Env.GIT_TAG }} + + - id: linux-arm64-integration + main: ./cmd/integration + binary: integration + goos: [ linux ] + goarch: [ arm64 ] + env: + - CC=aarch64-linux-gnu-gcc + - CXX=aarch64-linux-gnu-g++ + - CGO_CFLAGS={{ .Env.CGO_CFLAGS_DEFAULT }} -D__BLST_PORTABLE__ + - CGO_LDFLAGS={{ .Env.CGO_LDFLAGS_DEFAULT }} + tags: [ nosqlite, noboltdb, nosilkworm ] + flags: + - -trimpath + - -buildvcs=false + ldflags: + - -s -w -extldflags "-static" + - -X {{ .Env.PACKAGE }}/params.GitCommit={{ .Env.GIT_COMMIT }} -X {{ .Env.PACKAGE }}/params.GitBranch={{ .Env.GIT_BRANCH }} -X {{ .Env.PACKAGE }}/params.GitTag={{ .Env.GIT_TAG }} + + - id: linux-arm64-rpcdaemon + main: ./cmd/rpcdaemon + binary: rpcdaemon + goos: [ linux ] + goarch: [ arm64 ] + env: + - CC=aarch64-linux-gnu-gcc + - CXX=aarch64-linux-gnu-g++ + - CGO_CFLAGS={{ .Env.CGO_CFLAGS_DEFAULT }} -D__BLST_PORTABLE__ + - CGO_LDFLAGS={{ .Env.CGO_LDFLAGS_DEFAULT }} + tags: [ nosqlite, noboltdb, nosilkworm ] + flags: + - -trimpath + - -buildvcs=false + ldflags: + - -s -w -extldflags "-static" + - -X {{ .Env.PACKAGE }}/params.GitCommit={{ .Env.GIT_COMMIT }} -X {{ .Env.PACKAGE }}/params.GitBranch={{ .Env.GIT_BRANCH }} -X {{ .Env.PACKAGE }}/params.GitTag={{ .Env.GIT_TAG }} + + - id: linux-arm64-sentry + main: ./cmd/sentry + binary: sentry + goos: [ linux ] + goarch: [ arm64 ] + env: + - CC=aarch64-linux-gnu-gcc + - CXX=aarch64-linux-gnu-g++ + - CGO_CFLAGS={{ .Env.CGO_CFLAGS_DEFAULT }} -D__BLST_PORTABLE__ + - CGO_LDFLAGS={{ .Env.CGO_LDFLAGS_DEFAULT }} + tags: [ nosqlite, noboltdb, nosilkworm ] + flags: + - -trimpath + - -buildvcs=false + ldflags: + - -s -w -extldflags "-static" + - -X {{ .Env.PACKAGE }}/params.GitCommit={{ .Env.GIT_COMMIT }} -X {{ .Env.PACKAGE }}/params.GitBranch={{ .Env.GIT_BRANCH }} -X {{ .Env.PACKAGE }}/params.GitTag={{ .Env.GIT_TAG }} + + - id: linux-arm64-txpool + main: ./cmd/txpool + binary: txpool + goos: [ linux ] + goarch: [ 
arm64 ] + env: + - CC=aarch64-linux-gnu-gcc + - CXX=aarch64-linux-gnu-g++ + - CGO_CFLAGS={{ .Env.CGO_CFLAGS_DEFAULT }} -D__BLST_PORTABLE__ + - CGO_LDFLAGS={{ .Env.CGO_LDFLAGS_DEFAULT }} + tags: [ nosqlite, noboltdb, nosilkworm ] + flags: + - -trimpath + - -buildvcs=false + ldflags: + - -s -w -extldflags "-static" + - -X {{ .Env.PACKAGE }}/params.GitCommit={{ .Env.GIT_COMMIT }} -X {{ .Env.PACKAGE }}/params.GitBranch={{ .Env.GIT_BRANCH }} -X {{ .Env.PACKAGE }}/params.GitTag={{ .Env.GIT_TAG }} +## End of Linux ARM64 + + +## Windows AMD64: + - id: windows-amd64-erigon main: ./cmd/erigon binary: erigon goos: [ windows ] @@ -58,51 +800,310 @@ builds: env: - CC=x86_64-w64-mingw32-gcc - CXX=x86_64-w64-mingw32-g++ + - CGO_CFLAGS={{ .Env.CGO_CFLAGS_WINDOWS }} + - CGO_LDFLAGS={{ .Env.CGO_LDFLAGS_DEFAULT }} + - CMAKE_MDBX_BUILD_SHARED_LIBRARY:BOOL=OFF + - CMAKE_MDBX_WITHOUT_MSVC_CRT:BOOOL=OFF + - CMAKE_MDBX_BUILD_TIMESTAMP=unknown + - CMAKE_MDBX_FORCE_ASSERTIONS:INT=0 + tags: [ nosqlite, noboltdb, nosilkworm ] + flags: + - -v + - -trimpath + - -buildvcs=false + ldflags: + - -s -w -extldflags "-static" + - -X {{ .Env.PACKAGE }}/params.GitCommit={{ .Env.GIT_COMMIT }} -X {{ .Env.PACKAGE }}/params.GitBranch={{ .Env.GIT_BRANCH }} -X {{ .Env.PACKAGE }}/params.GitTag={{ .Env.GIT_TAG }} + + - id: windows-amd64-downloader + main: ./cmd/downloader + binary: downloader + goos: [ windows ] + goarch: [ amd64 ] + env: + - CC=x86_64-w64-mingw32-gcc + - CXX=x86_64-w64-mingw32-g++ + - CGO_CFLAGS={{ .Env.CGO_CFLAGS_WINDOWS }} + - CGO_LDFLAGS={{ .Env.CGO_LDFLAGS_DEFAULT }} + - CMAKE_MDBX_BUILD_SHARED_LIBRARY:BOOL=OFF + - CMAKE_MDBX_WITHOUT_MSVC_CRT:BOOOL=OFF + - CMAKE_MDBX_BUILD_TIMESTAMP=unknown + - CMAKE_MDBX_FORCE_ASSERTIONS:INT=0 tags: [ nosqlite, noboltdb, nosilkworm ] - ldflags: -s -w - - -snapshot: - name_template: "{{ .Tag }}.next" - -dockers: - - image_templates: - - thorax/{{ .ProjectName }}:{{ .Version }}-amd64 - dockerfile: Dockerfile.release - use: buildx - skip_push: true - goarch: 
amd64 - ids: - - linux-amd64 - build_flag_templates: - - --platform=linux/amd64 - - - image_templates: - - thorax/{{ .ProjectName }}:{{ .Version }}-arm64 - dockerfile: Dockerfile.release - skip_push: true - use: buildx - goarch: arm64 - ids: - - linux-arm64 - build_flag_templates: - - --platform=linux/arm64/v8 - -docker_manifests: - - name_template: thorax/{{ .ProjectName }}:{{ .Version }} - skip_push: true - image_templates: - - thorax/{{ .ProjectName }}:{{ .Version }}-amd64 - - thorax/{{ .ProjectName }}:{{ .Version }}-arm64 - - - name_template: thorax/{{ .ProjectName }}:latest - skip_push: true - image_templates: - - thorax/{{ .ProjectName }}:{{ .Version }}-amd64 - - thorax/{{ .ProjectName }}:{{ .Version }}-arm64 - -announce: - slack: - enabled: false - # The name of the channel that the user selected as a destination for webhook messages. - channel: '#code-releases' + flags: + - -trimpath + - -buildvcs=false + ldflags: + - -s -w -extldflags "-static" + - -X {{ .Env.PACKAGE }}/params.GitCommit={{ .Env.GIT_COMMIT }} -X {{ .Env.PACKAGE }}/params.GitBranch={{ .Env.GIT_BRANCH }} -X {{ .Env.PACKAGE }}/params.GitTag={{ .Env.GIT_TAG }} + + - id: windows-amd64-devnet + main: ./cmd/devnet + binary: devnet + goos: [ windows ] + goarch: [ amd64 ] + env: + - CC=x86_64-w64-mingw32-gcc + - CXX=x86_64-w64-mingw32-g++ + - CGO_CFLAGS={{ .Env.CGO_CFLAGS_WINDOWS }} + - CGO_LDFLAGS={{ .Env.CGO_LDFLAGS_DEFAULT }} + - CMAKE_MDBX_BUILD_SHARED_LIBRARY:BOOL=OFF + - CMAKE_MDBX_WITHOUT_MSVC_CRT:BOOOL=OFF + - CMAKE_MDBX_BUILD_TIMESTAMP=unknown + - CMAKE_MDBX_FORCE_ASSERTIONS:INT=0 + tags: [ nosqlite, noboltdb, nosilkworm ] + flags: + - -trimpath + - -buildvcs=false + ldflags: + - -s -w -extldflags "-static" + - -X {{ .Env.PACKAGE }}/params.GitCommit={{ .Env.GIT_COMMIT }} -X {{ .Env.PACKAGE }}/params.GitBranch={{ .Env.GIT_BRANCH }} -X {{ .Env.PACKAGE }}/params.GitTag={{ .Env.GIT_TAG }} + + - id: windows-amd64-evm + main: ./cmd/evm + binary: evm + goos: [ windows ] + goarch: [ amd64 ] + env: 
+ - CC=x86_64-w64-mingw32-gcc + - CXX=x86_64-w64-mingw32-g++ + - CGO_CFLAGS={{ .Env.CGO_CFLAGS_WINDOWS }} + - CGO_LDFLAGS={{ .Env.CGO_LDFLAGS_DEFAULT }} + - CMAKE_MDBX_BUILD_SHARED_LIBRARY:BOOL=OFF + - CMAKE_MDBX_WITHOUT_MSVC_CRT:BOOOL=OFF + - CMAKE_MDBX_BUILD_TIMESTAMP=unknown + - CMAKE_MDBX_FORCE_ASSERTIONS:INT=0 + tags: [ nosqlite, noboltdb, nosilkworm ] + flags: + - -trimpath + - -buildvcs=false + ldflags: + - -s -w -extldflags "-static" + - -X {{ .Env.PACKAGE }}/params.GitCommit={{ .Env.GIT_COMMIT }} -X {{ .Env.PACKAGE }}/params.GitBranch={{ .Env.GIT_BRANCH }} -X {{ .Env.PACKAGE }}/params.GitTag={{ .Env.GIT_TAG }} + + - id: windows-amd64-caplin + main: ./cmd/caplin + binary: caplin + goos: [ windows ] + goarch: [ amd64 ] + env: + - CC=x86_64-w64-mingw32-gcc + - CXX=x86_64-w64-mingw32-g++ + - CGO_CFLAGS={{ .Env.CGO_CFLAGS_WINDOWS }} + - CGO_LDFLAGS={{ .Env.CGO_LDFLAGS_DEFAULT }} + - CMAKE_MDBX_BUILD_SHARED_LIBRARY:BOOL=OFF + - CMAKE_MDBX_WITHOUT_MSVC_CRT:BOOOL=OFF + - CMAKE_MDBX_BUILD_TIMESTAMP=unknown + - CMAKE_MDBX_FORCE_ASSERTIONS:INT=0 + tags: [ nosqlite, noboltdb, nosilkworm ] + flags: + - -trimpath + - -buildvcs=false + ldflags: + - -s -w -extldflags "-static" + - -X {{ .Env.PACKAGE }}/params.GitCommit={{ .Env.GIT_COMMIT }} -X {{ .Env.PACKAGE }}/params.GitBranch={{ .Env.GIT_BRANCH }} -X {{ .Env.PACKAGE }}/params.GitTag={{ .Env.GIT_TAG }} + + - id: windows-amd64-diag + main: ./cmd/diag + binary: diag + goos: [ windows ] + goarch: [ amd64 ] + env: + - CC=x86_64-w64-mingw32-gcc + - CXX=x86_64-w64-mingw32-g++ + - CGO_CFLAGS={{ .Env.CGO_CFLAGS_WINDOWS }} + - CGO_LDFLAGS={{ .Env.CGO_LDFLAGS_DEFAULT }} + - CMAKE_MDBX_BUILD_SHARED_LIBRARY:BOOL=OFF + - CMAKE_MDBX_WITHOUT_MSVC_CRT:BOOOL=OFF + - CMAKE_MDBX_BUILD_TIMESTAMP=unknown + - CMAKE_MDBX_FORCE_ASSERTIONS:INT=0 + tags: [ nosqlite, noboltdb, nosilkworm ] + flags: + - -trimpath + - -buildvcs=false + ldflags: + - -s -w -extldflags "-static" + - -X {{ .Env.PACKAGE }}/params.GitCommit={{ .Env.GIT_COMMIT }} -X {{ 
.Env.PACKAGE }}/params.GitBranch={{ .Env.GIT_BRANCH }} -X {{ .Env.PACKAGE }}/params.GitTag={{ .Env.GIT_TAG }} + + - id: windows-amd64-integration + main: ./cmd/integration + binary: integration + goos: [ windows ] + goarch: [ amd64 ] + env: + - CC=x86_64-w64-mingw32-gcc + - CXX=x86_64-w64-mingw32-g++ + - CGO_CFLAGS={{ .Env.CGO_CFLAGS_WINDOWS }} + - CGO_LDFLAGS={{ .Env.CGO_LDFLAGS_DEFAULT }} + - CMAKE_MDBX_BUILD_SHARED_LIBRARY:BOOL=OFF + - CMAKE_MDBX_WITHOUT_MSVC_CRT:BOOOL=OFF + - CMAKE_MDBX_BUILD_TIMESTAMP=unknown + - CMAKE_MDBX_FORCE_ASSERTIONS:INT=0 + tags: [ nosqlite, noboltdb, nosilkworm ] + flags: + - -trimpath + - -buildvcs=false + ldflags: + - -s -w -extldflags "-static" + - -X {{ .Env.PACKAGE }}/params.GitCommit={{ .Env.GIT_COMMIT }} -X {{ .Env.PACKAGE }}/params.GitBranch={{ .Env.GIT_BRANCH }} -X {{ .Env.PACKAGE }}/params.GitTag={{ .Env.GIT_TAG }} + + - id: windows-amd64-rpcdaemon + main: ./cmd/rpcdaemon + binary: rpcdaemon + goos: [ windows ] + goarch: [ amd64 ] + env: + - CC=x86_64-w64-mingw32-gcc + - CXX=x86_64-w64-mingw32-g++ + - CGO_CFLAGS={{ .Env.CGO_CFLAGS_WINDOWS }} + - CGO_LDFLAGS={{ .Env.CGO_LDFLAGS_DEFAULT }} + - CMAKE_MDBX_BUILD_SHARED_LIBRARY:BOOL=OFF + - CMAKE_MDBX_WITHOUT_MSVC_CRT:BOOOL=OFF + - CMAKE_MDBX_BUILD_TIMESTAMP=unknown + - CMAKE_MDBX_FORCE_ASSERTIONS:INT=0 + tags: [ nosqlite, noboltdb, nosilkworm ] + flags: + - -trimpath + - -buildvcs=false + ldflags: + - -s -w -extldflags "-static" + - -X {{ .Env.PACKAGE }}/params.GitCommit={{ .Env.GIT_COMMIT }} -X {{ .Env.PACKAGE }}/params.GitBranch={{ .Env.GIT_BRANCH }} -X {{ .Env.PACKAGE }}/params.GitTag={{ .Env.GIT_TAG }} + + - id: windows-amd64-sentry + main: ./cmd/sentry + binary: sentry + goos: [ windows ] + goarch: [ amd64 ] + env: + - CC=x86_64-w64-mingw32-gcc + - CXX=x86_64-w64-mingw32-g++ + - CGO_CFLAGS={{ .Env.CGO_CFLAGS_WINDOWS }} + - CGO_LDFLAGS={{ .Env.CGO_LDFLAGS_DEFAULT }} + - CMAKE_MDBX_BUILD_SHARED_LIBRARY:BOOL=OFF + - CMAKE_MDBX_WITHOUT_MSVC_CRT:BOOOL=OFF + - 
CMAKE_MDBX_BUILD_TIMESTAMP=unknown + - CMAKE_MDBX_FORCE_ASSERTIONS:INT=0 + tags: [ nosqlite, noboltdb, nosilkworm ] + flags: + - -trimpath + - -buildvcs=false + ldflags: + - -s -w -extldflags "-static" + - -X {{ .Env.PACKAGE }}/params.GitCommit={{ .Env.GIT_COMMIT }} -X {{ .Env.PACKAGE }}/params.GitBranch={{ .Env.GIT_BRANCH }} -X {{ .Env.PACKAGE }}/params.GitTag={{ .Env.GIT_TAG }} + + - id: windows-amd64-txpool + main: ./cmd/txpool + binary: txpool + goos: [ windows ] + goarch: [ amd64 ] + env: + - CC=x86_64-w64-mingw32-gcc + - CXX=x86_64-w64-mingw32-g++ + - CGO_CFLAGS={{ .Env.CGO_CFLAGS_WINDOWS }} + - CGO_LDFLAGS={{ .Env.CGO_LDFLAGS_DEFAULT }} + - CMAKE_MDBX_BUILD_SHARED_LIBRARY:BOOL=OFF + - CMAKE_MDBX_WITHOUT_MSVC_CRT:BOOOL=OFF + - CMAKE_MDBX_BUILD_TIMESTAMP=unknown + - CMAKE_MDBX_FORCE_ASSERTIONS:INT=0 + tags: [ nosqlite, noboltdb, nosilkworm ] + flags: + - -trimpath + - -buildvcs=false + ldflags: + - -s -w -extldflags "-static" + - -X {{ .Env.PACKAGE }}/params.GitCommit={{ .Env.GIT_COMMIT }} -X {{ .Env.PACKAGE }}/params.GitBranch={{ .Env.GIT_BRANCH }} -X {{ .Env.PACKAGE }}/params.GitTag={{ .Env.GIT_TAG }} +## Windows AMD64 + + +## Checksums +checksum: + name_template: "{{ .Env.APPLICATION }}_{{ .Env.BUILD_VERSION }}_checksums.txt" + algorithm: sha256 + ids: + - linux-arm64 + - linux-amd64 + - darwin-amd64 + - darwin-arm64 + # - windows-amd64 + + +archives: + - id: linux-arm64 + builds: + - linux-arm64-erigon + - linux-arm64-downloader + - linux-arm64-devnet + - linux-arm64-evm + - linux-arm64-caplin + - linux-arm64-diag + - linux-arm64-integration + - linux-arm64-rpcdaemon + - linux-arm64-sentry + - linux-arm64-txpool + name_template: "{{ .Env.APPLICATION }}_{{ .Env.BUILD_VERSION }}_{{ .Os }}_{{ .Arch }}" + wrap_in_directory: true + format: tar.gz + + - id: linux-amd64 + builds: + - linux-amd64-erigon + - linux-amd64-downloader + - linux-amd64-devnet + - linux-amd64-evm + - linux-amd64-caplin + - linux-amd64-diag + - linux-amd64-integration + - 
linux-amd64-rpcdaemon + - linux-amd64-sentry + - linux-amd64-txpool + name_template: "{{ .Env.APPLICATION }}_{{ .Env.BUILD_VERSION }}_{{ .Os }}_{{ .Arch }}" + wrap_in_directory: true + format: tar.gz + + - id: darwin-amd64 + builds: + - darwin-amd64-erigon + - darwin-amd64-downloader + - darwin-amd64-devnet + - darwin-amd64-evm + - darwin-amd64-caplin + - darwin-amd64-diag + - darwin-amd64-integration + - darwin-amd64-rpcdaemon + - darwin-amd64-sentry + - darwin-amd64-txpool + name_template: "{{ .Env.APPLICATION }}_{{ .Env.BUILD_VERSION }}_{{ .Os }}_{{ .Arch }}" + wrap_in_directory: true + format: tar.gz + + - id: darwin-arm64 + builds: + - darwin-arm64-erigon + - darwin-arm64-downloader + - darwin-arm64-devnet + - darwin-arm64-evm + - darwin-arm64-caplin + - darwin-arm64-diag + - darwin-arm64-integration + - darwin-arm64-rpcdaemon + - darwin-arm64-sentry + - darwin-arm64-txpool + name_template: "{{ .Env.APPLICATION }}_{{ .Env.BUILD_VERSION }}_{{ .Os }}_{{ .Arch }}" + wrap_in_directory: true + format: tar.gz + + - id: windows-amd64 + builds: + - windows-amd64-erigon + - windows-amd64-downloader + - windows-amd64-devnet + - windows-amd64-evm + - windows-amd64-caplin + - windows-amd64-diag + - windows-amd64-integration + - windows-amd64-rpcdaemon + - windows-amd64-sentry + - windows-amd64-txpool + name_template: "{{ .Env.APPLICATION }}_{{ .Env.BUILD_VERSION }}_{{ .Os }}_{{ .Arch }}" + wrap_in_directory: true + format: zip diff --git a/Dockerfile.release b/Dockerfile.release index 115ac10910b..a546d5fceb4 100644 --- a/Dockerfile.release +++ b/Dockerfile.release @@ -1,8 +1,50 @@ -FROM alpine:3.14 +ARG DOCKER_BASE_IMAGE="alpine:3.20.1" -RUN apk add --no-cache ca-certificates && \ - mkdir -p /etc/erigon -COPY erigon /usr/local/bin/ +## Note TARGETARCH is a crucial variable: +## see https://docs.docker.com/reference/dockerfile/#automatic-platform-args-in-the-global-scope -EXPOSE 8545 8551 8546 30303 30303/udp 42069 42069/udp 8080 9090 6060 -ENTRYPOINT ["erigon"] +FROM 
${DOCKER_BASE_IMAGE} AS temporary +ARG TARGETARCH \ + VERSION=${VERSION} \ + APPLICATION + +COPY ./dist/${APPLICATION}_${VERSION}_linux_${TARGETARCH}.tar.gz /tmp/${APPLICATION}.tar.gz +RUN tar xzvf /tmp/${APPLICATION}.tar.gz -C /tmp && \ + mv /tmp/${APPLICATION}_${VERSION}_linux_${TARGETARCH} /tmp/${APPLICATION} + +FROM ${DOCKER_BASE_IMAGE} + +ARG USER=erigon \ + GROUP=erigon \ + APPLICATION + +RUN --mount=type=bind,from=temporary,source=/tmp/${APPLICATION},target=/tmp/${APPLICATION} \ + apk add --no-cache ca-certificates tzdata && \ + addgroup ${GROUP} && \ + adduser -D -h /home/${USER} -G ${GROUP} ${USER} && \ + install -d -o ${USER} -g ${GROUP} /home/${USER}/.local /home/${USER}/.local/share /home/${USER}/.local/share/erigon && \ + install -o ${USER} -g ${GROUP} /tmp/${APPLICATION}/erigon /usr/local/bin/ && \ + install -o ${USER} -g ${GROUP} /tmp/${APPLICATION}/integration /usr/local/bin/ && \ + install -o ${USER} -g ${GROUP} /tmp/${APPLICATION}/diag /usr/local/bin/ && \ + install -o ${USER} -g ${GROUP} /tmp/${APPLICATION}/sentry /usr/local/bin/ && \ + install -o ${USER} -g ${GROUP} /tmp/${APPLICATION}/txpool /usr/local/bin/ && \ + install -o ${USER} -g ${GROUP} /tmp/${APPLICATION}/downloader /usr/local/bin/ && \ + install -o ${USER} -g ${GROUP} /tmp/${APPLICATION}/rpcdaemon /usr/local/bin/ + +VOLUME [ "/home/${USER}" ] +WORKDIR /home/${USER} + +USER ${USER} + +EXPOSE 8545 \ + 8551 \ + 8546 \ + 30303 \ + 30303/udp \ + 42069 \ + 42069/udp \ + 8080 \ + 9090 \ + 6060 + +ENTRYPOINT [ "/usr/local/bin/erigon" ] \ No newline at end of file From 7cc27cd0ce4063dcdd0bc687938dbf9153138598 Mon Sep 17 00:00:00 2001 From: lystopad Date: Thu, 29 Aug 2024 14:23:05 +0200 Subject: [PATCH 27/49] Switch to ubuntu-22.04 runner on release/2.60 branch (#11795) --- .github/workflows/release.yml | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 926812c3e3a..2ff71b4b6e1 100644 --- 
a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -48,9 +48,9 @@ on: jobs: build-release: - ## runs-on: ubuntu-22.04 - runs-on: ubuntu-latest-devops-xxlarge - timeout-minutes: 30 + runs-on: ubuntu-22.04 + ## runs-on: ubuntu-latest-devops-xxlarge + timeout-minutes: 120 name: Build Artifacts and multi-platform Docker image, publish draft of the Release Notes steps: @@ -215,4 +215,4 @@ jobs: --target ${GITHUB_RELEASE_TARGET} \ --draft=true \ --title "${{ inputs.release_version }}" \ - --notes "**Improvements:**
- ...coming soon

**Bugfixes:**

- ...coming soon

**Docker images:**

Docker image released:
${{ env.DOCKER_TAGS }}

... coming soon
" \ No newline at end of file + --notes "**Improvements:**
- ...coming soon

**Bugfixes:**

- ...coming soon

**Docker images:**

Docker image released:
${{ env.DOCKER_TAGS }}

... coming soon
" From fec000b14075ffe3f84f4c1976582a75779b2cb9 Mon Sep 17 00:00:00 2001 From: Michelangelo Riccobene Date: Fri, 30 Aug 2024 14:07:37 +0200 Subject: [PATCH 28/49] qa-tests: add rpc integration tests (release/2.60) (#11768) Import Silkworm RPC Integration tests --- .../workflows/qa-rpc-integration-tests.yml | 323 ++++++++++++++++++ 1 file changed, 323 insertions(+) create mode 100644 .github/workflows/qa-rpc-integration-tests.yml diff --git a/.github/workflows/qa-rpc-integration-tests.yml b/.github/workflows/qa-rpc-integration-tests.yml new file mode 100644 index 00000000000..e9909c50efd --- /dev/null +++ b/.github/workflows/qa-rpc-integration-tests.yml @@ -0,0 +1,323 @@ +name: QA - RPC Integration Tests + +on: + push: + branches: + - 'release/2.*' + pull_request: + branches: + - 'release/2.*' + types: + - ready_for_review + workflow_dispatch: # Run manually + +jobs: + integration-test-suite: + runs-on: [self-hosted, Erigon2] + env: + ERIGON_REFERENCE_DATA_DIR: /opt/erigon-versions/reference-version/datadir + ERIGON_TESTBED_DATA_DIR: /opt/erigon-testbed/datadir + ERIGON_QA_PATH: /home/qarunner/erigon-qa + RPC_PAST_TEST_DIR: /opt/rpc-past-tests + CHAIN: mainnet + + steps: + - name: Check out repository + uses: actions/checkout@v4 + + - name: Checkout RPC Tests Repository & Install Requirements + run: | + rm -rf ${{ runner.workspace }}/rpc-tests + git -c advice.detachedHead=false clone --depth 1 --branch v0.42.0 https://github.com/erigontech/rpc-tests ${{runner.workspace}}/rpc-tests + cd ${{ runner.workspace }}/rpc-tests + pip3 install -r requirements.txt + + - name: Clean Erigon Build Directory + run: | + make clean + + - name: Build Erigon RPCDaemon + run: | + make erigon + working-directory: ${{ github.workspace }} + + - name: Pause the Erigon instance dedicated to db maintenance + run: | + python3 $ERIGON_QA_PATH/test_system/db-producer/pause_production.py || true + + - name: Restore Erigon Testbed Data Directory + run: | + rsync -a --delete 
$ERIGON_REFERENCE_DATA_DIR/ $ERIGON_TESTBED_DATA_DIR/ + + - name: Run RpcDaemon + working-directory: ${{ github.workspace }}/build/bin + run: | + echo "Erigon (RpcDaemon) starting..." + + ./erigon --datadir $ERIGON_TESTBED_DATA_DIR --http.api admin,debug,eth,parity,erigon,trace,web3,txpool,ots,net --ws --verbosity 1 > erigon.log 2>&1 & + + RPC_DAEMON_PID=$! + echo "RPC_DAEMON_PID=$RPC_DAEMON_PID" >> $GITHUB_ENV + + echo "Erigon (RpcDaemon) started" + + - name: Wait for port 8545 to be opened + run: | + for i in {1..30}; do + if nc -z localhost 8545; then + echo "Port 8545 is open" + break + fi + echo "Waiting for port 8545 to open..." + sleep 10 + done + if ! nc -z localhost 8545; then + echo "Port 8545 did not open in time" + exit 1 + fi + + - name: Run RPC Integration Tests + id: test_step + run: | + set +e # Disable exit on error + commit=$(git -C ${{runner.workspace}}/erigon rev-parse --short HEAD) + + cd ${{ runner.workspace }}/rpc-tests/integration + rm -rf ./mainnet/results/ + + # Run RPC integration test runner via http + python3 ./run_tests.py -p 8545 --continue -f -x debug_accountRange/test_09.tar,\ + debug_accountRange/test_12.tar,\ + debug_accountRange/test_14.json,\ + debug_getModifiedAccountsByHash/test_01.json,\ + debug_getModifiedAccountsByHash/test_02.json,\ + debug_getModifiedAccountsByHash/test_03.json,\ + debug_getModifiedAccountsByHash/test_04.json,\ + debug_getModifiedAccountsByHash/test_05.tar,\ + debug_getModifiedAccountsByHash/test_06.tar,\ + debug_getModifiedAccountsByHash/test_07.tar,\ + debug_getModifiedAccountsByHash/test_08.json,\ + debug_getModifiedAccountsByHash/test_09.json,\ + debug_getModifiedAccountsByHash/test_10.json,\ + debug_getModifiedAccountsByHash/test_11.json,\ + debug_getModifiedAccountsByHash/test_12.json,\ + debug_getModifiedAccountsByHash/test_13.tar,\ + debug_getModifiedAccountsByHash/test_14.json,\ + debug_getModifiedAccountsByHash/test_15.tar,\ + debug_getModifiedAccountsByHash/test_16.tar,\ + 
debug_getModifiedAccountsByHash/test_17.tar,\ + debug_getModifiedAccountsByHash/test_18.tar,\ + debug_getModifiedAccountsByNumber/test_01.json,\ + debug_getModifiedAccountsByNumber/test_02.json,\ + debug_getModifiedAccountsByNumber/test_03.json,\ + debug_getModifiedAccountsByNumber/test_04.json,\ + debug_getModifiedAccountsByNumber/test_05.tar,\ + debug_getModifiedAccountsByNumber/test_06.tar,\ + debug_getModifiedAccountsByNumber/test_07.tar,\ + debug_getModifiedAccountsByNumber/test_08.json,\ + debug_getModifiedAccountsByNumber/test_09.json,\ + debug_getModifiedAccountsByNumber/test_10.json,\ + debug_getModifiedAccountsByNumber/test_11.json,\ + debug_getModifiedAccountsByNumber/test_12.json,\ + debug_getModifiedAccountsByNumber/test_13.tar,\ + debug_getModifiedAccountsByNumber/test_14.json,\ + debug_getModifiedAccountsByNumber/test_15.tar,\ + debug_getModifiedAccountsByNumber/test_16.tar,\ + debug_getModifiedAccountsByNumber/test_17.tar,\ + debug_getModifiedAccountsByNumber/test_18.json,\ + debug_traceBlockByHash/test_01.tar,\ + debug_traceBlockByHash/test_02.tar,\ + debug_traceBlockByHash/test_03.tar,\ + debug_traceBlockByNumber/test_03.tar,\ + debug_traceBlockByNumber/test_05.tar,\ + debug_traceBlockByNumber/test_06.tar,\ + debug_traceBlockByNumber/test_08.tar,\ + debug_traceBlockByNumber/test_09.tar,\ + debug_traceBlockByNumber/test_10.tar,\ + debug_traceBlockByNumber/test_11.tar,\ + debug_traceBlockByNumber/test_12.tar,\ + debug_traceCall/test_01.json,\ + debug_traceCall/test_02.json,\ + debug_traceCall/test_05.tar,\ + debug_traceCall/test_06.tar,\ + debug_traceCall/test_07.tar,\ + debug_traceCall/test_08.tar,\ + debug_traceCall/test_09.json,\ + debug_traceCall/test_10.tar,\ + debug_traceCallMany/test_01.json,\ + debug_traceCallMany/test_02.json,\ + debug_traceCallMany/test_05.tar,\ + debug_traceCallMany/test_06.tar,\ + debug_traceCallMany/test_07.tar,\ + debug_traceCallMany/test_08.tar,\ + debug_traceCallMany/test_09.json,\ + debug_traceCallMany/test_10.tar,\ 
+ debug_traceTransaction/test_02.tar,\ + debug_traceTransaction/test_03.tar,\ + debug_traceTransaction/test_05.tar,\ + debug_traceTransaction/test_06.tar,\ + debug_traceTransaction/test_07.tar,\ + debug_traceTransaction/test_10.tar,\ + debug_traceTransaction/test_11.tar,\ + debug_traceTransaction/test_13.tar,\ + debug_traceTransaction/test_16.json,\ + debug_traceTransaction/test_17.tar,\ + debug_traceTransaction/test_18.tar,\ + debug_traceTransaction/test_19.json,\ + debug_traceTransaction/test_20.json,\ + debug_traceTransaction/test_21.json,\ + debug_traceTransaction/test_23.json,\ + debug_traceTransaction/test_24.json,\ + debug_traceTransaction/test_25.json,\ + debug_traceTransaction/test_26.json,\ + debug_traceTransaction/test_27.json,\ + debug_traceTransaction/test_28.tar,\ + engine_exchangeCapabilities/test_1.json,\ + engine_exchangeTransitionConfigurationV1/test_01.json,\ + engine_getClientVersionV1/test_1.json,\ + erigon_getLogsByHash/test_04.json,\ + eth_callBundle/test_09.json,\ + eth_callBundle/test_12.json,\ + eth_createAccessList/test_06.json,\ + eth_createAccessList/test_07.json,\ + eth_createAccessList/test_15.json,\ + eth_createAccessList/test_16.json,\ + eth_getBlockTransactionCountByHash/test_02.json,\ + eth_getBlockTransactionCountByNumber/test_08.json,\ + eth_getUncleCountByBlockHash/test_03.json,\ + parity_getBlockReceipts/test_01.json,\ + parity_getBlockReceipts/test_02.json,\ + parity_getBlockReceipts/test_03.json,\ + parity_getBlockReceipts/test_04.json,\ + parity_getBlockReceipts/test_05.json,\ + parity_getBlockReceipts/test_06.json,\ + parity_getBlockReceipts/test_07.json,\ + parity_getBlockReceipts/test_08.json,\ + parity_getBlockReceipts/test_09.json,\ + parity_getBlockReceipts/test_10.json,\ + trace_call/test_02.json,\ + trace_call/test_04.tar,\ + trace_call/test_08.tar,\ + trace_call/test_11.tar,\ + trace_call/test_13.json,\ + trace_call/test_17.tar,\ + trace_call/test_19.tar,\ + trace_call/test_20.json,\ + trace_callMany/test_01.json,\ 
+ trace_callMany/test_02.json,\ + trace_callMany/test_03.json,\ + trace_callMany/test_04.json,\ + trace_callMany/test_05.json,\ + trace_callMany/test_06.json,\ + trace_callMany/test_08.json,\ + trace_callMany/test_09.json,\ + trace_callMany/test_10.json,\ + trace_callMany/test_11.json,\ + trace_rawTransaction/test_01.json,\ + trace_rawTransaction/test_03.json,\ + trace_replayBlockTransactions/test_01.tar,\ + trace_replayBlockTransactions/test_02.tar,\ + trace_replayBlockTransactions/test_03.tar,\ + trace_replayBlockTransactions/test_04.tar,\ + trace_replayBlockTransactions/test_05.tar,\ + trace_replayBlockTransactions/test_08.tar,\ + trace_replayBlockTransactions/test_10.json,\ + trace_replayBlockTransactions/test_11.json,\ + trace_replayBlockTransactions/test_13.tar,\ + trace_replayBlockTransactions/test_14.tar,\ + trace_replayBlockTransactions/test_15.tar,\ + trace_replayBlockTransactions/test_16.tar,\ + trace_replayBlockTransactions/test_17.tar,\ + trace_replayBlockTransactions/test_18.tar,\ + trace_replayBlockTransactions/test_19.tar,\ + trace_replayBlockTransactions/test_20.tar,\ + trace_replayBlockTransactions/test_21.tar,\ + trace_replayBlockTransactions/test_22.tar,\ + trace_replayBlockTransactions/test_23.tar,\ + trace_replayBlockTransactions/test_24.tar,\ + trace_replayBlockTransactions/test_25.tar,\ + trace_replayTransaction/test_02.tar,\ + trace_replayTransaction/test_03.tar,\ + trace_replayTransaction/test_04.tar,\ + trace_replayTransaction/test_05.tar,\ + trace_replayTransaction/test_06.tar,\ + trace_replayTransaction/test_07.tar,\ + trace_replayTransaction/test_10.tar,\ + trace_replayTransaction/test_11.tar,\ + trace_replayTransaction/test_14.tar,\ + trace_replayTransaction/test_16.tar,\ + trace_replayTransaction/test_18.tar,\ + trace_replayTransaction/test_23.tar,\ + trace_replayTransaction/test_24.json,\ + trace_replayTransaction/test_29.tar + + # Capture test runner script exit status + test_exit_status=$? 
+ + # Save the subsection reached status + echo "::set-output name=test_executed::true" + + # Check test runner exit status + if [ $test_exit_status -eq 0 ]; then + echo "tests completed successfully" + echo + echo "TEST_RESULT=success" >> "$GITHUB_OUTPUT" + else + echo "error detected during tests" + echo "TEST_RESULT=failure" >> "$GITHUB_OUTPUT" + + # Save failed results to a directory with timestamp and commit hash + cp -r ${{ runner.workspace }}/rpc-tests/integration/mainnet/results/ $RPC_PAST_TEST_DIR/mainnet_$(date +%Y%m%d_%H%M%S)_integration_$commit_http/ + fi + + - name: Stop Erigon RpcDaemon + working-directory: ${{ github.workspace }}/build/bin + run: | + # Clean up rpcdaemon process if it's still running + if kill -0 $RPC_DAEMON_PID 2> /dev/null; then + echo "Erigon RpcDaemon stopping..." + kill $RPC_DAEMON_PID + echo "Erigon RpcDaemon stopped" + else + echo "Erigon RpcDaemon has already terminated" + fi + + - name: Delete Erigon Testbed Data Directory + if: always() + run: | + rm -rf $ERIGON_TESTBED_DATA_DIR + + - name: Resume the Erigon instance dedicated to db maintenance + run: | + python3 $ERIGON_QA_PATH/test_system/db-producer/resume_production.py || true + + - name: Upload test results + if: steps.test_step.outputs.test_executed == 'true' + uses: actions/upload-artifact@v4 + with: + name: test-results + path: ${{ runner.workspace }}/rpc-tests/integration/mainnet/results/ + + - name: Save test results + if: steps.test_step.outputs.test_executed == 'true' + working-directory: ${{ github.workspace }} + env: + TEST_RESULT: ${{ steps.test_step.outputs.TEST_RESULT }} + run: | + db_version=$(python3 $ERIGON_QA_PATH/test_system/qa-tests/uploads/prod_info.py $ERIGON_REFERENCE_DATA_DIR/../production.ini production erigon_repo_commit) + if [ -z "$db_version" ]; then + db_version="no-version" + fi + + python3 $ERIGON_QA_PATH/test_system/qa-tests/uploads/upload_test_results.py --repo erigon --commit $(git rev-parse HEAD) --branch ${{ github.ref_name }} 
--test_name rpc-integration-tests --chain $CHAIN --runner ${{ runner.name }} --db_version $db_version --outcome $TEST_RESULT #--result_file ${{ github.workspace }}/result-$CHAIN.json + + - name: Action for Success + if: steps.test_step.outputs.TEST_RESULT == 'success' + run: echo "::notice::Tests completed successfully" + + - name: Action for Failure + if: steps.test_step.outputs.TEST_RESULT != 'success' + run: | + echo "::error::Error detected during tests" + exit 1 + From e62c2fd57a37ddde5ba10446d8b5e84672500c08 Mon Sep 17 00:00:00 2001 From: Pratik Patil Date: Fri, 30 Aug 2024 21:22:57 +0530 Subject: [PATCH 29/49] [Polygon] Bor: PIP-30: increased max code size limit to 32KB [For Erigon-2] (#11811) [Polygon] Bor: [PIP-30: increased max code size limit to 32KB](https://github.com/maticnetwork/Polygon-Improvement-Proposals/blob/main/PIPs/PIP-30.md) --- core/vm/evm.go | 9 ++++++++- erigon-lib/chain/chain_config.go | 1 + params/protocol_params.go | 5 +++-- polygon/bor/borcfg/bor_config.go | 5 +++++ 4 files changed, 17 insertions(+), 3 deletions(-) diff --git a/core/vm/evm.go b/core/vm/evm.go index 2ce7ee887fa..e65557b4c8f 100644 --- a/core/vm/evm.go +++ b/core/vm/evm.go @@ -421,7 +421,7 @@ func (evm *EVM) create(caller ContractRef, codeAndHash *codeAndHash, gasRemainin ret, err = run(evm, contract, nil, false) // EIP-170: Contract code size limit - if err == nil && evm.chainRules.IsSpuriousDragon && len(ret) > params.MaxCodeSize { + if err == nil && evm.chainRules.IsSpuriousDragon && len(ret) > evm.maxCodeSize() { // Gnosis Chain prior to Shanghai didn't have EIP-170 enabled, // but EIP-3860 (part of Shanghai) requires EIP-170. 
if !evm.chainRules.IsAura || evm.config.HasEip3860(evm.chainRules) { @@ -462,6 +462,13 @@ func (evm *EVM) create(caller ContractRef, codeAndHash *codeAndHash, gasRemainin return ret, address, contract.Gas, err } +func (evm *EVM) maxCodeSize() int { + if evm.chainConfig.Bor != nil && evm.chainConfig.Bor.IsAhmedabad(evm.Context.BlockNumber) { + return params.MaxCodeSizePostAhmedabad + } + return params.MaxCodeSize +} + // Create creates a new contract using code as deployment code. // DESCRIBED: docs/programmers_guide/guide.md#nonce func (evm *EVM) Create(caller ContractRef, code []byte, gasRemaining uint64, endowment *uint256.Int, bailout bool) (ret []byte, contractAddr libcommon.Address, leftOverGas uint64, err error) { diff --git a/erigon-lib/chain/chain_config.go b/erigon-lib/chain/chain_config.go index b02068b123f..9c61e3cc351 100644 --- a/erigon-lib/chain/chain_config.go +++ b/erigon-lib/chain/chain_config.go @@ -98,6 +98,7 @@ type BorConfig interface { GetAgraBlock() *big.Int IsNapoli(num uint64) bool GetNapoliBlock() *big.Int + IsAhmedabad(number uint64) bool } func (c *Config) String() string { diff --git a/params/protocol_params.go b/params/protocol_params.go index d760de8658d..c41e75d18e6 100644 --- a/params/protocol_params.go +++ b/params/protocol_params.go @@ -127,8 +127,9 @@ const ( ElasticityMultiplier = 2 // Bounds the maximum gas limit an EIP-1559 block may have. InitialBaseFee = 1000000000 // Initial base fee for EIP-1559 blocks. 
- MaxCodeSize = 24576 // Maximum bytecode to permit for a contract - MaxInitCodeSize = 2 * MaxCodeSize // Maximum initcode to permit in a creation transaction and create instructions + MaxCodeSize = 24576 // Maximum bytecode to permit for a contract + MaxCodeSizePostAhmedabad = 32768 // Maximum bytecode to permit for a contract post Ahmedabad hard fork (bor / polygon pos) (32KB) + MaxInitCodeSize = 2 * MaxCodeSize // Maximum initcode to permit in a creation transaction and create instructions // Precompiled contract gas prices diff --git a/polygon/bor/borcfg/bor_config.go b/polygon/bor/borcfg/bor_config.go index 6c02f069b82..db24874d745 100644 --- a/polygon/bor/borcfg/bor_config.go +++ b/polygon/bor/borcfg/bor_config.go @@ -25,6 +25,7 @@ type BorConfig struct { IndoreBlock *big.Int `json:"indoreBlock"` // Indore switch block (nil = no fork, 0 = already on Indore) AgraBlock *big.Int `json:"agraBlock"` // Agra switch block (nil = no fork, 0 = already on Agra) NapoliBlock *big.Int `json:"napoliBlock"` // Napoli switch block (nil = no fork, 0 = already on Napoli) + AhmedabadBlock *big.Int `json:"ahmedabadBlock"` // Ahmedabad switch block (nil = no fork, 0 = already on Ahmedabad) StateSyncConfirmationDelay map[string]uint64 `json:"stateSyncConfirmationDelay"` // StateSync Confirmation Delay, in seconds, to calculate `to` sprints sprints @@ -130,6 +131,10 @@ func (c *BorConfig) IsNapoli(num uint64) bool { return isForked(c.NapoliBlock, num) } +func (c *BorConfig) IsAhmedabad(number uint64) bool { + return isForked(c.AhmedabadBlock, number) +} + func (c *BorConfig) GetNapoliBlock() *big.Int { return c.NapoliBlock } From fbc4122038194c4455548ba736a8a15fc2f86919 Mon Sep 17 00:00:00 2001 From: lupin012 <58134934+lupin012@users.noreply.github.com> Date: Sat, 31 Aug 2024 15:44:22 +0200 Subject: [PATCH 30/49] =?UTF-8?q?e2:=20fix=20regression=20in=20json=20enco?= =?UTF-8?q?ding=20of=20stack=20values=20in=20traces=20due=20to=20chan?= =?UTF-8?q?=E2=80=A6=20(#11810)?= MIME-Version: 
1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit cherry-pick from PR #11061 from main (the regression is present also in release 2.60) Regression caused by this change in uint256 library: https://github.com/holiman/uint256/commit/f24ed59bea89c23941cf073aeb3f702514f3b371 It was causing trace json to encode stack values as quoted (string) decimal values rather than a hex encoded value. Co-authored-by: bgelb --- eth/tracers/logger/json_stream.go | 2 +- turbo/jsonrpc/trace_adhoc.go | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/eth/tracers/logger/json_stream.go b/eth/tracers/logger/json_stream.go index 5b616e81e7f..d6c87525321 100644 --- a/eth/tracers/logger/json_stream.go +++ b/eth/tracers/logger/json_stream.go @@ -139,7 +139,7 @@ func (l *JsonStreamLogger) CaptureState(pc uint64, op vm.OpCode, gas, cost uint6 if i > 0 { l.stream.WriteMore() } - l.stream.WriteString(stackValue.String()) + l.stream.WriteString(stackValue.Hex()) } l.stream.WriteArrayEnd() } diff --git a/turbo/jsonrpc/trace_adhoc.go b/turbo/jsonrpc/trace_adhoc.go index 99f7d259932..519437796de 100644 --- a/turbo/jsonrpc/trace_adhoc.go +++ b/turbo/jsonrpc/trace_adhoc.go @@ -492,7 +492,7 @@ func (ot *OeTracer) CaptureState(pc uint64, op vm.OpCode, gas, cost uint64, scop } for i := showStack - 1; i >= 0; i-- { if st.Len() > i { - ot.lastVmOp.Ex.Push = append(ot.lastVmOp.Ex.Push, st.Back(i).String()) + ot.lastVmOp.Ex.Push = append(ot.lastVmOp.Ex.Push, st.Back(i).Hex()) } } // Set the "mem" of the last operation @@ -512,7 +512,7 @@ func (ot *OeTracer) CaptureState(pc uint64, op vm.OpCode, gas, cost uint64, scop if ot.lastOffStack != nil { ot.lastOffStack.Ex.Used = int(gas) if st.Len() > 0 { - ot.lastOffStack.Ex.Push = []string{st.Back(0).String()} + ot.lastOffStack.Ex.Push = []string{st.Back(0).Hex()} } else { ot.lastOffStack.Ex.Push = []string{} } @@ -584,7 +584,7 @@ func (ot *OeTracer) CaptureState(pc uint64, op vm.OpCode, gas, cost uint64, scop 
ot.memLenStack = append(ot.memLenStack, 0) case vm.SSTORE: if st.Len() > 1 { - ot.lastVmOp.Ex.Store = &VmTraceStore{Key: st.Back(0).String(), Val: st.Back(1).String()} + ot.lastVmOp.Ex.Store = &VmTraceStore{Key: st.Back(0).Hex(), Val: st.Back(1).Hex()} } } if ot.lastVmOp.Ex.Used < 0 { From 801aa1bdf7a51536996d02c90c0defb84a5d4edf Mon Sep 17 00:00:00 2001 From: Giulio rebuffo Date: Sun, 1 Sep 2024 16:50:40 +0200 Subject: [PATCH 31/49] Cherry-pick: Nil-Ptr in `CurrentHeader` (#11807) --- turbo/execution/eth1/getters.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/turbo/execution/eth1/getters.go b/turbo/execution/eth1/getters.go index 7c94b1e6bca..877a1c53a32 100644 --- a/turbo/execution/eth1/getters.go +++ b/turbo/execution/eth1/getters.go @@ -259,6 +259,9 @@ func (e *EthereumExecutionModule) CurrentHeader(ctx context.Context, _ *emptypb. defer tx.Rollback() hash := rawdb.ReadHeadHeaderHash(tx) number := rawdb.ReadHeaderNumber(tx, hash) + if number == nil { + return nil, errors.New("ethereumExecutionModule.CurrentHeader: no current header yet - probably node not synced yet") + } h, err := e.blockReader.Header(ctx, tx, hash, *number) if err != nil { return nil, fmt.Errorf("ethereumExecutionModule.CurrentHeader: blockReader.Header error %w", err) From 144bb2c91c8065cf6f761a2b3b396ada06aaaae0 Mon Sep 17 00:00:00 2001 From: Giulio rebuffo Date: Sun, 1 Sep 2024 16:51:19 +0200 Subject: [PATCH 32/49] Cherry-pick caplin fixes to release (#11781) --- cl/antiquary/state_antiquary.go | 2 +- cl/beacon/handler/headers.go | 15 +++++++-------- 2 files changed, 8 insertions(+), 9 deletions(-) diff --git a/cl/antiquary/state_antiquary.go b/cl/antiquary/state_antiquary.go index 94087884745..a616c7cc0c4 100644 --- a/cl/antiquary/state_antiquary.go +++ b/cl/antiquary/state_antiquary.go @@ -241,7 +241,7 @@ func (s *Antiquary) IncrementBeaconState(ctx context.Context, to uint64) error { defer progressTimer.Stop() prevSlot := slot first := false - blocksBeforeCommit := 350_000 +
blocksBeforeCommit := 35_000 blocksProcessed := 0 for ; slot < to && blocksProcessed < blocksBeforeCommit; slot++ { diff --git a/cl/beacon/handler/headers.go b/cl/beacon/handler/headers.go index 2eb4b44b51c..9a0a52231af 100644 --- a/cl/beacon/handler/headers.go +++ b/cl/beacon/handler/headers.go @@ -31,7 +31,7 @@ func (a *ApiHandler) getHeaders(w http.ResponseWriter, r *http.Request) (*beacon var potentialRoot libcommon.Hash // First lets find some good candidates for the query. TODO(Giulio2002): this does not give all the headers. switch { - case queryParentHash != nil && querySlot != nil: + case queryParentHash != nil: // get all blocks with this parent slot, err = beacon_indicies.ReadBlockSlotByBlockRoot(tx, *queryParentHash) if err != nil { @@ -40,14 +40,13 @@ func (a *ApiHandler) getHeaders(w http.ResponseWriter, r *http.Request) (*beacon if slot == nil { break } - if *slot+1 != *querySlot { - break + for i := uint64(1); i < a.beaconChainCfg.SlotsPerEpoch; i++ { + potentialRoot, err = beacon_indicies.ReadCanonicalBlockRoot(tx, (*slot)+i) + if err != nil { + return nil, err + } + candidates = append(candidates, potentialRoot) } - potentialRoot, err = beacon_indicies.ReadCanonicalBlockRoot(tx, *slot+1) - if err != nil { - return nil, err - } - candidates = append(candidates, potentialRoot) case queryParentHash == nil && querySlot != nil: potentialRoot, err = beacon_indicies.ReadCanonicalBlockRoot(tx, *querySlot) if err != nil { From afe96b3235a075fa727b9c1b5542dd599d0f68b1 Mon Sep 17 00:00:00 2001 From: Giulio rebuffo Date: Tue, 3 Sep 2024 15:08:19 +0200 Subject: [PATCH 33/49] =?UTF-8?q?[CHERRY-PICK]=20Caplin:=20Hack=20for=20de?= =?UTF-8?q?layed=20fsync=20+=20do=20not=20generate=20snapshots=20if=20too?= =?UTF-8?q?=20man=E2=80=A6=20(#11849)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../freezeblocks/caplin_snapshots.go | 25 ++++++++++++++++++- 1 file changed, 24 insertions(+), 1 deletion(-) diff --git 
a/turbo/snapshotsync/freezeblocks/caplin_snapshots.go b/turbo/snapshotsync/freezeblocks/caplin_snapshots.go index e458e91ade2..baae19776c6 100644 --- a/turbo/snapshotsync/freezeblocks/caplin_snapshots.go +++ b/turbo/snapshotsync/freezeblocks/caplin_snapshots.go @@ -10,6 +10,7 @@ import ( "os" "path/filepath" "sync/atomic" + "time" "github.com/klauspost/compress/zstd" "github.com/ledgerwatch/erigon-lib/common/datadir" @@ -381,6 +382,9 @@ func dumpBeaconBlocksRange(ctx context.Context, db kv.RoDB, fromSlot uint64, toS } defer tx.Rollback() + skippedInARow := 0 + var prevBlockRoot libcommon.Hash + // Generate .seg file, which is just the list of beacon blocks. for i := fromSlot; i < toSlot; i++ { // read root. @@ -388,6 +392,14 @@ func dumpBeaconBlocksRange(ctx context.Context, db kv.RoDB, fromSlot uint64, toS if err != nil { return err } + parentRoot, err := beacon_indicies.ReadParentBlockRoot(ctx, tx, blockRoot) + if err != nil { + return err + } + if blockRoot != (libcommon.Hash{}) && prevBlockRoot != (libcommon.Hash{}) && parentRoot != prevBlockRoot { + return fmt.Errorf("parent block root mismatch at slot %d", i) + } + dump, err := tx.GetOne(kv.BeaconBlocks, dbutils.BlockBodyKey(i, blockRoot)) if err != nil { return err @@ -395,10 +407,18 @@ func dumpBeaconBlocksRange(ctx context.Context, db kv.RoDB, fromSlot uint64, toS if i%20_000 == 0 { logger.Log(lvl, "Dumping beacon blocks", "progress", i) } + if dump == nil { + skippedInARow++ + } else { + prevBlockRoot = blockRoot + skippedInARow = 0 + } + if skippedInARow > 1000 { + return fmt.Errorf("skipped too many blocks in a row during snapshot generation, range %d-%d at slot %d", fromSlot, toSlot, i) + } if err := sn.AddWord(dump); err != nil { return err } - } if err := sn.Compress(); err != nil { return fmt.Errorf("compress: %w", err) @@ -406,6 +426,9 @@ func dumpBeaconBlocksRange(ctx context.Context, db kv.RoDB, fromSlot uint64, toS // Generate .idx file, which is the slot => offset mapping. 
p := &background.Progress{} + // Ugly hack to wait for fsync + time.Sleep(15 * time.Second) + return BeaconSimpleIdx(ctx, f, salt, tmpDir, p, lvl, logger) } From a6fd068ee362d051a76662f69ed73ba5ac8a28fb Mon Sep 17 00:00:00 2001 From: Manav Darji Date: Wed, 4 Sep 2024 15:32:15 +0530 Subject: [PATCH 34/49] Fix bor-heimdall stall after chaindb removal (#11863) In the first run, init validators was skipped due to the check. Updated it by checking the erigon3 code to init snapshot for 1st block (this logic is borrowed from main branch). cherry-pick adaptation of https://github.com/erigontech/erigon/pull/11725 --- eth/stagedsync/stage_bor_heimdall.go | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/eth/stagedsync/stage_bor_heimdall.go b/eth/stagedsync/stage_bor_heimdall.go index 1c02bbd97e8..7db3c79223a 100644 --- a/eth/stagedsync/stage_bor_heimdall.go +++ b/eth/stagedsync/stage_bor_heimdall.go @@ -288,10 +288,15 @@ func BorHeimdallForward( return err } + // this will happen if for example chaindb is removed + if lastPersistedBlockNum > blockNum { + lastPersistedBlockNum = blockNum + } + // if the last time we persisted snapshots is too far away re-run the forward // initialization process - this is to avoid memory growth due to recusrion // in persistValidatorSets - if snap == nil && blockNum-lastPersistedBlockNum > (snapshotPersistInterval*5) { + if snap == nil && (blockNum == 1 || blockNum-lastPersistedBlockNum > (snapshotPersistInterval*5)) { snap, err = initValidatorSets( ctx, tx, From 2769237084e778db74ff230ebe38bac7396e5589 Mon Sep 17 00:00:00 2001 From: Somnath Date: Wed, 4 Sep 2024 16:58:30 +0400 Subject: [PATCH 35/49] core: Check `gasBailout` before deducting balance in trace_call (#11813) **Existing behaviour:** - Add up the possible value that user must pay beforehand to buy gas - Deduct that amount from the sender's account in `intraBlockState`, but: - Don't deduct the gas value amount if the user doesn't have enough, and `gasBailout` 
is set **New behaviour:** - Don't check if sender's balance is enough to pay gas value amount, nor deduct it, if `gasBailout` is set **More rationale** This would mean the sender's account would show `"balance": "="` in `trace_call` rpc method, that is, no change, if gas is the only thing the user pays for. This is fine because the gas price can fluctuate in a real transaction. This also removes the inconsistency of sometimes having to bother deducting the amount if it is less than sender's balance, thereby causing a bug/inconsistency. --- core/state_transition.go | 51 +++++++++++++++++----------------------- 1 file changed, 22 insertions(+), 29 deletions(-) diff --git a/core/state_transition.go b/core/state_transition.go index bd4d9af1160..3f0649e9a44 100644 --- a/core/state_transition.go +++ b/core/state_transition.go @@ -219,49 +219,42 @@ func (st *StateTransition) buyGas(gasBailout bool) error { } } - balanceCheck := gasVal - if st.gasFeeCap != nil { - balanceCheck = st.sharedBuyGasBalance.SetUint64(st.msg.Gas()) - balanceCheck, overflow = balanceCheck.MulOverflow(balanceCheck, st.gasFeeCap) - if overflow { - return fmt.Errorf("%w: address %v", ErrInsufficientFunds, st.msg.From().Hex()) - } - balanceCheck, overflow = balanceCheck.AddOverflow(balanceCheck, st.value) - if overflow { - return fmt.Errorf("%w: address %v", ErrInsufficientFunds, st.msg.From().Hex()) - } - if st.evm.ChainRules().IsCancun { - maxBlobFee, overflow := new(uint256.Int).MulOverflow(st.msg.MaxFeePerBlobGas(), new(uint256.Int).SetUint64(st.msg.BlobGas())) + if !gasBailout { + balanceCheck := gasVal + if st.gasFeeCap != nil { + balanceCheck = st.sharedBuyGasBalance.SetUint64(st.msg.Gas()) + balanceCheck, overflow = balanceCheck.MulOverflow(balanceCheck, st.gasFeeCap) if overflow { return fmt.Errorf("%w: address %v", ErrInsufficientFunds, st.msg.From().Hex()) } - balanceCheck, overflow = balanceCheck.AddOverflow(balanceCheck, maxBlobFee) + balanceCheck, overflow = 
balanceCheck.AddOverflow(balanceCheck, st.value) if overflow { return fmt.Errorf("%w: address %v", ErrInsufficientFunds, st.msg.From().Hex()) } + if st.evm.ChainRules().IsCancun { + maxBlobFee, overflow := new(uint256.Int).MulOverflow(st.msg.MaxFeePerBlobGas(), new(uint256.Int).SetUint64(st.msg.BlobGas())) + if overflow { + return fmt.Errorf("%w: address %v", ErrInsufficientFunds, st.msg.From().Hex()) + } + balanceCheck, overflow = balanceCheck.AddOverflow(balanceCheck, maxBlobFee) + if overflow { + return fmt.Errorf("%w: address %v", ErrInsufficientFunds, st.msg.From().Hex()) + } + } } - } - var subBalance = false - if have, want := st.state.GetBalance(st.msg.From()), balanceCheck; have.Cmp(want) < 0 { - if !gasBailout { + if have, want := st.state.GetBalance(st.msg.From()), balanceCheck; have.Cmp(want) < 0 { return fmt.Errorf("%w: address %v have %v want %v", ErrInsufficientFunds, st.msg.From().Hex(), have, want) } - } else { - subBalance = true + st.state.SubBalance(st.msg.From(), gasVal) + st.state.SubBalance(st.msg.From(), blobGasVal) } + if err := st.gp.SubGas(st.msg.Gas()); err != nil { - if !gasBailout { - return err - } + return err } st.gasRemaining += st.msg.Gas() st.initialGas = st.msg.Gas() st.evm.BlobFee = blobGasVal - - if subBalance { - st.state.SubBalance(st.msg.From(), gasVal) - st.state.SubBalance(st.msg.From(), blobGasVal) - } return nil } @@ -426,7 +419,7 @@ func (st *StateTransition) TransitionDb(refunds bool, gasBailout bool) (*Executi st.state.SetNonce(msg.From(), st.state.GetNonce(sender.Address())+1) ret, st.gasRemaining, vmerr = st.evm.Call(sender, st.to(), st.data, st.gasRemaining, st.value, bailout) } - if refunds { + if refunds && !gasBailout { if rules.IsLondon { // After EIP-3529: refunds are capped to gasUsed / 5 st.refundGas(params.RefundQuotientEIP3529) From 5ba8303457498b741c4409ad0ea65ac7844cb637 Mon Sep 17 00:00:00 2001 From: milen <94537774+taratorio@users.noreply.github.com> Date: Thu, 5 Sep 2024 19:25:58 +0100 Subject: 
[PATCH 36/49] Refactor logsFilter to prevent concurrent map fatal errors (#10672) (#11892) relates to https://github.com/erigontech/erigon/issues/11890 cherry pick from E3 [2f2ce6afaa67d6d013bf7d785ed88557e8a9cc21](https://github.com/erigontech/erigon/commit/2f2ce6afaa67d6d013bf7d785ed88557e8a9cc21) ----- #### Issue: At line 129 in `logsfilter.go`, we had the following line of code: ```go _, addrOk := filter.addrs[gointerfaces.ConvertH160toAddress(eventLog.Address)] ``` This line caused a panic due to a fatal error: ```logs fatal error: concurrent map read and map write goroutine 106 [running]: github.com/ledgerwatch/erigon/turbo/rpchelper.(*LogsFilterAggregator).distributeLog.func1({0xc009701db8?, 0x8?}, 0xc135d26050) github.com/ledgerwatch/erigon/turbo/rpchelper/logsfilter.go:129 +0xe7 github.com/ledgerwatch/erigon/turbo/rpchelper.(*SyncMap[...]).Range(0xc009701eb0?, 0xc009701e70?) github.com/ledgerwatch/erigon/turbo/rpchelper/subscription.go:97 +0x11a github.com/ledgerwatch/erigon/turbo/rpchelper.(*LogsFilterAggregator).distributeLog(0x25f4600?, 0xc0000ce090?) github.com/ledgerwatch/erigon/turbo/rpchelper/logsfilter.go:131 +0xc7 github.com/ledgerwatch/erigon/turbo/rpchelper.(*Filters).OnNewLogs(...) github.com/ledgerwatch/erigon/turbo/rpchelper/filters.go:547 github.com/ledgerwatch/erigon/cmd/rpcdaemon/rpcservices.(*RemoteBackend).SubscribeLogs(0xc0019c2f50, {0x32f0040, 0xc001b4a280}, 0xc001c0c0e0, 0x0?) github.com/ledgerwatch/erigon/cmd/rpcdaemon/rpcservices/eth_backend.go:227 +0x1d1 github.com/ledgerwatch/erigon/turbo/rpchelper.New.func2() github.com/ledgerwatch/erigon/turbo/rpchelper/filters.go:102 +0xec created by github.com/ledgerwatch/erigon/turbo/rpchelper.New github.com/ledgerwatch/erigon/turbo/rpchelper/filters.go:92 +0x652 ``` This error indicates that there were simultaneous read and write operations on the `filter.addrs` map, leading to a race condition. 
#### Solution: To resolve this issue, I implemented the following changes: - Moved SyncMap to erigon-lib common library: This allows us to utilize a thread-safe map across different packages that require synchronized map access. - Refactored logsFilter to use SyncMap: By replacing the standard map with SyncMap, we ensured that all map operations are thread-safe, thus preventing concurrent read and write errors. - Added documentation for SyncMap usage: Detailed documentation was provided to guide the usage of SyncMap and related refactored components, ensuring clarity and proper utilization. Co-authored-by: Bret <787344+bretep@users.noreply.github.com> --- erigon-lib/common/concurrent/concurrent.go | 79 ++++++++++++ turbo/rpchelper/filters.go | 120 ++++++++++++++----- turbo/rpchelper/filters_test.go | 20 ++-- turbo/rpchelper/logsfilter.go | 133 +++++++++++++++------ turbo/rpchelper/subscription.go | 66 ---------- 5 files changed, 276 insertions(+), 142 deletions(-) create mode 100644 erigon-lib/common/concurrent/concurrent.go diff --git a/erigon-lib/common/concurrent/concurrent.go b/erigon-lib/common/concurrent/concurrent.go new file mode 100644 index 00000000000..a29301ea79b --- /dev/null +++ b/erigon-lib/common/concurrent/concurrent.go @@ -0,0 +1,79 @@ +package concurrent + +import "sync" + +// NewSyncMap initializes and returns a new instance of SyncMap. +func NewSyncMap[K comparable, T any]() *SyncMap[K, T] { + return &SyncMap[K, T]{ + m: make(map[K]T), + } +} + +// SyncMap is a generic map that uses a read-write mutex to ensure thread-safe access. +type SyncMap[K comparable, T any] struct { + m map[K]T + mu sync.RWMutex +} + +// Get retrieves the value associated with the given key. +func (m *SyncMap[K, T]) Get(k K) (res T, ok bool) { + m.mu.RLock() + defer m.mu.RUnlock() + res, ok = m.m[k] + return res, ok +} + +// Put sets the value for the given key, returning the previous value if present. 
+func (m *SyncMap[K, T]) Put(k K, v T) (T, bool) { + m.mu.Lock() + defer m.mu.Unlock() + old, ok := m.m[k] + m.m[k] = v + return old, ok +} + +// Do performs a custom operation on the value associated with the given key. +func (m *SyncMap[K, T]) Do(k K, fn func(T, bool) (T, bool)) (after T, ok bool) { + m.mu.Lock() + defer m.mu.Unlock() + val, ok := m.m[k] + nv, save := fn(val, ok) + if save { + m.m[k] = nv + } else { + delete(m.m, k) + } + return nv, ok +} + +// DoAndStore performs a custom operation on the value associated with the given key and stores the result. +func (m *SyncMap[K, T]) DoAndStore(k K, fn func(t T, ok bool) T) (after T, ok bool) { + return m.Do(k, func(t T, b bool) (T, bool) { + res := fn(t, b) + return res, true + }) +} + +// Range calls a function for each key-value pair in the map. +func (m *SyncMap[K, T]) Range(fn func(k K, v T) error) error { + m.mu.RLock() + defer m.mu.RUnlock() + for k, v := range m.m { + if err := fn(k, v); err != nil { + return err + } + } + return nil +} + +// Delete removes the value associated with the given key, if present. +func (m *SyncMap[K, T]) Delete(k K) (t T, deleted bool) { + m.mu.Lock() + defer m.mu.Unlock() + val, ok := m.m[k] + if !ok { + return t, false + } + delete(m.m, k) + return val, true +} diff --git a/turbo/rpchelper/filters.go b/turbo/rpchelper/filters.go index 052b8c9071b..e3fed0d1a29 100644 --- a/turbo/rpchelper/filters.go +++ b/turbo/rpchelper/filters.go @@ -13,6 +13,7 @@ import ( "time" libcommon "github.com/ledgerwatch/erigon-lib/common" + "github.com/ledgerwatch/erigon-lib/common/concurrent" "github.com/ledgerwatch/erigon-lib/gointerfaces" "github.com/ledgerwatch/erigon-lib/gointerfaces/grpcutil" "github.com/ledgerwatch/erigon-lib/gointerfaces/remote" @@ -26,39 +27,45 @@ import ( "github.com/ledgerwatch/erigon/rlp" ) +// Filters holds the state for managing subscriptions to various Ethereum events. 
+// It allows for the subscription and management of events such as new blocks, pending transactions, +// logs, and other Ethereum-related activities. type Filters struct { mu sync.RWMutex pendingBlock *types.Block - headsSubs *SyncMap[HeadsSubID, Sub[*types.Header]] - pendingLogsSubs *SyncMap[PendingLogsSubID, Sub[types.Logs]] - pendingBlockSubs *SyncMap[PendingBlockSubID, Sub[*types.Block]] - pendingTxsSubs *SyncMap[PendingTxsSubID, Sub[[]types.Transaction]] + headsSubs *concurrent.SyncMap[HeadsSubID, Sub[*types.Header]] + pendingLogsSubs *concurrent.SyncMap[PendingLogsSubID, Sub[types.Logs]] + pendingBlockSubs *concurrent.SyncMap[PendingBlockSubID, Sub[*types.Block]] + pendingTxsSubs *concurrent.SyncMap[PendingTxsSubID, Sub[[]types.Transaction]] logsSubs *LogsFilterAggregator logsRequestor atomic.Value onNewSnapshot func() storeMu sync.Mutex - logsStores *SyncMap[LogsSubID, []*types.Log] - pendingHeadsStores *SyncMap[HeadsSubID, []*types.Header] - pendingTxsStores *SyncMap[PendingTxsSubID, [][]types.Transaction] + logsStores *concurrent.SyncMap[LogsSubID, []*types.Log] + pendingHeadsStores *concurrent.SyncMap[HeadsSubID, []*types.Header] + pendingTxsStores *concurrent.SyncMap[PendingTxsSubID, [][]types.Transaction] logger log.Logger } +// New creates a new Filters instance, initializes it, and starts subscription goroutines for Ethereum events. +// It requires a context, Ethereum backend, transaction pool client, mining client, snapshot callback function, +// and a logger for logging events. 
func New(ctx context.Context, ethBackend ApiBackend, txPool txpool.TxpoolClient, mining txpool.MiningClient, onNewSnapshot func(), logger log.Logger) *Filters { logger.Info("rpc filters: subscribing to Erigon events") ff := &Filters{ - headsSubs: NewSyncMap[HeadsSubID, Sub[*types.Header]](), - pendingTxsSubs: NewSyncMap[PendingTxsSubID, Sub[[]types.Transaction]](), - pendingLogsSubs: NewSyncMap[PendingLogsSubID, Sub[types.Logs]](), - pendingBlockSubs: NewSyncMap[PendingBlockSubID, Sub[*types.Block]](), + headsSubs: concurrent.NewSyncMap[HeadsSubID, Sub[*types.Header]](), + pendingTxsSubs: concurrent.NewSyncMap[PendingTxsSubID, Sub[[]types.Transaction]](), + pendingLogsSubs: concurrent.NewSyncMap[PendingLogsSubID, Sub[types.Logs]](), + pendingBlockSubs: concurrent.NewSyncMap[PendingBlockSubID, Sub[*types.Block]](), logsSubs: NewLogsFilterAggregator(), onNewSnapshot: onNewSnapshot, - logsStores: NewSyncMap[LogsSubID, []*types.Log](), - pendingHeadsStores: NewSyncMap[HeadsSubID, []*types.Header](), - pendingTxsStores: NewSyncMap[PendingTxsSubID, [][]types.Transaction](), + logsStores: concurrent.NewSyncMap[LogsSubID, []*types.Log](), + pendingHeadsStores: concurrent.NewSyncMap[HeadsSubID, []*types.Header](), + pendingTxsStores: concurrent.NewSyncMap[PendingTxsSubID, [][]types.Transaction](), logger: logger, } @@ -185,12 +192,15 @@ func New(ctx context.Context, ethBackend ApiBackend, txPool txpool.TxpoolClient, return ff } +// LastPendingBlock returns the last pending block that was received. func (ff *Filters) LastPendingBlock() *types.Block { ff.mu.RLock() defer ff.mu.RUnlock() return ff.pendingBlock } +// subscribeToPendingTransactions subscribes to pending transactions using the given transaction pool client. +// It listens for new transactions and processes them as they arrive. 
func (ff *Filters) subscribeToPendingTransactions(ctx context.Context, txPool txpool.TxpoolClient) error { subscription, err := txPool.OnAdd(ctx, &txpool.OnAddRequest{}, grpc.WaitForReady(true)) if err != nil { @@ -211,6 +221,8 @@ func (ff *Filters) subscribeToPendingTransactions(ctx context.Context, txPool tx return nil } +// subscribeToPendingBlocks subscribes to pending blocks using the given mining client. +// It listens for new pending blocks and processes them as they arrive. func (ff *Filters) subscribeToPendingBlocks(ctx context.Context, mining txpool.MiningClient) error { subscription, err := mining.OnPendingBlock(ctx, &txpool.OnPendingBlockRequest{}, grpc.WaitForReady(true)) if err != nil { @@ -237,6 +249,8 @@ func (ff *Filters) subscribeToPendingBlocks(ctx context.Context, mining txpool.M return nil } +// HandlePendingBlock handles a new pending block received from the mining client. +// It updates the internal state and notifies subscribers about the new block. func (ff *Filters) HandlePendingBlock(reply *txpool.OnPendingBlockReply) { b := &types.Block{} if reply == nil || len(reply.RplBlock) == 0 { @@ -256,6 +270,8 @@ func (ff *Filters) HandlePendingBlock(reply *txpool.OnPendingBlockReply) { }) } +// subscribeToPendingLogs subscribes to pending logs using the given mining client. +// It listens for new pending logs and processes them as they arrive. func (ff *Filters) subscribeToPendingLogs(ctx context.Context, mining txpool.MiningClient) error { subscription, err := mining.OnPendingLogs(ctx, &txpool.OnPendingLogsRequest{}, grpc.WaitForReady(true)) if err != nil { @@ -281,6 +297,8 @@ func (ff *Filters) subscribeToPendingLogs(ctx context.Context, mining txpool.Min return nil } +// HandlePendingLogs handles new pending logs received from the mining client. +// It updates the internal state and notifies subscribers about the new logs. 
func (ff *Filters) HandlePendingLogs(reply *txpool.OnPendingLogsReply) { if len(reply.RplLogs) == 0 { return @@ -295,6 +313,8 @@ func (ff *Filters) HandlePendingLogs(reply *txpool.OnPendingLogsReply) { }) } +// SubscribeNewHeads subscribes to new block headers and returns a channel to receive the headers +// and a subscription ID to manage the subscription. func (ff *Filters) SubscribeNewHeads(size int) (<-chan *types.Header, HeadsSubID) { id := HeadsSubID(generateSubscriptionID()) sub := newChanSub[*types.Header](size) @@ -302,6 +322,8 @@ func (ff *Filters) SubscribeNewHeads(size int) (<-chan *types.Header, HeadsSubID return sub.ch, id } +// UnsubscribeHeads unsubscribes from new block headers using the given subscription ID. +// It returns true if the unsubscription was successful, otherwise false. func (ff *Filters) UnsubscribeHeads(id HeadsSubID) bool { ch, ok := ff.headsSubs.Get(id) if !ok { @@ -315,6 +337,8 @@ func (ff *Filters) UnsubscribeHeads(id HeadsSubID) bool { return true } +// SubscribePendingLogs subscribes to pending logs and returns a channel to receive the logs +// and a subscription ID to manage the subscription. It uses the specified filter criteria. func (ff *Filters) SubscribePendingLogs(size int) (<-chan types.Logs, PendingLogsSubID) { id := PendingLogsSubID(generateSubscriptionID()) sub := newChanSub[types.Logs](size) @@ -322,6 +346,7 @@ func (ff *Filters) SubscribePendingLogs(size int) (<-chan types.Logs, PendingLog return sub.ch, id } +// UnsubscribePendingLogs unsubscribes from pending logs using the given subscription ID. func (ff *Filters) UnsubscribePendingLogs(id PendingLogsSubID) { ch, ok := ff.pendingLogsSubs.Get(id) if !ok { @@ -331,6 +356,8 @@ func (ff *Filters) UnsubscribePendingLogs(id PendingLogsSubID) { ff.pendingLogsSubs.Delete(id) } +// SubscribePendingBlock subscribes to pending blocks and returns a channel to receive the blocks +// and a subscription ID to manage the subscription. 
func (ff *Filters) SubscribePendingBlock(size int) (<-chan *types.Block, PendingBlockSubID) { id := PendingBlockSubID(generateSubscriptionID()) sub := newChanSub[*types.Block](size) @@ -338,6 +365,7 @@ func (ff *Filters) SubscribePendingBlock(size int) (<-chan *types.Block, Pending return sub.ch, id } +// UnsubscribePendingBlock unsubscribes from pending blocks using the given subscription ID. func (ff *Filters) UnsubscribePendingBlock(id PendingBlockSubID) { ch, ok := ff.pendingBlockSubs.Get(id) if !ok { @@ -347,6 +375,8 @@ func (ff *Filters) UnsubscribePendingBlock(id PendingBlockSubID) { ff.pendingBlockSubs.Delete(id) } +// SubscribePendingTxs subscribes to pending transactions and returns a channel to receive the transactions +// and a subscription ID to manage the subscription. func (ff *Filters) SubscribePendingTxs(size int) (<-chan []types.Transaction, PendingTxsSubID) { id := PendingTxsSubID(generateSubscriptionID()) sub := newChanSub[[]types.Transaction](size) @@ -354,6 +384,8 @@ func (ff *Filters) SubscribePendingTxs(size int) (<-chan []types.Transaction, Pe return sub.ch, id } +// UnsubscribePendingTxs unsubscribes from pending transactions using the given subscription ID. +// It returns true if the unsubscription was successful, otherwise false. func (ff *Filters) UnsubscribePendingTxs(id PendingTxsSubID) bool { ch, ok := ff.pendingTxsSubs.Get(id) if !ok { @@ -367,31 +399,45 @@ func (ff *Filters) UnsubscribePendingTxs(id PendingTxsSubID) bool { return true } -func (ff *Filters) SubscribeLogs(size int, crit filters.FilterCriteria) (<-chan *types.Log, LogsSubID) { +// SubscribeLogs subscribes to logs using the specified filter criteria and returns a channel to receive the logs +// and a subscription ID to manage the subscription. 
+func (ff *Filters) SubscribeLogs(size int, criteria filters.FilterCriteria) (<-chan *types.Log, LogsSubID) { sub := newChanSub[*types.Log](size) id, f := ff.logsSubs.insertLogsFilter(sub) - f.addrs = map[libcommon.Address]int{} - if len(crit.Addresses) == 0 { + + // Initialize address and topic maps + f.addrs = concurrent.NewSyncMap[libcommon.Address, int]() + f.topics = concurrent.NewSyncMap[libcommon.Hash, int]() + + // Handle addresses + if len(criteria.Addresses) == 0 { + // If no addresses are specified, it means all addresses should be included f.allAddrs = 1 } else { - for _, addr := range crit.Addresses { - f.addrs[addr] = 1 + for _, addr := range criteria.Addresses { + f.addrs.Put(addr, 1) } } - f.topics = map[libcommon.Hash]int{} - if len(crit.Topics) == 0 { + + // Handle topics + if len(criteria.Topics) == 0 { + // If no topics are specified, it means all topics should be included f.allTopics = 1 } else { - for _, topics := range crit.Topics { + for _, topics := range criteria.Topics { for _, topic := range topics { - f.topics[topic] = 1 + f.topics.Put(topic, 1) } } } - f.topicsOriginal = crit.Topics + + // Store original topics for reference + f.topicsOriginal = criteria.Topics + + // Add the filter to the list of log filters ff.logsSubs.addLogsFilters(f) - // if any filter in the aggregate needs all addresses or all topics then the global log subscription needs to - // allow all addresses or topics through + + // Create a filter request based on the aggregated filters lfr := ff.logsSubs.createFilterRequest() addresses, topics := ff.logsSubs.getAggMaps() for addr := range addresses { @@ -412,12 +458,15 @@ func (ff *Filters) SubscribeLogs(size int, crit filters.FilterCriteria) (<-chan return sub.ch, id } +// loadLogsRequester loads the current logs requester and returns it. 
func (ff *Filters) loadLogsRequester() any { ff.mu.Lock() defer ff.mu.Unlock() return ff.logsRequestor.Load() } +// UnsubscribeLogs unsubscribes from logs using the given subscription ID. +// It returns true if the unsubscription was successful, otherwise false. func (ff *Filters) UnsubscribeLogs(id LogsSubID) bool { isDeleted := ff.logsSubs.removeLogsFilter(id) // if any filters in the aggregate need all addresses or all topics then the request to the central @@ -445,11 +494,12 @@ func (ff *Filters) UnsubscribeLogs(id LogsSubID) bool { return isDeleted } +// deleteLogStore deletes the log store associated with the given subscription ID. func (ff *Filters) deleteLogStore(id LogsSubID) { ff.logsStores.Delete(id) } -// OnNewEvent is called when there is a new Event from the remote +// OnNewEvent is called when there is a new event from the remote and processes it. func (ff *Filters) OnNewEvent(event *remote.SubscribeReply) { err := ff.onNewEvent(event) if err != nil { @@ -457,6 +507,7 @@ func (ff *Filters) OnNewEvent(event *remote.SubscribeReply) { } } +// onNewEvent processes the given event from the remote and updates the internal state. func (ff *Filters) onNewEvent(event *remote.SubscribeReply) error { switch event.Type { case remote.Event_HEADER: @@ -474,6 +525,7 @@ func (ff *Filters) onNewEvent(event *remote.SubscribeReply) error { } // TODO: implement? +// onPendingLog handles a new pending log event from the remote. func (ff *Filters) onPendingLog(event *remote.SubscribeReply) error { // payload := event.Data // var logs types.Logs @@ -490,6 +542,7 @@ func (ff *Filters) onPendingLog(event *remote.SubscribeReply) error { } // TODO: implement? +// onPendingBlock handles a new pending block event from the remote. 
func (ff *Filters) onPendingBlock(event *remote.SubscribeReply) error { // payload := event.Data // var block types.Block @@ -505,6 +558,7 @@ func (ff *Filters) onPendingBlock(event *remote.SubscribeReply) error { return nil } +// onNewHeader handles a new block header event from the remote and updates the internal state. func (ff *Filters) onNewHeader(event *remote.SubscribeReply) error { payload := event.Data var header types.Header @@ -521,6 +575,7 @@ func (ff *Filters) onNewHeader(event *remote.SubscribeReply) error { }) } +// OnNewTx handles a new transaction event from the transaction pool and processes it. func (ff *Filters) OnNewTx(reply *txpool.OnAddReply) { txs := make([]types.Transaction, len(reply.RplTxs)) for i, rlpTx := range reply.RplTxs { @@ -541,11 +596,12 @@ func (ff *Filters) OnNewTx(reply *txpool.OnAddReply) { }) } -// OnNewLogs is called when there is a new log +// OnNewLogs handles a new log event from the remote and processes it. func (ff *Filters) OnNewLogs(reply *remote.SubscribeLogsReply) { ff.logsSubs.distributeLog(reply) } +// AddLogs adds logs to the store associated with the given subscription ID. func (ff *Filters) AddLogs(id LogsSubID, logs *types.Log) { ff.logsStores.DoAndStore(id, func(st []*types.Log, ok bool) []*types.Log { if !ok { @@ -556,6 +612,8 @@ func (ff *Filters) AddLogs(id LogsSubID, logs *types.Log) { }) } +// ReadLogs reads logs from the store associated with the given subscription ID. +// It returns the logs and a boolean indicating whether the logs were found. func (ff *Filters) ReadLogs(id LogsSubID) ([]*types.Log, bool) { res, ok := ff.logsStores.Delete(id) if !ok { @@ -564,6 +622,7 @@ func (ff *Filters) ReadLogs(id LogsSubID) ([]*types.Log, bool) { return res, true } +// AddPendingBlock adds a pending block header to the store associated with the given subscription ID. 
func (ff *Filters) AddPendingBlock(id HeadsSubID, block *types.Header) { ff.pendingHeadsStores.DoAndStore(id, func(st []*types.Header, ok bool) []*types.Header { if !ok { @@ -574,6 +633,8 @@ func (ff *Filters) AddPendingBlock(id HeadsSubID, block *types.Header) { }) } +// ReadPendingBlocks reads pending block headers from the store associated with the given subscription ID. +// It returns the block headers and a boolean indicating whether the headers were found. func (ff *Filters) ReadPendingBlocks(id HeadsSubID) ([]*types.Header, bool) { res, ok := ff.pendingHeadsStores.Delete(id) if !ok { @@ -582,6 +643,7 @@ func (ff *Filters) ReadPendingBlocks(id HeadsSubID) ([]*types.Header, bool) { return res, true } +// AddPendingTxs adds pending transactions to the store associated with the given subscription ID. func (ff *Filters) AddPendingTxs(id PendingTxsSubID, txs []types.Transaction) { ff.pendingTxsStores.DoAndStore(id, func(st [][]types.Transaction, ok bool) [][]types.Transaction { if !ok { @@ -592,6 +654,8 @@ func (ff *Filters) AddPendingTxs(id PendingTxsSubID, txs []types.Transaction) { }) } +// ReadPendingTxs reads pending transactions from the store associated with the given subscription ID. +// It returns the transactions and a boolean indicating whether the transactions were found. 
func (ff *Filters) ReadPendingTxs(id PendingTxsSubID) ([][]types.Transaction, bool) { res, ok := ff.pendingTxsStores.Delete(id) if !ok { diff --git a/turbo/rpchelper/filters_test.go b/turbo/rpchelper/filters_test.go index 087a027348d..f90eb3912d1 100644 --- a/turbo/rpchelper/filters_test.go +++ b/turbo/rpchelper/filters_test.go @@ -270,7 +270,7 @@ func TestFilters_SubscribeLogsGeneratesCorrectLogFilterRequest(t *testing.T) { if lastFilterRequest.AllTopics == false { t.Error("2: expected all topics to be true") } - if len(lastFilterRequest.Addresses) != 1 && lastFilterRequest.Addresses[0] != address1H160 { + if len(lastFilterRequest.Addresses) != 1 && gointerfaces.ConvertH160toAddress(lastFilterRequest.Addresses[0]) != gointerfaces.ConvertH160toAddress(address1H160) { t.Error("2: expected the address to match the last request") } @@ -288,10 +288,10 @@ func TestFilters_SubscribeLogsGeneratesCorrectLogFilterRequest(t *testing.T) { if lastFilterRequest.AllTopics == false { t.Error("3: expected all topics to be true") } - if len(lastFilterRequest.Addresses) != 1 && lastFilterRequest.Addresses[0] != address1H160 { + if len(lastFilterRequest.Addresses) != 1 && gointerfaces.ConvertH160toAddress(lastFilterRequest.Addresses[0]) != gointerfaces.ConvertH160toAddress(address1H160) { t.Error("3: expected the address to match the previous request") } - if len(lastFilterRequest.Topics) != 1 && lastFilterRequest.Topics[0] != topic1H256 { + if len(lastFilterRequest.Topics) != 1 && gointerfaces.ConvertH256ToHash(lastFilterRequest.Topics[0]) != gointerfaces.ConvertH256ToHash(topic1H256) { t.Error("3: expected the topics to match the last request") } @@ -307,10 +307,10 @@ func TestFilters_SubscribeLogsGeneratesCorrectLogFilterRequest(t *testing.T) { if lastFilterRequest.AllTopics == false { t.Error("4: expected all topics to be true") } - if len(lastFilterRequest.Addresses) != 1 && lastFilterRequest.Addresses[0] != address1H160 { + if len(lastFilterRequest.Addresses) != 1 && 
gointerfaces.ConvertH160toAddress(lastFilterRequest.Addresses[0]) != gointerfaces.ConvertH160toAddress(address1H160) { t.Error("4: expected an address to be present") } - if len(lastFilterRequest.Topics) != 1 && lastFilterRequest.Topics[0] != topic1H256 { + if len(lastFilterRequest.Topics) != 1 && gointerfaces.ConvertH256ToHash(lastFilterRequest.Topics[0]) != gointerfaces.ConvertH256ToHash(topic1H256) { t.Error("4: expected a topic to be present") } @@ -327,7 +327,7 @@ func TestFilters_SubscribeLogsGeneratesCorrectLogFilterRequest(t *testing.T) { if len(lastFilterRequest.Addresses) != 0 { t.Error("5: expected addresses to be empty") } - if len(lastFilterRequest.Topics) != 1 && lastFilterRequest.Topics[0] != topic1H256 { + if len(lastFilterRequest.Topics) != 1 && gointerfaces.ConvertH256ToHash(lastFilterRequest.Topics[0]) != gointerfaces.ConvertH256ToHash(topic1H256) { t.Error("5: expected a topic to be present") } @@ -335,15 +335,15 @@ func TestFilters_SubscribeLogsGeneratesCorrectLogFilterRequest(t *testing.T) { // and nothing in the address or topics lists f.UnsubscribeLogs(id3) if lastFilterRequest.AllAddresses == true { - t.Error("5: expected all addresses to be false") + t.Error("6: expected all addresses to be false") } if lastFilterRequest.AllTopics == true { - t.Error("5: expected all topics to be false") + t.Error("6: expected all topics to be false") } if len(lastFilterRequest.Addresses) != 0 { - t.Error("5: expected addresses to be empty") + t.Error("6: expected addresses to be empty") } if len(lastFilterRequest.Topics) != 0 { - t.Error("5: expected topics to be empty") + t.Error("6: expected topics to be empty") } } diff --git a/turbo/rpchelper/logsfilter.go b/turbo/rpchelper/logsfilter.go index 9b14eb2ea7e..8a6c3e06043 100644 --- a/turbo/rpchelper/logsfilter.go +++ b/turbo/rpchelper/logsfilter.go @@ -4,6 +4,7 @@ import ( "sync" libcommon "github.com/ledgerwatch/erigon-lib/common" + "github.com/ledgerwatch/erigon-lib/common/concurrent" 
"github.com/ledgerwatch/erigon-lib/gointerfaces" "github.com/ledgerwatch/erigon-lib/gointerfaces/remote" @@ -11,56 +12,76 @@ import ( ) type LogsFilterAggregator struct { - aggLogsFilter LogsFilter // Aggregation of all current log filters - logsFilters *SyncMap[LogsSubID, *LogsFilter] // Filter for each subscriber, keyed by filterID + aggLogsFilter LogsFilter // Aggregation of all current log filters + logsFilters *concurrent.SyncMap[LogsSubID, *LogsFilter] // Filter for each subscriber, keyed by filterID logsFilterLock sync.RWMutex } // LogsFilter is used for both representing log filter for a specific subscriber (RPC daemon usually) // and "aggregated" log filter representing a union of all subscribers. Therefore, the values in -// the mappings are counters (of type int) and they get deleted when counter goes back to 0 -// Also, addAddr and allTopic are int instead of bool because they are also counter, counting -// how many subscribers have this set on +// the mappings are counters (of type int) and they get deleted when counter goes back to 0. +// Also, addAddr and allTopic are int instead of bool because they are also counters, counting +// how many subscribers have this set on. type LogsFilter struct { allAddrs int - addrs map[libcommon.Address]int + addrs *concurrent.SyncMap[libcommon.Address, int] allTopics int - topics map[libcommon.Hash]int + topics *concurrent.SyncMap[libcommon.Hash, int] topicsOriginal [][]libcommon.Hash // Original topic filters to be applied before distributing to individual subscribers sender Sub[*types2.Log] // nil for aggregate subscriber, for appropriate stream server otherwise } +// Send sends a log to the subscriber represented by the LogsFilter. +// It forwards the log to the subscriber's sender. func (l *LogsFilter) Send(lg *types2.Log) { l.sender.Send(lg) } + +// Close closes the sender associated with the LogsFilter. +// It is used to properly clean up and release resources associated with the sender. 
func (l *LogsFilter) Close() { l.sender.Close() } +// NewLogsFilterAggregator creates and returns a new instance of LogsFilterAggregator. +// It initializes the aggregated log filter and the map of individual log filters. func NewLogsFilterAggregator() *LogsFilterAggregator { return &LogsFilterAggregator{ aggLogsFilter: LogsFilter{ - addrs: make(map[libcommon.Address]int), - topics: make(map[libcommon.Hash]int), + addrs: concurrent.NewSyncMap[libcommon.Address, int](), + topics: concurrent.NewSyncMap[libcommon.Hash, int](), }, - logsFilters: NewSyncMap[LogsSubID, *LogsFilter](), + logsFilters: concurrent.NewSyncMap[LogsSubID, *LogsFilter](), } } +// insertLogsFilter inserts a new log filter into the LogsFilterAggregator with the specified sender. +// It generates a new filter ID, creates a new LogsFilter, and adds it to the logsFilters map. func (a *LogsFilterAggregator) insertLogsFilter(sender Sub[*types2.Log]) (LogsSubID, *LogsFilter) { + a.logsFilterLock.Lock() + defer a.logsFilterLock.Unlock() filterId := LogsSubID(generateSubscriptionID()) - filter := &LogsFilter{addrs: map[libcommon.Address]int{}, topics: map[libcommon.Hash]int{}, sender: sender} + filter := &LogsFilter{ + addrs: concurrent.NewSyncMap[libcommon.Address, int](), + topics: concurrent.NewSyncMap[libcommon.Hash, int](), + sender: sender, + } a.logsFilters.Put(filterId, filter) return filterId, filter } +// removeLogsFilter removes a log filter identified by filterId from the LogsFilterAggregator. +// It closes the filter and subtracts its addresses and topics from the aggregated filter. 
func (a *LogsFilterAggregator) removeLogsFilter(filterId LogsSubID) bool { + a.logsFilterLock.Lock() + defer a.logsFilterLock.Unlock() + filter, ok := a.logsFilters.Get(filterId) if !ok { return false } filter.Close() - filter, ok = a.logsFilters.Delete(filterId) + _, ok = a.logsFilters.Delete(filterId) if !ok { return false } @@ -68,6 +89,8 @@ func (a *LogsFilterAggregator) removeLogsFilter(filterId LogsSubID) bool { return true } +// createFilterRequest creates a LogsFilterRequest from the current state of the LogsFilterAggregator. +// It generates a request that represents the union of all current log filters. func (a *LogsFilterAggregator) createFilterRequest() *remote.LogsFilterRequest { a.logsFilterLock.RLock() defer a.logsFilterLock.RUnlock() @@ -77,56 +100,88 @@ func (a *LogsFilterAggregator) createFilterRequest() *remote.LogsFilterRequest { } } +// subtractLogFilters subtracts the counts of addresses and topics in the given LogsFilter from the aggregated filter. +// It decrements the counters for each address and topic in the aggregated filter by the corresponding counts in the +// provided LogsFilter. If the count for any address or topic reaches zero, it is removed from the aggregated filter. 
func (a *LogsFilterAggregator) subtractLogFilters(f *LogsFilter) { - a.logsFilterLock.Lock() - defer a.logsFilterLock.Unlock() a.aggLogsFilter.allAddrs -= f.allAddrs - for addr, count := range f.addrs { - a.aggLogsFilter.addrs[addr] -= count - if a.aggLogsFilter.addrs[addr] == 0 { - delete(a.aggLogsFilter.addrs, addr) - } - } + f.addrs.Range(func(addr libcommon.Address, count int) error { + a.aggLogsFilter.addrs.Do(addr, func(value int, exists bool) (int, bool) { + if exists { + newValue := value - count + if newValue <= 0 { + return 0, false + } + return newValue, true + } + return 0, false + }) + return nil + }) a.aggLogsFilter.allTopics -= f.allTopics - for topic, count := range f.topics { - a.aggLogsFilter.topics[topic] -= count - if a.aggLogsFilter.topics[topic] == 0 { - delete(a.aggLogsFilter.topics, topic) - } - } + f.topics.Range(func(topic libcommon.Hash, count int) error { + a.aggLogsFilter.topics.Do(topic, func(value int, exists bool) (int, bool) { + if exists { + newValue := value - count + if newValue <= 0 { + return 0, false + } + return newValue, true + } + return 0, false + }) + return nil + }) } +// addLogsFilters adds the counts of addresses and topics in the given LogsFilter to the aggregated filter. +// It increments the counters for each address and topic in the aggregated filter by the corresponding counts in the +// provided LogsFilter. 
func (a *LogsFilterAggregator) addLogsFilters(f *LogsFilter) { a.logsFilterLock.Lock() defer a.logsFilterLock.Unlock() a.aggLogsFilter.allAddrs += f.allAddrs - for addr, count := range f.addrs { - a.aggLogsFilter.addrs[addr] += count - } + f.addrs.Range(func(addr libcommon.Address, count int) error { + a.aggLogsFilter.addrs.DoAndStore(addr, func(value int, exists bool) int { + return value + count + }) + return nil + }) a.aggLogsFilter.allTopics += f.allTopics - for topic, count := range f.topics { - a.aggLogsFilter.topics[topic] += count - } + f.topics.Range(func(topic libcommon.Hash, count int) error { + a.aggLogsFilter.topics.DoAndStore(topic, func(value int, exists bool) int { + return value + count + }) + return nil + }) } +// getAggMaps returns the aggregated maps of addresses and topics from the LogsFilterAggregator. +// It creates copies of the current state of the aggregated addresses and topics filters. func (a *LogsFilterAggregator) getAggMaps() (map[libcommon.Address]int, map[libcommon.Hash]int) { a.logsFilterLock.RLock() defer a.logsFilterLock.RUnlock() addresses := make(map[libcommon.Address]int) - for k, v := range a.aggLogsFilter.addrs { + a.aggLogsFilter.addrs.Range(func(k libcommon.Address, v int) error { addresses[k] = v - } + return nil + }) topics := make(map[libcommon.Hash]int) - for k, v := range a.aggLogsFilter.topics { + a.aggLogsFilter.topics.Range(func(k libcommon.Hash, v int) error { topics[k] = v - } + return nil + }) return addresses, topics } +// distributeLog processes an event log and distributes it to all subscribed log filters. +// It checks each filter to determine if the log should be sent based on the filter's address and topic settings. 
func (a *LogsFilterAggregator) distributeLog(eventLog *remote.SubscribeLogsReply) error { + a.logsFilterLock.RLock() + defer a.logsFilterLock.RUnlock() a.logsFilters.Range(func(k LogsSubID, filter *LogsFilter) error { if filter.allAddrs == 0 { - _, addrOk := filter.addrs[gointerfaces.ConvertH160toAddress(eventLog.Address)] + _, addrOk := filter.addrs.Get(gointerfaces.ConvertH160toAddress(eventLog.Address)) if !addrOk { return nil } @@ -157,10 +212,12 @@ func (a *LogsFilterAggregator) distributeLog(eventLog *remote.SubscribeLogsReply return nil } +// chooseTopics checks if the log topics match the filter's topics. +// It returns true if the log topics match the filter's topics, otherwise false. func (a *LogsFilterAggregator) chooseTopics(filter *LogsFilter, logTopics []libcommon.Hash) bool { var found bool for _, logTopic := range logTopics { - if _, ok := filter.topics[logTopic]; ok { + if _, ok := filter.topics.Get(logTopic); ok { found = true break } diff --git a/turbo/rpchelper/subscription.go b/turbo/rpchelper/subscription.go index 6fb57b151d0..e86e46f52de 100644 --- a/turbo/rpchelper/subscription.go +++ b/turbo/rpchelper/subscription.go @@ -45,69 +45,3 @@ func (s *chan_sub[T]) Close() { s.closed = true close(s.ch) } - -func NewSyncMap[K comparable, T any]() *SyncMap[K, T] { - return &SyncMap[K, T]{ - m: make(map[K]T), - } -} - -type SyncMap[K comparable, T any] struct { - m map[K]T - mu sync.RWMutex -} - -func (m *SyncMap[K, T]) Get(k K) (res T, ok bool) { - m.mu.RLock() - defer m.mu.RUnlock() - res, ok = m.m[k] - return res, ok -} - -func (m *SyncMap[K, T]) Put(k K, v T) (T, bool) { - m.mu.Lock() - defer m.mu.Unlock() - old, ok := m.m[k] - m.m[k] = v - return old, ok -} - -func (m *SyncMap[K, T]) Do(k K, fn func(T, bool) (T, bool)) (after T, ok bool) { - m.mu.Lock() - defer m.mu.Unlock() - val, ok := m.m[k] - nv, save := fn(val, ok) - if save { - m.m[k] = nv - } - return nv, ok -} - -func (m *SyncMap[K, T]) DoAndStore(k K, fn func(t T, ok bool) T) (after T, 
ok bool) { - return m.Do(k, func(t T, b bool) (T, bool) { - res := fn(t, b) - return res, true - }) -} - -func (m *SyncMap[K, T]) Range(fn func(k K, v T) error) error { - m.mu.RLock() - defer m.mu.RUnlock() - for k, v := range m.m { - if err := fn(k, v); err != nil { - return err - } - } - return nil -} - -func (m *SyncMap[K, T]) Delete(k K) (t T, deleted bool) { - m.mu.Lock() - defer m.mu.Unlock() - val, ok := m.m[k] - if !ok { - return t, false - } - delete(m.m, k) - return val, true -} From 996fccb4c02c126a6f06da5748f2d2fc6af5f678 Mon Sep 17 00:00:00 2001 From: milen <94537774+taratorio@users.noreply.github.com> Date: Fri, 6 Sep 2024 03:55:53 +0100 Subject: [PATCH 37/49] Added RPC filter limits, metrics, and better memory management. (#10718) (#11894) relates to https://github.com/erigontech/erigon/issues/11890 cherry pick from E3 https://github.com/erigontech/erigon/commit/adf3f438d8aae4c749e5ddacb3fe08afd6e695e7 ----- - Introduced `GetOrCreateGaugeVec` function to manage Prometheus `GaugeVec` metrics. - Implemented various subscription filter limits (max logs, max headers, max transactions, max addresses, and max topics). - Updated various test files to utilize the new `FiltersConfig` and added comprehensive tests for the new filter limits. - Added new flags for subscription filter limits. - Improved log filtering and event handling with better memory management and metric tracking. - Added configuration to handle RPC subscription filter limits. - Minor typo fixes. ------- RPC nodes would frequently OOM. 
Based on metrics and pprof, I observed high memory utilization in the rpchelper: ```text File: erigon Type: inuse_space Entering interactive mode (type "help" for commands, "o" for options) (pprof) top Showing nodes accounting for 69696.67MB, 95.37% of 73083.31MB total Dropped 1081 nodes (cum <= 365.42MB) Showing top 10 nodes out of 43 flat flat% sum% cum cum% 65925.80MB 90.21% 90.21% 65925.80MB 90.21% github.com/ledgerwatch/erigon/turbo/rpchelper.(*LogsFilterAggregator).distributeLog.func1 1944.80MB 2.66% 92.87% 1944.80MB 2.66% github.com/ledgerwatch/erigon/turbo/rpchelper.(*Filters).AddLogs.func1 1055.97MB 1.44% 94.31% 1055.97MB 1.44% github.com/ledgerwatch/erigon/turbo/rpchelper.(*Filters).AddPendingTxs.func1 362.04MB 0.5% 94.81% 572.20MB 0.78% github.com/ledgerwatch/erigon/core/state.(*stateObject).GetCommittedState 257.73MB 0.35% 95.16% 587.85MB 0.8% encoding/json.Marshal 57.85MB 0.079% 95.24% 1658.66MB 2.27% github.com/ledgerwatch/erigon/core/vm.(*EVMInterpreter).Run 51.48MB 0.07% 95.31% 2014.50MB 2.76% github.com/ledgerwatch/erigon/rpc.(*handler).handleCallMsg 23.01MB 0.031% 95.34% 1127.21MB 1.54% github.com/ledgerwatch/erigon/core/vm.opCall 11.50MB 0.016% 95.36% 1677.68MB 2.30% github.com/ledgerwatch/erigon/core/vm.(*EVM).call 6.50MB 0.0089% 95.37% 685.70MB 0.94% github.com/ledgerwatch/erigon/turbo/transactions.DoCall (pprof) list distributeLog.func1 Total: 71.37GB ROUTINE ======================== github.com/ledgerwatch/erigon/turbo/rpchelper.(*LogsFilterAggregator).distributeLog.func1 in github.com/ledgerwatch/erigon/turbo/rpchelper/logsfilter.go 64.38GB 64.38GB (flat, cum) 90.21% of Total . . 206: a.logsFilters.Range(func(k LogsSubID, filter *LogsFilter) error { . . 207: if filter.allAddrs == 0 { . . 208: _, addrOk := filter.addrs.Get(gointerfaces.ConvertH160toAddress(eventLog.Address)) . . 209: if !addrOk { . . 210: return nil . . 211: } . . 212: } . . 213: var topics []libcommon.Hash . . 
214: for _, topic := range eventLog.Topics { 27.01GB 27.01GB 215: topics = append(topics, gointerfaces.ConvertH256ToHash(topic)) . . 216: } . . 217: if filter.allTopics == 0 { . . 218: if !a.chooseTopics(filter, topics) { . . 219: return nil . . 220: } . . 221: } 37.37GB 37.37GB 222: lg := &types2.Log{ . . 223: Address: gointerfaces.ConvertH160toAddress(eventLog.Address), . . 224: Topics: topics, . . 225: Data: eventLog.Data, . . 226: BlockNumber: eventLog.BlockNumber, . . 227: TxHash: gointerfaces.ConvertH256ToHash(eventLog.TransactionHash), (pprof) % ``` - Added Metrics Implemented metrics to gain visibility into memory utilization. - Optimized Allocations Optimized object and slice allocations, resulting in significant improvements in memory usage. ```text File: erigon Type: inuse_space Entering interactive mode (type "help" for commands, "o" for options) (pprof) top Showing nodes accounting for 25.92GB, 94.14% of 27.53GB total Dropped 785 nodes (cum <= 0.14GB) Showing top 10 nodes out of 70 flat flat% sum% cum cum% 15.37GB 55.82% 55.82% 15.37GB 55.82% github.com/ledgerwatch/erigon/turbo/rpchelper.(*Filters).AddLogs.func1 8.15GB 29.60% 85.42% 8.15GB 29.60% github.com/ledgerwatch/erigon/turbo/rpchelper.(*Filters).AddPendingTxs.func1 0.64GB 2.33% 87.75% 1GB 3.65% github.com/ledgerwatch/erigon/turbo/rpchelper.(*LogsFilterAggregator).distributeLog 0.50GB 1.82% 89.57% 0.65GB 2.35% github.com/ledgerwatch/erigon/core/state.(*stateObject).GetCommittedState 0.36GB 1.32% 90.89% 0.36GB 1.32% github.com/ledgerwatch/erigon/turbo/rpchelper.(*LogsFilterAggregator).distributeLog.func1 0.31GB 1.12% 92.01% 0.31GB 1.12% bytes.growSlice 0.30GB 1.08% 93.09% 0.30GB 1.08% github.com/ledgerwatch/erigon/core/vm.(*Memory).Resize (inline) 0.15GB 0.56% 93.65% 0.35GB 1.27% github.com/ledgerwatch/erigon/core/state.(*IntraBlockState).AddSlotToAccessList 0.07GB 0.24% 93.90% 0.23GB 0.84% github.com/ledgerwatch/erigon/core/types.codecSelfer2.decLogs 0.07GB 0.24% 94.14% 1.58GB 5.75% 
github.com/ledgerwatch/erigon/core/vm.(*EVMInterpreter).Run (pprof) list AddLogs.func1 Total: 27.53GB ROUTINE ======================== github.com/ledgerwatch/erigon/turbo/rpchelper.(*Filters).AddLogs.func1 in github.com/ledgerwatch/erigon/turbo/rpchelper/filters.go 15.37GB 15.37GB (flat, cum) 55.82% of Total . . 644: ff.logsStores.DoAndStore(id, func(st []*types.Log, ok bool) []*types.Log { . . 645: if !ok { . . 646: st = make([]*types.Log, 0) . . 647: } 15.37GB 15.37GB 648: st = append(st, logs) . . 649: return st . . 650: }) . . 651:} . . 652: . . 653:// ReadLogs reads logs from the store associated with the given subscription ID. (pprof) list AddPendingTxs.func1 Total: 27.53GB ROUTINE ======================== github.com/ledgerwatch/erigon/turbo/rpchelper.(*Filters).AddPendingTxs.func1 in github.com/ledgerwatch/erigon/turbo/rpchelper/filters.go 8.15GB 8.15GB (flat, cum) 29.60% of Total . . 686: ff.pendingTxsStores.DoAndStore(id, func(st [][]types.Transaction, ok bool) [][]types.Transaction { . . 687: if !ok { . . 688: st = make([][]types.Transaction, 0) . . 689: } 8.15GB 8.15GB 690: st = append(st, txs) . . 691: return st . . 692: }) . . 693:} . . 694: . . 695:// ReadPendingTxs reads pending transactions from the store associated with the given subscription ID. (pprof) % ``` - Identified unbound slices that could grow indefinitely, leading to memory leaks. This issue occurs if subscribers do not request updates using their subscription ID, especially when behind a load balancer that does not pin clients to RPC nodes. 1. Architecture Design Changes - Implement architectural changes to pin clients to RPC Nodes ensuring subscribers that request updates using their subscription IDs will hit the same node, which does clean up the objecs on each request. - Reason for not choosing: This still relies on clients with subscriptions to request updates and if they never do and do not call the unsubscribe function it's an unbound memory leak. 
The RPC node operator has no control over the behavior of the clients. 2. Implementing Timeouts - Introduce timeouts for subscriptions to automatically clean up unresponsive or inactive subscriptions. - Reason for not choosing: Implementing timeouts would inadvertently stop websocket subscriptions due to the current design of the code, leading to a potential loss of data for active users. This solution would be good but requires a lot more work. 3. Configurable Limits (Chosen Solution) - Set configurable limits for various subscription parameters (e.g., logs, headers, transactions, addresses, topics) to manage memory utilization effectively. - Reason for choosing: This approach provides flexibility to RPC node operators to configure limits as per their requirements. The default behavior remains unchanged, making it a non-breaking change. Additionally, it ensures current data is always available by pruning the oldest data first. ```bash --rpc.subscription.filters.maxlogs=60 --rpc.subscription.filters.maxheaders=60 --rpc.subscription.filters.maxtxs=10_000 --rpc.subscription.filters.maxaddresses=1_000 --rpc.subscription.filters.maxtopics=1_000 ``` ```text File: erigon Type: inuse_space Entering interactive mode (type "help" for commands, "o" for options) (pprof) top Showing nodes accounting for 1146.51MB, 89.28% of 1284.17MB total Dropped 403 nodes (cum <= 6.42MB) Showing top 10 nodes out of 124 flat flat% sum% cum cum% 923.64MB 71.92% 71.92% 923.64MB 71.92% github.com/ledgerwatch/erigon/turbo/rpchelper.(*Filters).AddPendingTxs.func1 54.59MB 4.25% 76.18% 54.59MB 4.25% github.com/ledgerwatch/erigon/rlp.(*Stream).Bytes 38.21MB 2.98% 79.15% 38.21MB 2.98% bytes.growSlice 29.34MB 2.28% 81.44% 29.34MB 2.28% github.com/ledgerwatch/erigon/p2p/rlpx.growslice 26.44MB 2.06% 83.49% 34.16MB 2.66% compress/flate.NewWriter 18.66MB 1.45% 84.95% 21.16MB 1.65% github.com/ledgerwatch/erigon/core/state.(*stateObject).GetCommittedState 17.50MB 1.36% 86.31% 64.58MB 5.03% 
github.com/ledgerwatch/erigon/core/types.(*DynamicFeeTransaction).DecodeRLP 15MB 1.17% 87.48% 82.08MB 6.39% github.com/ledgerwatch/erigon/core/types.UnmarshalTransactionFromBinary 13.58MB 1.06% 88.54% 13.58MB 1.06% github.com/ledgerwatch/erigon/turbo/stages/headerdownload.(*HeaderDownload).addHeaderAsLink 9.55MB 0.74% 89.28% 9.55MB 0.74% github.com/ledgerwatch/erigon/turbo/rpchelper.newChanSub[...] (pprof) list AddPendingTxs.func1 Total: 1.25GB ROUTINE ======================== github.com/ledgerwatch/erigon/turbo/rpchelper.(*Filters).AddPendingTxs.func1 in github.com/ledgerwatch/erigon/turbo/rpchelper/filters.go 923.64MB 923.64MB (flat, cum) 71.92% of Total . . 698: ff.pendingTxsStores.DoAndStore(id, func(st [][]types.Transaction, ok bool) [][]types.Transaction { . . 699: if !ok { . . 700: st = make([][]types.Transaction, 0) . . 701: } . . 702: . . 703: // Calculate the total number of transactions in st . . 704: totalTxs := 0 . . 705: for _, txBatch := range st { . . 706: totalTxs += len(txBatch) . . 707: } . . 708: . . 709: maxTxs := ff.config.RpcSubscriptionFiltersMaxTxs . . 710: // If adding the new transactions would exceed maxTxs, remove oldest transactions . . 711: if maxTxs > 0 && totalTxs+len(txs) > maxTxs { . . 712: // Flatten st to a single slice 918.69MB 918.69MB 713: flatSt := make([]types.Transaction, 0, totalTxs) . . 714: for _, txBatch := range st { . . 715: flatSt = append(flatSt, txBatch...) . . 716: } . . 717: . . 718: // Remove the oldest transactions to make space for new ones . . 719: if len(flatSt)+len(txs) > maxTxs { . . 720: flatSt = flatSt[len(flatSt)+len(txs)-maxTxs:] . . 721: } . . 722: . . 723: // Convert flatSt back to [][]types.Transaction with a single batch . . 724: st = [][]types.Transaction{flatSt} . . 725: } . . 726: . . 727: // Append the new transactions as a new batch 5.95MB 4.95MB 728: st = append(st, txs) . . 729: return st . . 730: }) . . 731:} . . 732: . . 
733:// ReadPendingTxs reads pending transactions from the store associated with the given subscription ID. ``` With these changes, the memory utilization has significantly improved, and the system is now more stable and predictable. ----- image Blue line indicates a deployment Sharp drop of green line is an OOM Co-authored-by: Bret <787344+bretep@users.noreply.github.com> --- cmd/rpcdaemon/cli/config.go | 10 +- cmd/rpcdaemon/cli/httpcfg/http_cfg.go | 2 + erigon-lib/metrics/register.go | 21 ++++ erigon-lib/metrics/set.go | 119 +++++++++++++++++- eth/backend.go | 2 +- go.mod | 2 +- go.sum | 4 +- turbo/cli/default_flags.go | 6 + turbo/cli/flags.go | 50 ++++++-- turbo/jsonrpc/eth_block_test.go | 2 +- turbo/jsonrpc/eth_call_test.go | 2 +- turbo/jsonrpc/eth_filters_test.go | 4 +- turbo/jsonrpc/eth_mining_test.go | 4 +- turbo/jsonrpc/eth_subscribe_test.go | 2 +- turbo/jsonrpc/send_transaction_test.go | 4 +- turbo/jsonrpc/txpool_api_test.go | 2 +- turbo/rpchelper/config.go | 22 ++++ turbo/rpchelper/filters.go | 91 ++++++++++++-- turbo/rpchelper/filters_deadlock_test.go | 3 +- turbo/rpchelper/filters_test.go | 149 ++++++++++++++++++++++- turbo/rpchelper/logsfilter.go | 61 ++++++++-- turbo/rpchelper/metrics.go | 19 +++ 22 files changed, 524 insertions(+), 57 deletions(-) create mode 100644 turbo/rpchelper/config.go create mode 100644 turbo/rpchelper/metrics.go diff --git a/cmd/rpcdaemon/cli/config.go b/cmd/rpcdaemon/cli/config.go index dd3c917d50e..50d6cf7d674 100644 --- a/cmd/rpcdaemon/cli/config.go +++ b/cmd/rpcdaemon/cli/config.go @@ -140,6 +140,11 @@ func RootCommand() (*cobra.Command, *httpcfg.HttpCfg) { rootCmd.PersistentFlags().DurationVar(&cfg.EvmCallTimeout, "rpc.evmtimeout", rpccfg.DefaultEvmCallTimeout, "Maximum amount of time to wait for the answer from EVM call.") rootCmd.PersistentFlags().DurationVar(&cfg.OverlayGetLogsTimeout, "rpc.overlay.getlogstimeout", rpccfg.DefaultOverlayGetLogsTimeout, "Maximum amount of time to wait for the answer from the 
overlay_getLogs call.") rootCmd.PersistentFlags().DurationVar(&cfg.OverlayReplayBlockTimeout, "rpc.overlay.replayblocktimeout", rpccfg.DefaultOverlayReplayBlockTimeout, "Maximum amount of time to wait for the answer to replay a single block when called from an overlay_getLogs call.") + rootCmd.PersistentFlags().IntVar(&cfg.RpcFiltersConfig.RpcSubscriptionFiltersMaxLogs, "rpc.subscription.filters.maxlogs", rpchelper.DefaultFiltersConfig.RpcSubscriptionFiltersMaxLogs, "Maximum number of logs to store per subscription.") + rootCmd.PersistentFlags().IntVar(&cfg.RpcFiltersConfig.RpcSubscriptionFiltersMaxHeaders, "rpc.subscription.filters.maxheaders", rpchelper.DefaultFiltersConfig.RpcSubscriptionFiltersMaxHeaders, "Maximum number of block headers to store per subscription.") + rootCmd.PersistentFlags().IntVar(&cfg.RpcFiltersConfig.RpcSubscriptionFiltersMaxTxs, "rpc.subscription.filters.maxtxs", rpchelper.DefaultFiltersConfig.RpcSubscriptionFiltersMaxTxs, "Maximum number of transactions to store per subscription.") + rootCmd.PersistentFlags().IntVar(&cfg.RpcFiltersConfig.RpcSubscriptionFiltersMaxAddresses, "rpc.subscription.filters.maxaddresses", rpchelper.DefaultFiltersConfig.RpcSubscriptionFiltersMaxAddresses, "Maximum number of addresses per subscription to filter logs by.") + rootCmd.PersistentFlags().IntVar(&cfg.RpcFiltersConfig.RpcSubscriptionFiltersMaxTopics, "rpc.subscription.filters.maxtopics", rpchelper.DefaultFiltersConfig.RpcSubscriptionFiltersMaxTopics, "Maximum number of topics per subscription to filter logs by.") rootCmd.PersistentFlags().IntVar(&cfg.BatchLimit, utils.RpcBatchLimit.Name, utils.RpcBatchLimit.Value, utils.RpcBatchLimit.Usage) rootCmd.PersistentFlags().IntVar(&cfg.ReturnDataLimit, utils.RpcReturnDataLimit.Name, utils.RpcReturnDataLimit.Value, utils.RpcReturnDataLimit.Usage) rootCmd.PersistentFlags().BoolVar(&cfg.AllowUnprotectedTxs, utils.AllowUnprotectedTxs.Name, utils.AllowUnprotectedTxs.Value, utils.AllowUnprotectedTxs.Usage) @@ -269,6 
+274,7 @@ func checkDbCompatibility(ctx context.Context, db kv.RoDB) error { func EmbeddedServices(ctx context.Context, erigonDB kv.RoDB, stateCacheCfg kvcache.CoherentConfig, + rpcFiltersConfig rpchelper.FiltersConfig, blockReader services.FullBlockReader, ethBackendServer remote.ETHBACKENDServer, txPoolServer txpool.TxpoolServer, miningServer txpool.MiningServer, stateDiffClient StateChangesClient, logger log.Logger, @@ -291,7 +297,7 @@ func EmbeddedServices(ctx context.Context, txPool = direct.NewTxPoolClient(txPoolServer) mining = direct.NewMiningClient(miningServer) - ff = rpchelper.New(ctx, eth, txPool, mining, func() {}, logger) + ff = rpchelper.New(ctx, rpcFiltersConfig, eth, txPool, mining, func() {}, logger) return } @@ -541,7 +547,7 @@ func RemoteServices(ctx context.Context, cfg *httpcfg.HttpCfg, logger log.Logger } }() - ff = rpchelper.New(ctx, eth, txPool, mining, onNewSnapshot, logger) + ff = rpchelper.New(ctx, cfg.RpcFiltersConfig, eth, txPool, mining, onNewSnapshot, logger) return db, eth, txPool, mining, stateCache, blockReader, engine, ff, agg, err } diff --git a/cmd/rpcdaemon/cli/httpcfg/http_cfg.go b/cmd/rpcdaemon/cli/httpcfg/http_cfg.go index 3673eaac5b0..d66522bbe0b 100644 --- a/cmd/rpcdaemon/cli/httpcfg/http_cfg.go +++ b/cmd/rpcdaemon/cli/httpcfg/http_cfg.go @@ -1,6 +1,7 @@ package httpcfg import ( + "github.com/ledgerwatch/erigon/turbo/rpchelper" "time" "github.com/ledgerwatch/erigon-lib/common/datadir" @@ -51,6 +52,7 @@ type HttpCfg struct { RpcAllowListFilePath string RpcBatchConcurrency uint RpcStreamingDisable bool + RpcFiltersConfig rpchelper.FiltersConfig DBReadConcurrency int TraceCompatibility bool // Bug for bug compatibility for trace_ routines with OpenEthereum TxPoolApiAddr string diff --git a/erigon-lib/metrics/register.go b/erigon-lib/metrics/register.go index 2ac13a6b4ca..46624d2216a 100644 --- a/erigon-lib/metrics/register.go +++ b/erigon-lib/metrics/register.go @@ -2,6 +2,7 @@ package metrics import ( "fmt" + 
"github.com/prometheus/client_golang/prometheus" ) // NewCounter registers and returns new counter with the given name. @@ -88,6 +89,26 @@ func GetOrCreateGauge(name string) Gauge { return &gauge{g} } +// GetOrCreateGaugeVec returns registered GaugeVec with the given name +// or creates a new GaugeVec if the registry doesn't contain a GaugeVec with +// the given name and labels. +// +// name must be a valid Prometheus-compatible metric with possible labels. +// labels are the names of the dimensions associated with the gauge vector. +// For instance, +// +// - foo, with labels []string{"bar", "baz"} +// +// The returned GaugeVec is safe to use from concurrent goroutines. +func GetOrCreateGaugeVec(name string, labels []string, help ...string) *prometheus.GaugeVec { + gv, err := defaultSet.GetOrCreateGaugeVec(name, labels, help...) + if err != nil { + panic(fmt.Errorf("could not get or create new gaugevec: %w", err)) + } + + return gv +} + // NewSummary creates and returns new summary with the given name. // // name must be valid Prometheus-compatible metric with possible labels. diff --git a/erigon-lib/metrics/set.go b/erigon-lib/metrics/set.go index 2b0418fd2bd..289ac6970d0 100644 --- a/erigon-lib/metrics/set.go +++ b/erigon-lib/metrics/set.go @@ -2,6 +2,7 @@ package metrics import ( "fmt" + "reflect" "sort" "strings" "sync" @@ -16,15 +17,23 @@ type namedMetric struct { isAux bool } +type namedMetricVec struct { + name string + metric *prometheus.GaugeVec + isAux bool +} + // Set is a set of metrics. // // Metrics belonging to a set are exported separately from global metrics. // // Set.WritePrometheus must be called for exporting metrics from the set. 
type Set struct { - mu sync.Mutex - a []*namedMetric - m map[string]*namedMetric + mu sync.Mutex + a []*namedMetric + av []*namedMetricVec + m map[string]*namedMetric + vecs map[string]*namedMetricVec } var defaultSet = NewSet() @@ -34,7 +43,8 @@ var defaultSet = NewSet() // Pass the set to RegisterSet() function in order to export its metrics via global WritePrometheus() call. func NewSet() *Set { return &Set{ - m: make(map[string]*namedMetric), + m: make(map[string]*namedMetric), + vecs: make(map[string]*namedMetricVec), } } @@ -46,11 +56,18 @@ func (s *Set) Describe(ch chan<- *prometheus.Desc) { if !sort.SliceIsSorted(s.a, lessFunc) { sort.Slice(s.a, lessFunc) } + if !sort.SliceIsSorted(s.av, lessFunc) { + sort.Slice(s.av, lessFunc) + } sa := append([]*namedMetric(nil), s.a...) + sav := append([]*namedMetricVec(nil), s.av...) s.mu.Unlock() for _, nm := range sa { ch <- nm.metric.Desc() } + for _, nmv := range sav { + nmv.metric.Describe(ch) + } } func (s *Set) Collect(ch chan<- prometheus.Metric) { @@ -61,11 +78,18 @@ func (s *Set) Collect(ch chan<- prometheus.Metric) { if !sort.SliceIsSorted(s.a, lessFunc) { sort.Slice(s.a, lessFunc) } + if !sort.SliceIsSorted(s.av, lessFunc) { + sort.Slice(s.av, lessFunc) + } sa := append([]*namedMetric(nil), s.a...) + sav := append([]*namedMetricVec(nil), s.av...) s.mu.Unlock() for _, nm := range sa { ch <- nm.metric } + for _, nmv := range sav { + nmv.metric.Collect(ch) + } } // NewHistogram creates and returns new histogram in s with the given name. @@ -307,6 +331,78 @@ func (s *Set) GetOrCreateGauge(name string, help ...string) (prometheus.Gauge, e return g, nil } +// GetOrCreateGaugeVec returns registered GaugeVec in s with the given name +// or creates new GaugeVec if s doesn't contain GaugeVec with the given name. +// +// name must be valid Prometheus-compatible metric with possible labels. 
+// For instance, +// +// - foo +// - foo{bar="baz"} +// - foo{bar="baz",aaa="b"} +// +// labels are the labels associated with the GaugeVec. +// +// The returned GaugeVec is safe to use from concurrent goroutines. +func (s *Set) GetOrCreateGaugeVec(name string, labels []string, help ...string) (*prometheus.GaugeVec, error) { + s.mu.Lock() + nm := s.vecs[name] + s.mu.Unlock() + if nm == nil { + metric, err := newGaugeVec(name, labels, help...) + if err != nil { + return nil, fmt.Errorf("invalid metric name %q: %w", name, err) + } + + nmNew := &namedMetricVec{ + name: name, + metric: metric, + } + + s.mu.Lock() + nm = s.vecs[name] + if nm == nil { + nm = nmNew + s.vecs[name] = nm + s.av = append(s.av, nm) + } + s.mu.Unlock() + s.registerMetricVec(name, metric, false) + } + + if nm.metric == nil { + return nil, fmt.Errorf("metric %q is nil", name) + } + + metricType := reflect.TypeOf(nm.metric) + if metricType != reflect.TypeOf(&prometheus.GaugeVec{}) { + return nil, fmt.Errorf("metric %q isn't a GaugeVec. It is %s", name, metricType) + } + + return nm.metric, nil +} + +// newGaugeVec creates a new Prometheus GaugeVec. 
+func newGaugeVec(name string, labels []string, help ...string) (*prometheus.GaugeVec, error) { + name, constLabels, err := parseMetric(name) + if err != nil { + return nil, err + } + + helpStr := "gauge metric" + if len(help) > 0 { + helpStr = strings.Join(help, ", ") + } + + gv := prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Name: name, + Help: helpStr, + ConstLabels: constLabels, + }, labels) + + return gv, nil +} + const defaultSummaryWindow = 5 * time.Minute var defaultSummaryQuantiles = map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.97: 0.003, 0.99: 0.001} @@ -431,6 +527,21 @@ func (s *Set) registerMetric(name string, m prometheus.Metric) { s.mustRegisterLocked(name, m) } +func (s *Set) registerMetricVec(name string, mv *prometheus.GaugeVec, isAux bool) { + s.mu.Lock() + defer s.mu.Unlock() + + if _, exists := s.vecs[name]; !exists { + nmv := &namedMetricVec{ + name: name, + metric: mv, + isAux: isAux, + } + s.vecs[name] = nmv + s.av = append(s.av, nmv) + } +} + // mustRegisterLocked registers given metric with the given name. // // Panics if the given name was already registered before. 
diff --git a/eth/backend.go b/eth/backend.go index 5c9127e36e0..f4d9ba5b1b1 100644 --- a/eth/backend.go +++ b/eth/backend.go @@ -951,7 +951,7 @@ func (s *Ethereum) Init(stack *node.Node, config *ethconfig.Config, chainConfig } // start HTTP API httpRpcCfg := stack.Config().Http - ethRpcClient, txPoolRpcClient, miningRpcClient, stateCache, ff, err := cli.EmbeddedServices(ctx, chainKv, httpRpcCfg.StateCache, blockReader, ethBackendRPC, + ethRpcClient, txPoolRpcClient, miningRpcClient, stateCache, ff, err := cli.EmbeddedServices(ctx, chainKv, httpRpcCfg.StateCache, httpRpcCfg.RpcFiltersConfig, blockReader, ethBackendRPC, s.txPoolGrpcServer, miningRPC, stateDiffClient, s.logger) if err != nil { return err diff --git a/go.mod b/go.mod index 28d5efbbbfa..01366c49558 100644 --- a/go.mod +++ b/go.mod @@ -245,7 +245,7 @@ require ( github.com/pion/webrtc/v3 v3.1.42 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c // indirect - github.com/prometheus/client_golang v1.19.0 // indirect + github.com/prometheus/client_golang v1.19.1 github.com/prometheus/client_model v0.6.1 // indirect github.com/prometheus/common v0.48.0 // indirect github.com/prometheus/procfs v0.12.0 // indirect diff --git a/go.sum b/go.sum index 286fb740e09..bd32da5b4bc 100644 --- a/go.sum +++ b/go.sum @@ -749,8 +749,8 @@ github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXP github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= github.com/prometheus/client_golang v1.5.1/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= -github.com/prometheus/client_golang v1.19.0 h1:ygXvpU1AoN1MhdzckN+PyD9QJOSD4x7kmXYlnfbA6JU= -github.com/prometheus/client_golang v1.19.0/go.mod h1:ZRM9uEAypZakd+q/x7+gmsvXdURP+DABIEIjnmDdp+k= 
+github.com/prometheus/client_golang v1.19.1 h1:wZWJDwK+NameRJuPGDhlnFgx8e8HN3XHQeLaYJFJBOE= +github.com/prometheus/client_golang v1.19.1/go.mod h1:mP78NwGzrVks5S2H6ab8+ZZGJLZUq1hoULYBAYBw1Ho= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= diff --git a/turbo/cli/default_flags.go b/turbo/cli/default_flags.go index 251094811bf..7f77739152a 100644 --- a/turbo/cli/default_flags.go +++ b/turbo/cli/default_flags.go @@ -93,6 +93,12 @@ var DefaultFlags = []cli.Flag{ &OverlayGetLogsFlag, &OverlayReplayBlockFlag, + &RpcSubscriptionFiltersMaxLogsFlag, + &RpcSubscriptionFiltersMaxHeadersFlag, + &RpcSubscriptionFiltersMaxTxsFlag, + &RpcSubscriptionFiltersMaxAddressesFlag, + &RpcSubscriptionFiltersMaxTopicsFlag, + &utils.SnapKeepBlocksFlag, &utils.SnapStopFlag, &utils.DbPageSizeFlag, diff --git a/turbo/cli/flags.go b/turbo/cli/flags.go index 41bd8d703f1..55155b81dcf 100644 --- a/turbo/cli/flags.go +++ b/turbo/cli/flags.go @@ -26,6 +26,7 @@ import ( "github.com/ledgerwatch/erigon/eth/ethconfig" "github.com/ledgerwatch/erigon/ethdb/prune" "github.com/ledgerwatch/erigon/node/nodecfg" + "github.com/ledgerwatch/erigon/turbo/rpchelper" ) var ( @@ -246,6 +247,32 @@ var ( Value: rpccfg.DefaultOverlayReplayBlockTimeout, } + RpcSubscriptionFiltersMaxLogsFlag = cli.IntFlag{ + Name: "rpc.subscription.filters.maxlogs", + Usage: "Maximum number of logs to store per subscription.", + Value: rpchelper.DefaultFiltersConfig.RpcSubscriptionFiltersMaxLogs, + } + RpcSubscriptionFiltersMaxHeadersFlag = cli.IntFlag{ + Name: "rpc.subscription.filters.maxheaders", + Usage: "Maximum number of block headers to store per subscription.", + Value: 
rpchelper.DefaultFiltersConfig.RpcSubscriptionFiltersMaxHeaders, + } + RpcSubscriptionFiltersMaxTxsFlag = cli.IntFlag{ + Name: "rpc.subscription.filters.maxtxs", + Usage: "Maximum number of transactions to store per subscription.", + Value: rpchelper.DefaultFiltersConfig.RpcSubscriptionFiltersMaxTxs, + } + RpcSubscriptionFiltersMaxAddressesFlag = cli.IntFlag{ + Name: "rpc.subscription.filters.maxaddresses", + Usage: "Maximum number of addresses per subscription to filter logs by.", + Value: rpchelper.DefaultFiltersConfig.RpcSubscriptionFiltersMaxAddresses, + } + RpcSubscriptionFiltersMaxTopicsFlag = cli.IntFlag{ + Name: "rpc.subscription.filters.maxtopics", + Usage: "Maximum number of topics per subscription to filter logs by.", + Value: rpchelper.DefaultFiltersConfig.RpcSubscriptionFiltersMaxTopics, + } + TxPoolCommitEvery = cli.DurationFlag{ Name: "txpool.commit.every", Usage: "How often transactions should be committed to the storage", @@ -487,14 +514,21 @@ func setEmbeddedRpcDaemon(ctx *cli.Context, cfg *nodecfg.Config, logger log.Logg RpcStreamingDisable: ctx.Bool(utils.RpcStreamingDisableFlag.Name), DBReadConcurrency: ctx.Int(utils.DBReadConcurrencyFlag.Name), RpcAllowListFilePath: ctx.String(utils.RpcAccessListFlag.Name), - Gascap: ctx.Uint64(utils.RpcGasCapFlag.Name), - Feecap: ctx.Float64(utils.RPCGlobalTxFeeCapFlag.Name), - MaxTraces: ctx.Uint64(utils.TraceMaxtracesFlag.Name), - TraceCompatibility: ctx.Bool(utils.RpcTraceCompatFlag.Name), - BatchLimit: ctx.Int(utils.RpcBatchLimit.Name), - ReturnDataLimit: ctx.Int(utils.RpcReturnDataLimit.Name), - AllowUnprotectedTxs: ctx.Bool(utils.AllowUnprotectedTxs.Name), - MaxGetProofRewindBlockCount: ctx.Int(utils.RpcMaxGetProofRewindBlockCount.Name), + RpcFiltersConfig: rpchelper.FiltersConfig{ + RpcSubscriptionFiltersMaxLogs: ctx.Int(RpcSubscriptionFiltersMaxLogsFlag.Name), + RpcSubscriptionFiltersMaxHeaders: ctx.Int(RpcSubscriptionFiltersMaxHeadersFlag.Name), + RpcSubscriptionFiltersMaxTxs: 
ctx.Int(RpcSubscriptionFiltersMaxTxsFlag.Name), + RpcSubscriptionFiltersMaxAddresses: ctx.Int(RpcSubscriptionFiltersMaxAddressesFlag.Name), + RpcSubscriptionFiltersMaxTopics: ctx.Int(RpcSubscriptionFiltersMaxTopicsFlag.Name), + }, + Gascap: ctx.Uint64(utils.RpcGasCapFlag.Name), + Feecap: ctx.Float64(utils.RPCGlobalTxFeeCapFlag.Name), + MaxTraces: ctx.Uint64(utils.TraceMaxtracesFlag.Name), + TraceCompatibility: ctx.Bool(utils.RpcTraceCompatFlag.Name), + BatchLimit: ctx.Int(utils.RpcBatchLimit.Name), + ReturnDataLimit: ctx.Int(utils.RpcReturnDataLimit.Name), + AllowUnprotectedTxs: ctx.Bool(utils.AllowUnprotectedTxs.Name), + MaxGetProofRewindBlockCount: ctx.Int(utils.RpcMaxGetProofRewindBlockCount.Name), OtsMaxPageSize: ctx.Uint64(utils.OtsSearchMaxCapFlag.Name), diff --git a/turbo/jsonrpc/eth_block_test.go b/turbo/jsonrpc/eth_block_test.go index 4758c200237..2e804e48a37 100644 --- a/turbo/jsonrpc/eth_block_test.go +++ b/turbo/jsonrpc/eth_block_test.go @@ -74,7 +74,7 @@ func TestGetBlockByNumberWithPendingTag(t *testing.T) { ctx, conn := rpcdaemontest.CreateTestGrpcConn(t, m) txPool := txpool.NewTxpoolClient(conn) - ff := rpchelper.New(ctx, nil, txPool, txpool.NewMiningClient(conn), func() {}, m.Log) + ff := rpchelper.New(ctx, rpchelper.DefaultFiltersConfig, nil, txPool, txpool.NewMiningClient(conn), func() {}, m.Log) expected := 1 header := &types.Header{ diff --git a/turbo/jsonrpc/eth_call_test.go b/turbo/jsonrpc/eth_call_test.go index 80a9e665269..6d9811f8b5c 100644 --- a/turbo/jsonrpc/eth_call_test.go +++ b/turbo/jsonrpc/eth_call_test.go @@ -43,7 +43,7 @@ func TestEstimateGas(t *testing.T) { stateCache := kvcache.New(kvcache.DefaultCoherentConfig) ctx, conn := rpcdaemontest.CreateTestGrpcConn(t, mock.Mock(t)) mining := txpool.NewMiningClient(conn) - ff := rpchelper.New(ctx, nil, nil, mining, func() {}, m.Log) + ff := rpchelper.New(ctx, rpchelper.DefaultFiltersConfig, nil, nil, mining, func() {}, m.Log) api := NewEthAPI(NewBaseApi(ff, stateCache, m.BlockReader, 
agg, false, rpccfg.DefaultEvmCallTimeout, m.Engine, m.Dirs), m.DB, nil, nil, nil, 5000000, 1e18, 100_000, false, 100_000, 128, log.New()) var from = libcommon.HexToAddress("0x71562b71999873db5b286df957af199ec94617f7") var to = libcommon.HexToAddress("0x0d3ab14bbad3d99f4203bd7a11acb94882050e7e") diff --git a/turbo/jsonrpc/eth_filters_test.go b/turbo/jsonrpc/eth_filters_test.go index b1823419e3e..530c8a74d14 100644 --- a/turbo/jsonrpc/eth_filters_test.go +++ b/turbo/jsonrpc/eth_filters_test.go @@ -32,7 +32,7 @@ func TestNewFilters(t *testing.T) { stateCache := kvcache.New(kvcache.DefaultCoherentConfig) ctx, conn := rpcdaemontest.CreateTestGrpcConn(t, mock.Mock(t)) mining := txpool.NewMiningClient(conn) - ff := rpchelper.New(ctx, nil, nil, mining, func() {}, m.Log) + ff := rpchelper.New(ctx, rpchelper.DefaultFiltersConfig, nil, nil, mining, func() {}, m.Log) api := NewEthAPI(NewBaseApi(ff, stateCache, m.BlockReader, agg, false, rpccfg.DefaultEvmCallTimeout, m.Engine, m.Dirs), m.DB, nil, nil, nil, 5000000, 1e18, 100_000, false, 100_000, 128, log.New()) ptf, err := api.NewPendingTransactionFilter(ctx) @@ -61,7 +61,7 @@ func TestLogsSubscribeAndUnsubscribe_WithoutConcurrentMapIssue(t *testing.T) { m := mock.Mock(t) ctx, conn := rpcdaemontest.CreateTestGrpcConn(t, m) mining := txpool.NewMiningClient(conn) - ff := rpchelper.New(ctx, nil, nil, mining, func() {}, m.Log) + ff := rpchelper.New(ctx, rpchelper.DefaultFiltersConfig, nil, nil, mining, func() {}, m.Log) // generate some random topics topics := make([][]libcommon.Hash, 0) diff --git a/turbo/jsonrpc/eth_mining_test.go b/turbo/jsonrpc/eth_mining_test.go index 93439127800..36049b2a6a9 100644 --- a/turbo/jsonrpc/eth_mining_test.go +++ b/turbo/jsonrpc/eth_mining_test.go @@ -25,7 +25,7 @@ func TestPendingBlock(t *testing.T) { m := mock.Mock(t) ctx, conn := rpcdaemontest.CreateTestGrpcConn(t, mock.Mock(t)) mining := txpool.NewMiningClient(conn) - ff := rpchelper.New(ctx, nil, nil, mining, func() {}, m.Log) + ff := 
rpchelper.New(ctx, rpchelper.DefaultFiltersConfig, nil, nil, mining, func() {}, m.Log) stateCache := kvcache.New(kvcache.DefaultCoherentConfig) engine := ethash.NewFaker() api := NewEthAPI(NewBaseApi(ff, stateCache, m.BlockReader, nil, false, rpccfg.DefaultEvmCallTimeout, engine, @@ -52,7 +52,7 @@ func TestPendingLogs(t *testing.T) { m := mock.Mock(t) ctx, conn := rpcdaemontest.CreateTestGrpcConn(t, m) mining := txpool.NewMiningClient(conn) - ff := rpchelper.New(ctx, nil, nil, mining, func() {}, m.Log) + ff := rpchelper.New(ctx, rpchelper.DefaultFiltersConfig, nil, nil, mining, func() {}, m.Log) expect := []byte{211} ch, id := ff.SubscribePendingLogs(1) diff --git a/turbo/jsonrpc/eth_subscribe_test.go b/turbo/jsonrpc/eth_subscribe_test.go index 1a595fb2718..154fda5cc4e 100644 --- a/turbo/jsonrpc/eth_subscribe_test.go +++ b/turbo/jsonrpc/eth_subscribe_test.go @@ -49,7 +49,7 @@ func TestEthSubscribe(t *testing.T) { backendServer := privateapi.NewEthBackendServer(ctx, nil, m.DB, m.Notifications.Events, m.BlockReader, logger, builder.NewLatestBlockBuiltStore()) backendClient := direct.NewEthBackendClientDirect(backendServer) backend := rpcservices.NewRemoteBackend(backendClient, m.DB, m.BlockReader) - ff := rpchelper.New(ctx, backend, nil, nil, func() {}, m.Log) + ff := rpchelper.New(ctx, rpchelper.DefaultFiltersConfig, backend, nil, nil, func() {}, m.Log) newHeads, id := ff.SubscribeNewHeads(16) defer ff.UnsubscribeHeads(id) diff --git a/turbo/jsonrpc/send_transaction_test.go b/turbo/jsonrpc/send_transaction_test.go index f93b1e53c85..18ceced9c6f 100644 --- a/turbo/jsonrpc/send_transaction_test.go +++ b/turbo/jsonrpc/send_transaction_test.go @@ -88,7 +88,7 @@ func TestSendRawTransaction(t *testing.T) { ctx, conn := rpcdaemontest.CreateTestGrpcConn(t, mockSentry) txPool := txpool.NewTxpoolClient(conn) - ff := rpchelper.New(ctx, nil, txPool, txpool.NewMiningClient(conn), func() {}, mockSentry.Log) + ff := rpchelper.New(ctx, rpchelper.DefaultFiltersConfig, nil, txPool, 
txpool.NewMiningClient(conn), func() {}, mockSentry.Log) api := jsonrpc.NewEthAPI(newBaseApiForTest(mockSentry), mockSentry.DB, nil, txPool, nil, 5000000, 1e18, 100_000, false, 100_000, 128, logger) buf := bytes.NewBuffer(nil) @@ -140,7 +140,7 @@ func TestSendRawTransactionUnprotected(t *testing.T) { ctx, conn := rpcdaemontest.CreateTestGrpcConn(t, mockSentry) txPool := txpool.NewTxpoolClient(conn) - ff := rpchelper.New(ctx, nil, txPool, txpool.NewMiningClient(conn), func() {}, mockSentry.Log) + ff := rpchelper.New(ctx, rpchelper.DefaultFiltersConfig, nil, txPool, txpool.NewMiningClient(conn), func() {}, mockSentry.Log) api := jsonrpc.NewEthAPI(newBaseApiForTest(mockSentry), mockSentry.DB, nil, txPool, nil, 5000000, 1e18, 100_000, false, 100_000, 128, logger) // Enable unproteced txs flag diff --git a/turbo/jsonrpc/txpool_api_test.go b/turbo/jsonrpc/txpool_api_test.go index 205a7d72408..8789ada0385 100644 --- a/turbo/jsonrpc/txpool_api_test.go +++ b/turbo/jsonrpc/txpool_api_test.go @@ -33,7 +33,7 @@ func TestTxPoolContent(t *testing.T) { ctx, conn := rpcdaemontest.CreateTestGrpcConn(t, m) txPool := txpool.NewTxpoolClient(conn) - ff := rpchelper.New(ctx, nil, txPool, txpool.NewMiningClient(conn), func() {}, m.Log) + ff := rpchelper.New(ctx, rpchelper.DefaultFiltersConfig, nil, txPool, txpool.NewMiningClient(conn), func() {}, m.Log) agg := m.HistoryV3Components() api := NewTxPoolAPI(NewBaseApi(ff, kvcache.New(kvcache.DefaultCoherentConfig), m.BlockReader, agg, false, rpccfg.DefaultEvmCallTimeout, m.Engine, m.Dirs), m.DB, txPool) diff --git a/turbo/rpchelper/config.go b/turbo/rpchelper/config.go new file mode 100644 index 00000000000..21610b6a508 --- /dev/null +++ b/turbo/rpchelper/config.go @@ -0,0 +1,22 @@ +package rpchelper + +// FiltersConfig defines the configuration settings for RPC subscription filters. +// Each field represents a limit on the number of respective items that can be stored per subscription. 
+type FiltersConfig struct { + RpcSubscriptionFiltersMaxLogs int // Maximum number of logs to store per subscription. Default: 0 (no limit) + RpcSubscriptionFiltersMaxHeaders int // Maximum number of block headers to store per subscription. Default: 0 (no limit) + RpcSubscriptionFiltersMaxTxs int // Maximum number of transactions to store per subscription. Default: 0 (no limit) + RpcSubscriptionFiltersMaxAddresses int // Maximum number of addresses per subscription to filter logs by. Default: 0 (no limit) + RpcSubscriptionFiltersMaxTopics int // Maximum number of topics per subscription to filter logs by. Default: 0 (no limit) +} + +// DefaultFiltersConfig defines the default settings for filter configurations. +// These default values set no limits on the number of logs, block headers, transactions, +// addresses, or topics that can be stored per subscription. +var DefaultFiltersConfig = FiltersConfig{ + RpcSubscriptionFiltersMaxLogs: 0, // No limit on the number of logs per subscription + RpcSubscriptionFiltersMaxHeaders: 0, // No limit on the number of block headers per subscription + RpcSubscriptionFiltersMaxTxs: 0, // No limit on the number of transactions per subscription + RpcSubscriptionFiltersMaxAddresses: 0, // No limit on the number of addresses per subscription to filter logs by + RpcSubscriptionFiltersMaxTopics: 0, // No limit on the number of topics per subscription to filter logs by +} diff --git a/turbo/rpchelper/filters.go b/turbo/rpchelper/filters.go index e3fed0d1a29..2057150c493 100644 --- a/turbo/rpchelper/filters.go +++ b/turbo/rpchelper/filters.go @@ -6,6 +6,7 @@ import ( "encoding/hex" "errors" "fmt" + "github.com/prometheus/client_golang/prometheus" "io" "reflect" "sync" @@ -48,12 +49,14 @@ type Filters struct { pendingHeadsStores *concurrent.SyncMap[HeadsSubID, []*types.Header] pendingTxsStores *concurrent.SyncMap[PendingTxsSubID, [][]types.Transaction] logger log.Logger + + config FiltersConfig } // New creates a new Filters instance, 
initializes it, and starts subscription goroutines for Ethereum events. // It requires a context, Ethereum backend, transaction pool client, mining client, snapshot callback function, // and a logger for logging events. -func New(ctx context.Context, ethBackend ApiBackend, txPool txpool.TxpoolClient, mining txpool.MiningClient, onNewSnapshot func(), logger log.Logger) *Filters { +func New(ctx context.Context, config FiltersConfig, ethBackend ApiBackend, txPool txpool.TxpoolClient, mining txpool.MiningClient, onNewSnapshot func(), logger log.Logger) *Filters { logger.Info("rpc filters: subscribing to Erigon events") ff := &Filters{ @@ -67,15 +70,18 @@ func New(ctx context.Context, ethBackend ApiBackend, txPool txpool.TxpoolClient, pendingHeadsStores: concurrent.NewSyncMap[HeadsSubID, []*types.Header](), pendingTxsStores: concurrent.NewSyncMap[PendingTxsSubID, [][]types.Transaction](), logger: logger, + config: config, } go func() { if ethBackend == nil { return } + activeSubscriptionsLogsClientGauge.With(prometheus.Labels{clientLabelName: "ethBackend_Events"}).Inc() for { select { case <-ctx.Done(): + activeSubscriptionsLogsClientGauge.With(prometheus.Labels{clientLabelName: "ethBackend_Events"}).Dec() return default: } @@ -83,6 +89,7 @@ func New(ctx context.Context, ethBackend ApiBackend, txPool txpool.TxpoolClient, if err := ethBackend.Subscribe(ctx, ff.OnNewEvent); err != nil { select { case <-ctx.Done(): + activeSubscriptionsLogsClientGauge.With(prometheus.Labels{clientLabelName: "ethBackend_Events"}).Dec() return default: } @@ -99,15 +106,18 @@ func New(ctx context.Context, ethBackend ApiBackend, txPool txpool.TxpoolClient, if ethBackend == nil { return } + activeSubscriptionsLogsClientGauge.With(prometheus.Labels{clientLabelName: "ethBackend_Logs"}).Inc() for { select { case <-ctx.Done(): + activeSubscriptionsLogsClientGauge.With(prometheus.Labels{clientLabelName: "ethBackend_Logs"}).Dec() return default: } if err := ethBackend.SubscribeLogs(ctx, ff.OnNewLogs, 
&ff.logsRequestor); err != nil { select { case <-ctx.Done(): + activeSubscriptionsLogsClientGauge.With(prometheus.Labels{clientLabelName: "ethBackend_Logs"}).Dec() return default: } @@ -122,15 +132,18 @@ func New(ctx context.Context, ethBackend ApiBackend, txPool txpool.TxpoolClient, if txPool != nil { go func() { + activeSubscriptionsLogsClientGauge.With(prometheus.Labels{clientLabelName: "txPool_PendingTxs"}).Inc() for { select { case <-ctx.Done(): + activeSubscriptionsLogsClientGauge.With(prometheus.Labels{clientLabelName: "txPool_PendingTxs"}).Dec() return default: } if err := ff.subscribeToPendingTransactions(ctx, txPool); err != nil { select { case <-ctx.Done(): + activeSubscriptionsLogsClientGauge.With(prometheus.Labels{clientLabelName: "txPool_PendingTxs"}).Dec() return default: } @@ -145,15 +158,18 @@ func New(ctx context.Context, ethBackend ApiBackend, txPool txpool.TxpoolClient, if !reflect.ValueOf(mining).IsNil() { //https://groups.google.com/g/golang-nuts/c/wnH302gBa4I go func() { + activeSubscriptionsLogsClientGauge.With(prometheus.Labels{clientLabelName: "txPool_PendingBlock"}).Inc() for { select { case <-ctx.Done(): + activeSubscriptionsLogsClientGauge.With(prometheus.Labels{clientLabelName: "txPool_PendingBlock"}).Dec() return default: } if err := ff.subscribeToPendingBlocks(ctx, mining); err != nil { select { case <-ctx.Done(): + activeSubscriptionsLogsClientGauge.With(prometheus.Labels{clientLabelName: "txPool_PendingBlock"}).Dec() return default: } @@ -166,15 +182,18 @@ func New(ctx context.Context, ethBackend ApiBackend, txPool txpool.TxpoolClient, } }() go func() { + activeSubscriptionsLogsClientGauge.With(prometheus.Labels{clientLabelName: "txPool_PendingLogs"}).Inc() for { select { case <-ctx.Done(): + activeSubscriptionsLogsClientGauge.With(prometheus.Labels{clientLabelName: "txPool_PendingLogs"}).Dec() return default: } if err := ff.subscribeToPendingLogs(ctx, mining); err != nil { select { case <-ctx.Done(): + 
activeSubscriptionsLogsClientGauge.With(prometheus.Labels{clientLabelName: "txPool_PendingLogs"}).Dec() return default: } @@ -414,26 +433,44 @@ func (ff *Filters) SubscribeLogs(size int, criteria filters.FilterCriteria) (<-c // If no addresses are specified, it means all addresses should be included f.allAddrs = 1 } else { + // Limit the number of addresses + addressCount := 0 for _, addr := range criteria.Addresses { - f.addrs.Put(addr, 1) + if ff.config.RpcSubscriptionFiltersMaxAddresses == 0 || addressCount < ff.config.RpcSubscriptionFiltersMaxAddresses { + f.addrs.Put(addr, 1) + addressCount++ + } else { + break + } } } - // Handle topics + // Handle topics and track the allowed topics if len(criteria.Topics) == 0 { // If no topics are specified, it means all topics should be included f.allTopics = 1 } else { + // Limit the number of topics + topicCount := 0 + allowedTopics := [][]libcommon.Hash{} for _, topics := range criteria.Topics { + allowedTopicsRow := []libcommon.Hash{} for _, topic := range topics { - f.topics.Put(topic, 1) + if ff.config.RpcSubscriptionFiltersMaxTopics == 0 || topicCount < ff.config.RpcSubscriptionFiltersMaxTopics { + f.topics.Put(topic, 1) + allowedTopicsRow = append(allowedTopicsRow, topic) + topicCount++ + } else { + break + } + } + if len(allowedTopicsRow) > 0 { + allowedTopics = append(allowedTopics, allowedTopicsRow) } } + f.topicsOriginal = allowedTopics } - // Store original topics for reference - f.topicsOriginal = criteria.Topics - // Add the filter to the list of log filters ff.logsSubs.addLogsFilters(f) @@ -602,12 +639,17 @@ func (ff *Filters) OnNewLogs(reply *remote.SubscribeLogsReply) { } // AddLogs adds logs to the store associated with the given subscription ID. 
-func (ff *Filters) AddLogs(id LogsSubID, logs *types.Log) { +func (ff *Filters) AddLogs(id LogsSubID, log *types.Log) { ff.logsStores.DoAndStore(id, func(st []*types.Log, ok bool) []*types.Log { if !ok { st = make([]*types.Log, 0) } - st = append(st, logs) + + maxLogs := ff.config.RpcSubscriptionFiltersMaxLogs + if maxLogs > 0 && len(st)+1 > maxLogs { + st = st[len(st)+1-maxLogs:] // Remove oldest logs to make space + } + st = append(st, log) return st }) } @@ -628,6 +670,11 @@ func (ff *Filters) AddPendingBlock(id HeadsSubID, block *types.Header) { if !ok { st = make([]*types.Header, 0) } + + maxHeaders := ff.config.RpcSubscriptionFiltersMaxHeaders + if maxHeaders > 0 && len(st) >= maxHeaders { + st = st[1:] // Remove the oldest header to make space + } st = append(st, block) return st }) @@ -649,6 +696,32 @@ func (ff *Filters) AddPendingTxs(id PendingTxsSubID, txs []types.Transaction) { if !ok { st = make([][]types.Transaction, 0) } + + // Calculate the total number of transactions in st + totalTxs := 0 + for _, txBatch := range st { + totalTxs += len(txBatch) + } + + maxTxs := ff.config.RpcSubscriptionFiltersMaxTxs + // If adding the new transactions would exceed maxTxs, remove oldest transactions + if maxTxs > 0 && totalTxs+len(txs) > maxTxs { + // Flatten st to a single slice + flatSt := make([]types.Transaction, 0, totalTxs) + for _, txBatch := range st { + flatSt = append(flatSt, txBatch...) 
+ } + + // Remove the oldest transactions to make space for new ones + if len(flatSt)+len(txs) > maxTxs { + flatSt = flatSt[len(flatSt)+len(txs)-maxTxs:] + } + + // Convert flatSt back to [][]types.Transaction with a single batch + st = [][]types.Transaction{flatSt} + } + + // Append the new transactions as a new batch st = append(st, txs) return st }) diff --git a/turbo/rpchelper/filters_deadlock_test.go b/turbo/rpchelper/filters_deadlock_test.go index 1646ec19701..dc31fb9178d 100644 --- a/turbo/rpchelper/filters_deadlock_test.go +++ b/turbo/rpchelper/filters_deadlock_test.go @@ -18,7 +18,8 @@ import ( func TestFiltersDeadlock_Test(t *testing.T) { t.Parallel() logger := log.New() - f := rpchelper.New(context.TODO(), nil, nil, nil, func() {}, logger) + config := rpchelper.FiltersConfig{} + f := rpchelper.New(context.TODO(), config, nil, nil, nil, func() {}, logger) crit := filters.FilterCriteria{ Addresses: nil, Topics: [][]libcommon.Hash{}, diff --git a/turbo/rpchelper/filters_test.go b/turbo/rpchelper/filters_test.go index f90eb3912d1..593d2662c7e 100644 --- a/turbo/rpchelper/filters_test.go +++ b/turbo/rpchelper/filters_test.go @@ -2,6 +2,8 @@ package rpchelper import ( "context" + "github.com/holiman/uint256" + "github.com/ledgerwatch/erigon/core/types" "testing" libcommon "github.com/ledgerwatch/erigon-lib/common" @@ -49,7 +51,7 @@ func TestFilters_GenerateSubscriptionID(t *testing.T) { v := <-subs _, ok := set[v] if ok { - t.Errorf("SubscriptionID Confict: %s", v) + t.Errorf("SubscriptionID Conflict: %s", v) return } set[v] = struct{}{} @@ -58,7 +60,8 @@ func TestFilters_GenerateSubscriptionID(t *testing.T) { func TestFilters_SingleSubscription_OnlyTopicsSubscribedAreBroadcast(t *testing.T) { t.Parallel() - f := New(context.TODO(), nil, nil, nil, func() {}, log.New()) + config := FiltersConfig{} + f := New(context.TODO(), config, nil, nil, nil, func() {}, log.New()) subbedTopic := libcommon.BytesToHash([]byte{10, 20}) @@ -90,7 +93,8 @@ func 
TestFilters_SingleSubscription_OnlyTopicsSubscribedAreBroadcast(t *testing. func TestFilters_SingleSubscription_EmptyTopicsInCriteria_OnlyTopicsSubscribedAreBroadcast(t *testing.T) { t.Parallel() - f := New(context.TODO(), nil, nil, nil, func() {}, log.New()) + config := FiltersConfig{} + f := New(context.TODO(), config, nil, nil, nil, func() {}, log.New()) var nilTopic libcommon.Hash subbedTopic := libcommon.BytesToHash([]byte{10, 20}) @@ -123,7 +127,8 @@ func TestFilters_SingleSubscription_EmptyTopicsInCriteria_OnlyTopicsSubscribedAr func TestFilters_TwoSubscriptionsWithDifferentCriteria(t *testing.T) { t.Parallel() - f := New(context.TODO(), nil, nil, nil, func() {}, log.New()) + config := FiltersConfig{} + f := New(context.TODO(), config, nil, nil, nil, func() {}, log.New()) criteria1 := filters.FilterCriteria{ Addresses: nil, @@ -163,7 +168,8 @@ func TestFilters_TwoSubscriptionsWithDifferentCriteria(t *testing.T) { func TestFilters_ThreeSubscriptionsWithDifferentCriteria(t *testing.T) { t.Parallel() - f := New(context.TODO(), nil, nil, nil, func() {}, log.New()) + config := FiltersConfig{} + f := New(context.TODO(), config, nil, nil, nil, func() {}, log.New()) criteria1 := filters.FilterCriteria{ Addresses: nil, @@ -238,7 +244,8 @@ func TestFilters_SubscribeLogsGeneratesCorrectLogFilterRequest(t *testing.T) { return nil } - f := New(context.TODO(), nil, nil, nil, func() {}, log.New()) + config := FiltersConfig{} + f := New(context.TODO(), config, nil, nil, nil, func() {}, log.New()) f.logsRequestor.Store(loadRequester) // first request has no filters @@ -347,3 +354,133 @@ func TestFilters_SubscribeLogsGeneratesCorrectLogFilterRequest(t *testing.T) { t.Error("6: expected topics to be empty") } } + +func TestFilters_AddLogs(t *testing.T) { + config := FiltersConfig{RpcSubscriptionFiltersMaxLogs: 5} + f := New(context.TODO(), config, nil, nil, nil, func() {}, log.New()) + logID := LogsSubID("test-log") + logEntry := &types.Log{} + + // Add 10 logs to the store, 
but limit is 5 + for i := 0; i < 10; i++ { + f.AddLogs(logID, logEntry) + } + + logs, found := f.ReadLogs(logID) + if !found { + t.Error("expected to find logs in the store") + } + if len(logs) != 5 { + t.Errorf("expected 5 logs in the store, got %d", len(logs)) + } +} + +func TestFilters_AddLogs_Unlimited(t *testing.T) { + config := FiltersConfig{RpcSubscriptionFiltersMaxLogs: 0} + f := New(context.TODO(), config, nil, nil, nil, func() {}, log.New()) + logID := LogsSubID("test-log") + logEntry := &types.Log{} + + // Add 10 logs to the store, limit is unlimited + for i := 0; i < 10; i++ { + f.AddLogs(logID, logEntry) + } + + logs, found := f.ReadLogs(logID) + if !found { + t.Error("expected to find logs in the store") + } + if len(logs) != 10 { + t.Errorf("expected 10 logs in the store, got %d", len(logs)) + } +} + +func TestFilters_AddPendingBlocks(t *testing.T) { + config := FiltersConfig{RpcSubscriptionFiltersMaxHeaders: 3} + f := New(context.TODO(), config, nil, nil, nil, func() {}, log.New()) + headerID := HeadsSubID("test-header") + header := &types.Header{} + + // Add 5 headers to the store, but limit is 3 + for i := 0; i < 5; i++ { + f.AddPendingBlock(headerID, header) + } + + headers, found := f.ReadPendingBlocks(headerID) + if !found { + t.Error("expected to find headers in the store") + } + if len(headers) != 3 { + t.Errorf("expected 3 headers in the store, got %d", len(headers)) + } +} + +func TestFilters_AddPendingBlocks_Unlimited(t *testing.T) { + config := FiltersConfig{RpcSubscriptionFiltersMaxHeaders: 0} + f := New(context.TODO(), config, nil, nil, nil, func() {}, log.New()) + headerID := HeadsSubID("test-header") + header := &types.Header{} + + // Add 5 headers to the store, limit is unlimited + for i := 0; i < 5; i++ { + f.AddPendingBlock(headerID, header) + } + + headers, found := f.ReadPendingBlocks(headerID) + if !found { + t.Error("expected to find headers in the store") + } + if len(headers) != 5 { + t.Errorf("expected 5 headers in the 
store, got %d", len(headers)) + } +} + +func TestFilters_AddPendingTxs(t *testing.T) { + config := FiltersConfig{RpcSubscriptionFiltersMaxTxs: 4} + f := New(context.TODO(), config, nil, nil, nil, func() {}, log.New()) + txID := PendingTxsSubID("test-tx") + var tx types.Transaction = types.NewTransaction(0, libcommon.HexToAddress("095e7baea6a6c7c4c2dfeb977efac326af552d87"), uint256.NewInt(10), 50000, uint256.NewInt(10), nil) + tx, _ = tx.WithSignature(*types.LatestSignerForChainID(nil), libcommon.Hex2Bytes("9bea4c4daac7c7c52e093e6a4c35dbbcf8856f1af7b059ba20253e70848d094f8a8fae537ce25ed8cb5af9adac3f141af69bd515bd2ba031522df09b97dd72b100")) + + // Add 6 txs to the store, but limit is 4 + for i := 0; i < 6; i++ { + f.AddPendingTxs(txID, []types.Transaction{tx}) + } + + txs, found := f.ReadPendingTxs(txID) + if !found { + t.Error("expected to find txs in the store") + } + totalTxs := 0 + for _, batch := range txs { + totalTxs += len(batch) + } + if totalTxs != 4 { + t.Errorf("expected 4 txs in the store, got %d", totalTxs) + } +} + +func TestFilters_AddPendingTxs_Unlimited(t *testing.T) { + config := FiltersConfig{RpcSubscriptionFiltersMaxTxs: 0} + f := New(context.TODO(), config, nil, nil, nil, func() {}, log.New()) + txID := PendingTxsSubID("test-tx") + var tx types.Transaction = types.NewTransaction(0, libcommon.HexToAddress("095e7baea6a6c7c4c2dfeb977efac326af552d87"), uint256.NewInt(10), 50000, uint256.NewInt(10), nil) + tx, _ = tx.WithSignature(*types.LatestSignerForChainID(nil), libcommon.Hex2Bytes("9bea4c4daac7c7c52e093e6a4c35dbbcf8856f1af7b059ba20253e70848d094f8a8fae537ce25ed8cb5af9adac3f141af69bd515bd2ba031522df09b97dd72b100")) + + // Add 6 txs to the store, limit is unlimited + for i := 0; i < 6; i++ { + f.AddPendingTxs(txID, []types.Transaction{tx}) + } + + txs, found := f.ReadPendingTxs(txID) + if !found { + t.Error("expected to find txs in the store") + } + totalTxs := 0 + for _, batch := range txs { + totalTxs += len(batch) + } + if totalTxs != 6 { + 
t.Errorf("expected 6 txs in the store, got %d", totalTxs) + } +} diff --git a/turbo/rpchelper/logsfilter.go b/turbo/rpchelper/logsfilter.go index 8a6c3e06043..a588d370f59 100644 --- a/turbo/rpchelper/logsfilter.go +++ b/turbo/rpchelper/logsfilter.go @@ -105,9 +105,15 @@ func (a *LogsFilterAggregator) createFilterRequest() *remote.LogsFilterRequest { // provided LogsFilter. If the count for any address or topic reaches zero, it is removed from the aggregated filter. func (a *LogsFilterAggregator) subtractLogFilters(f *LogsFilter) { a.aggLogsFilter.allAddrs -= f.allAddrs + if f.allAddrs > 0 { + // Decrement the count for AllAddresses + activeSubscriptionsLogsAllAddressesGauge.Dec() + } f.addrs.Range(func(addr libcommon.Address, count int) error { a.aggLogsFilter.addrs.Do(addr, func(value int, exists bool) (int, bool) { if exists { + // Decrement the count for subscribed address + activeSubscriptionsLogsAddressesGauge.Dec() newValue := value - count if newValue <= 0 { return 0, false @@ -119,9 +125,15 @@ func (a *LogsFilterAggregator) subtractLogFilters(f *LogsFilter) { return nil }) a.aggLogsFilter.allTopics -= f.allTopics + if f.allTopics > 0 { + // Decrement the count for AllTopics + activeSubscriptionsLogsAllTopicsGauge.Dec() + } f.topics.Range(func(topic libcommon.Hash, count int) error { a.aggLogsFilter.topics.Do(topic, func(value int, exists bool) (int, bool) { if exists { + // Decrement the count for subscribed topic + activeSubscriptionsLogsTopicsGauge.Dec() newValue := value - count if newValue <= 0 { return 0, false @@ -141,14 +153,26 @@ func (a *LogsFilterAggregator) addLogsFilters(f *LogsFilter) { a.logsFilterLock.Lock() defer a.logsFilterLock.Unlock() a.aggLogsFilter.allAddrs += f.allAddrs + if f.allAddrs > 0 { + // Increment the count for AllAddresses + activeSubscriptionsLogsAllAddressesGauge.Inc() + } f.addrs.Range(func(addr libcommon.Address, count int) error { + // Increment the count for subscribed address + 
activeSubscriptionsLogsAddressesGauge.Inc() a.aggLogsFilter.addrs.DoAndStore(addr, func(value int, exists bool) int { return value + count }) return nil }) a.aggLogsFilter.allTopics += f.allTopics + if f.allTopics > 0 { + // Increment the count for AllTopics + activeSubscriptionsLogsAllTopicsGauge.Inc() + } f.topics.Range(func(topic libcommon.Hash, count int) error { + // Increment the count for subscribed topic + activeSubscriptionsLogsTopicsGauge.Inc() a.aggLogsFilter.topics.DoAndStore(topic, func(value int, exists bool) int { return value + count }) @@ -179,6 +203,10 @@ func (a *LogsFilterAggregator) getAggMaps() (map[libcommon.Address]int, map[libc func (a *LogsFilterAggregator) distributeLog(eventLog *remote.SubscribeLogsReply) error { a.logsFilterLock.RLock() defer a.logsFilterLock.RUnlock() + + var lg types2.Log + var topics []libcommon.Hash + a.logsFilters.Range(func(k LogsSubID, filter *LogsFilter) error { if filter.allAddrs == 0 { _, addrOk := filter.addrs.Get(gointerfaces.ConvertH160toAddress(eventLog.Address)) @@ -186,27 +214,34 @@ func (a *LogsFilterAggregator) distributeLog(eventLog *remote.SubscribeLogsReply return nil } } - var topics []libcommon.Hash + + // Pre-allocate topics slice to the required size to avoid multiple allocations + topics = topics[:0] + if cap(topics) < len(eventLog.Topics) { + topics = make([]libcommon.Hash, 0, len(eventLog.Topics)) + } for _, topic := range eventLog.Topics { topics = append(topics, gointerfaces.ConvertH256ToHash(topic)) } + if filter.allTopics == 0 { if !a.chooseTopics(filter, topics) { return nil } } - lg := &types2.Log{ - Address: gointerfaces.ConvertH160toAddress(eventLog.Address), - Topics: topics, - Data: eventLog.Data, - BlockNumber: eventLog.BlockNumber, - TxHash: gointerfaces.ConvertH256ToHash(eventLog.TransactionHash), - TxIndex: uint(eventLog.TransactionIndex), - BlockHash: gointerfaces.ConvertH256ToHash(eventLog.BlockHash), - Index: uint(eventLog.LogIndex), - Removed: eventLog.Removed, - } - 
filter.sender.Send(lg) + + // Reuse lg object to avoid creating new instances + lg.Address = gointerfaces.ConvertH160toAddress(eventLog.Address) + lg.Topics = topics + lg.Data = eventLog.Data + lg.BlockNumber = eventLog.BlockNumber + lg.TxHash = gointerfaces.ConvertH256ToHash(eventLog.TransactionHash) + lg.TxIndex = uint(eventLog.TransactionIndex) + lg.BlockHash = gointerfaces.ConvertH256ToHash(eventLog.BlockHash) + lg.Index = uint(eventLog.LogIndex) + lg.Removed = eventLog.Removed + + filter.sender.Send(&lg) return nil }) return nil diff --git a/turbo/rpchelper/metrics.go b/turbo/rpchelper/metrics.go new file mode 100644 index 00000000000..41e963d3285 --- /dev/null +++ b/turbo/rpchelper/metrics.go @@ -0,0 +1,19 @@ +package rpchelper + +import ( + "github.com/ledgerwatch/erigon-lib/metrics" +) + +const ( + filterLabelName = "filter" + clientLabelName = "client" +) + +var ( + activeSubscriptionsGauge = metrics.GetOrCreateGaugeVec("subscriptions", []string{filterLabelName}, "Current number of subscriptions") + activeSubscriptionsLogsAllAddressesGauge = metrics.GetOrCreateGauge("subscriptions_logs_all_addresses") + activeSubscriptionsLogsAllTopicsGauge = metrics.GetOrCreateGauge("subscriptions_logs_all_topics") + activeSubscriptionsLogsAddressesGauge = metrics.GetOrCreateGauge("subscriptions_logs_addresses") + activeSubscriptionsLogsTopicsGauge = metrics.GetOrCreateGauge("subscriptions_logs_topics") + activeSubscriptionsLogsClientGauge = metrics.GetOrCreateGaugeVec("subscriptions_logs_client", []string{clientLabelName}, "Current number of subscriptions by client") +) From 7b4b8b5c823915e00271cab042b51ce0ab28caad Mon Sep 17 00:00:00 2001 From: milen <94537774+taratorio@users.noreply.github.com> Date: Fri, 6 Sep 2024 14:41:54 +0100 Subject: [PATCH 38/49] Prevent invalid slice operations in filters (#10826) (#11908) relates to https://github.com/erigontech/erigon/issues/11890 cherry pick from E3 to E2: 
https://github.com/erigontech/erigon/commit/b760da20ce8da211e2ae877270415f64c3ab5fa8 ---- - Simplify and enhance tests. - Add test for invalid slice operations panic. - Remove unused Mutex ---- ### Issue I experienced a rare panic with the new filter code. ```text panic: runtime error: slice bounds out of range [121:100] goroutine 25311363 [running]: github.com/ledgerwatch/erigon/turbo/rpchelper.(*Filters).AddPendingTxs.func1({0xc011c67020?, 0xc00a049e20?, 0xc020720b40?}, 0x48?) github.com/ledgerwatch/erigon/turbo/rpchelper/filters.go:720 +0x31a github.com/ledgerwatch/erigon-lib/common/concurrent.(*SyncMap[...]).DoAndStore.func1(0x20?) github.com/ledgerwatch/erigon-lib@v1.0.0/common/concurrent/concurrent.go:52 +0x22 github.com/ledgerwatch/erigon-lib/common/concurrent.(*SyncMap[...]).Do(0x33209a0, {0xc013409fa0, 0x20}, 0xc00a049ee0) github.com/ledgerwatch/erigon-lib@v1.0.0/common/concurrent/concurrent.go:40 +0xff github.com/ledgerwatch/erigon-lib/common/concurrent.(*SyncMap[...]).DoAndStore(0xc0097a3c70?, {0xc013409fa0?, 0x30?}, 0xc000bc7d40?) github.com/ledgerwatch/erigon-lib@v1.0.0/common/concurrent/concurrent.go:51 +0x4b github.com/ledgerwatch/erigon/turbo/rpchelper.(*Filters).AddPendingTxs(0xc010d587d0?, {0xc013409fa0?, 0xc0097de0f0?}, {0xc01239a800?, 0xc00c820500?, 0xc011beee70?}) github.com/ledgerwatch/erigon/turbo/rpchelper/filters.go:698 +0x6b github.com/ledgerwatch/erigon/turbo/jsonrpc.(*APIImpl).NewPendingTransactionFilter.func1() github.com/ledgerwatch/erigon/turbo/jsonrpc/eth_filters.go:24 +0x88 created by github.com/ledgerwatch/erigon/turbo/jsonrpc.(*APIImpl).NewPendingTransactionFilter github.com/ledgerwatch/erigon/turbo/jsonrpc/eth_filters.go:22 +0xca ``` ### Resolution 1. Create a unit test reproducing the panic. 2. Ensure the slicing indices are calculated correctly and do not produce an invalid range. 
### Running the new unit test on unfixed code: ```bash $ go test --- FAIL: TestFilters_AddPendingTxs (0.00s) --- FAIL: TestFilters_AddPendingTxs/TriggerPanic (0.00s) filters_test.go:451: AddPendingTxs caused a panic: runtime error: slice bounds out of range [10:5] FAIL exit status 1 FAIL github.com/ledgerwatch/erigon/turbo/rpchelper 0.454s ``` Co-authored-by: Bret <787344+bretep@users.noreply.github.com> --- turbo/rpchelper/filters.go | 44 +++++- turbo/rpchelper/filters_test.go | 241 ++++++++++++++++---------------- 2 files changed, 160 insertions(+), 125 deletions(-) diff --git a/turbo/rpchelper/filters.go b/turbo/rpchelper/filters.go index 2057150c493..28f508c49b3 100644 --- a/turbo/rpchelper/filters.go +++ b/turbo/rpchelper/filters.go @@ -44,7 +44,6 @@ type Filters struct { logsRequestor atomic.Value onNewSnapshot func() - storeMu sync.Mutex logsStores *concurrent.SyncMap[LogsSubID, []*types.Log] pendingHeadsStores *concurrent.SyncMap[HeadsSubID, []*types.Header] pendingTxsStores *concurrent.SyncMap[PendingTxsSubID, [][]types.Transaction] @@ -647,8 +646,20 @@ func (ff *Filters) AddLogs(id LogsSubID, log *types.Log) { maxLogs := ff.config.RpcSubscriptionFiltersMaxLogs if maxLogs > 0 && len(st)+1 > maxLogs { - st = st[len(st)+1-maxLogs:] // Remove oldest logs to make space + // Calculate the number of logs to remove + excessLogs := len(st) + 1 - maxLogs + if excessLogs > 0 { + if excessLogs >= len(st) { + // If excessLogs is greater than or equal to the length of st, remove all + st = []*types.Log{} + } else { + // Otherwise, remove the oldest logs + st = st[excessLogs:] + } + } } + + // Append the new log st = append(st, log) return st }) @@ -672,9 +683,21 @@ func (ff *Filters) AddPendingBlock(id HeadsSubID, block *types.Header) { } maxHeaders := ff.config.RpcSubscriptionFiltersMaxHeaders - if maxHeaders > 0 && len(st) >= maxHeaders { - st = st[1:] // Remove the oldest header to make space + if maxHeaders > 0 && len(st)+1 > maxHeaders { + // Calculate the number 
of headers to remove + excessHeaders := len(st) + 1 - maxHeaders + if excessHeaders > 0 { + if excessHeaders >= len(st) { + // If excessHeaders is greater than or equal to the length of st, remove all + st = []*types.Header{} + } else { + // Otherwise, remove the oldest headers + st = st[excessHeaders:] + } + } } + + // Append the new header st = append(st, block) return st }) @@ -712,9 +735,16 @@ func (ff *Filters) AddPendingTxs(id PendingTxsSubID, txs []types.Transaction) { flatSt = append(flatSt, txBatch...) } - // Remove the oldest transactions to make space for new ones - if len(flatSt)+len(txs) > maxTxs { - flatSt = flatSt[len(flatSt)+len(txs)-maxTxs:] + // Calculate how many transactions need to be removed + excessTxs := len(flatSt) + len(txs) - maxTxs + if excessTxs > 0 { + if excessTxs >= len(flatSt) { + // If excessTxs is greater than or equal to the length of flatSt, remove all + flatSt = []types.Transaction{} + } else { + // Otherwise, remove the oldest transactions + flatSt = flatSt[excessTxs:] + } } // Convert flatSt back to [][]types.Transaction with a single batch diff --git a/turbo/rpchelper/filters_test.go b/turbo/rpchelper/filters_test.go index 593d2662c7e..3be33ff7f9b 100644 --- a/turbo/rpchelper/filters_test.go +++ b/turbo/rpchelper/filters_test.go @@ -356,131 +356,136 @@ func TestFilters_SubscribeLogsGeneratesCorrectLogFilterRequest(t *testing.T) { } func TestFilters_AddLogs(t *testing.T) { - config := FiltersConfig{RpcSubscriptionFiltersMaxLogs: 5} - f := New(context.TODO(), config, nil, nil, nil, func() {}, log.New()) - logID := LogsSubID("test-log") - logEntry := &types.Log{} - - // Add 10 logs to the store, but limit is 5 - for i := 0; i < 10; i++ { - f.AddLogs(logID, logEntry) - } - - logs, found := f.ReadLogs(logID) - if !found { - t.Error("expected to find logs in the store") - } - if len(logs) != 5 { - t.Errorf("expected 5 logs in the store, got %d", len(logs)) - } -} - -func TestFilters_AddLogs_Unlimited(t *testing.T) { - config := 
FiltersConfig{RpcSubscriptionFiltersMaxLogs: 0} - f := New(context.TODO(), config, nil, nil, nil, func() {}, log.New()) - logID := LogsSubID("test-log") - logEntry := &types.Log{} - - // Add 10 logs to the store, limit is unlimited - for i := 0; i < 10; i++ { - f.AddLogs(logID, logEntry) - } - - logs, found := f.ReadLogs(logID) - if !found { - t.Error("expected to find logs in the store") - } - if len(logs) != 10 { - t.Errorf("expected 10 logs in the store, got %d", len(logs)) + tests := []struct { + name string + maxLogs int + numToAdd int + expectedLen int + }{ + {"WithinLimit", 5, 5, 5}, + {"ExceedingLimit", 2, 3, 2}, + {"UnlimitedLogs", 0, 10, 10}, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + config := FiltersConfig{RpcSubscriptionFiltersMaxLogs: tt.maxLogs} + f := New(context.TODO(), config, nil, nil, nil, func() {}, log.New()) + logID := LogsSubID("test-log") + logEntry := &types.Log{Address: libcommon.HexToAddress("095e7baea6a6c7c4c2dfeb977efac326af552d87")} + + for i := 0; i < tt.numToAdd; i++ { + f.AddLogs(logID, logEntry) + } + + logs, found := f.logsStores.Get(logID) + if !found { + t.Fatal("Expected to find logs in the store") + } + if len(logs) != tt.expectedLen { + t.Fatalf("Expected %d logs, but got %d", tt.expectedLen, len(logs)) + } + }) } } func TestFilters_AddPendingBlocks(t *testing.T) { - config := FiltersConfig{RpcSubscriptionFiltersMaxHeaders: 3} - f := New(context.TODO(), config, nil, nil, nil, func() {}, log.New()) - headerID := HeadsSubID("test-header") - header := &types.Header{} - - // Add 5 headers to the store, but limit is 3 - for i := 0; i < 5; i++ { - f.AddPendingBlock(headerID, header) - } - - headers, found := f.ReadPendingBlocks(headerID) - if !found { - t.Error("expected to find headers in the store") - } - if len(headers) != 3 { - t.Errorf("expected 3 headers in the store, got %d", len(headers)) - } -} - -func TestFilters_AddPendingBlocks_Unlimited(t *testing.T) { - config := 
FiltersConfig{RpcSubscriptionFiltersMaxHeaders: 0} - f := New(context.TODO(), config, nil, nil, nil, func() {}, log.New()) - headerID := HeadsSubID("test-header") - header := &types.Header{} - - // Add 5 headers to the store, limit is unlimited - for i := 0; i < 5; i++ { - f.AddPendingBlock(headerID, header) - } - - headers, found := f.ReadPendingBlocks(headerID) - if !found { - t.Error("expected to find headers in the store") - } - if len(headers) != 5 { - t.Errorf("expected 5 headers in the store, got %d", len(headers)) + tests := []struct { + name string + maxHeaders int + numToAdd int + expectedLen int + }{ + {"WithinLimit", 3, 3, 3}, + {"ExceedingLimit", 2, 5, 2}, + {"UnlimitedHeaders", 0, 10, 10}, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + config := FiltersConfig{RpcSubscriptionFiltersMaxHeaders: tt.maxHeaders} + f := New(context.TODO(), config, nil, nil, nil, func() {}, log.New()) + blockID := HeadsSubID("test-block") + header := &types.Header{} + + for i := 0; i < tt.numToAdd; i++ { + f.AddPendingBlock(blockID, header) + } + + blocks, found := f.pendingHeadsStores.Get(blockID) + if !found { + t.Fatal("Expected to find blocks in the store") + } + if len(blocks) != tt.expectedLen { + t.Fatalf("Expected %d blocks, but got %d", tt.expectedLen, len(blocks)) + } + }) } } func TestFilters_AddPendingTxs(t *testing.T) { - config := FiltersConfig{RpcSubscriptionFiltersMaxTxs: 4} - f := New(context.TODO(), config, nil, nil, nil, func() {}, log.New()) - txID := PendingTxsSubID("test-tx") - var tx types.Transaction = types.NewTransaction(0, libcommon.HexToAddress("095e7baea6a6c7c4c2dfeb977efac326af552d87"), uint256.NewInt(10), 50000, uint256.NewInt(10), nil) - tx, _ = tx.WithSignature(*types.LatestSignerForChainID(nil), libcommon.Hex2Bytes("9bea4c4daac7c7c52e093e6a4c35dbbcf8856f1af7b059ba20253e70848d094f8a8fae537ce25ed8cb5af9adac3f141af69bd515bd2ba031522df09b97dd72b100")) - - // Add 6 txs to the store, but limit is 4 - for i := 0; i < 6; 
i++ { - f.AddPendingTxs(txID, []types.Transaction{tx}) - } - - txs, found := f.ReadPendingTxs(txID) - if !found { - t.Error("expected to find txs in the store") - } - totalTxs := 0 - for _, batch := range txs { - totalTxs += len(batch) - } - if totalTxs != 4 { - t.Errorf("expected 4 txs in the store, got %d", totalTxs) - } -} - -func TestFilters_AddPendingTxs_Unlimited(t *testing.T) { - config := FiltersConfig{RpcSubscriptionFiltersMaxTxs: 0} - f := New(context.TODO(), config, nil, nil, nil, func() {}, log.New()) - txID := PendingTxsSubID("test-tx") - var tx types.Transaction = types.NewTransaction(0, libcommon.HexToAddress("095e7baea6a6c7c4c2dfeb977efac326af552d87"), uint256.NewInt(10), 50000, uint256.NewInt(10), nil) - tx, _ = tx.WithSignature(*types.LatestSignerForChainID(nil), libcommon.Hex2Bytes("9bea4c4daac7c7c52e093e6a4c35dbbcf8856f1af7b059ba20253e70848d094f8a8fae537ce25ed8cb5af9adac3f141af69bd515bd2ba031522df09b97dd72b100")) - - // Add 6 txs to the store, limit is unlimited - for i := 0; i < 6; i++ { - f.AddPendingTxs(txID, []types.Transaction{tx}) - } - - txs, found := f.ReadPendingTxs(txID) - if !found { - t.Error("expected to find txs in the store") - } - totalTxs := 0 - for _, batch := range txs { - totalTxs += len(batch) - } - if totalTxs != 6 { - t.Errorf("expected 6 txs in the store, got %d", totalTxs) + tests := []struct { + name string + maxTxs int + numToAdd int + expectedLen int + }{ + {"WithinLimit", 5, 5, 5}, + {"ExceedingLimit", 2, 6, 2}, + {"UnlimitedTxs", 0, 10, 10}, + {"TriggerPanic", 5, 10, 0}, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + config := FiltersConfig{RpcSubscriptionFiltersMaxTxs: tt.maxTxs} + f := New(context.TODO(), config, nil, nil, nil, func() {}, log.New()) + txID := PendingTxsSubID("test-tx") + var tx types.Transaction = types.NewTransaction(0, libcommon.HexToAddress("095e7baea6a6c7c4c2dfeb977efac326af552d87"), uint256.NewInt(10), 50000, uint256.NewInt(10), nil) + tx, _ = 
tx.WithSignature(*types.LatestSignerForChainID(nil), libcommon.Hex2Bytes("9bea4c4daac7c7c52e093e6a4c35dbbcf8856f1af7b059ba20253e70848d094f8a8fae537ce25ed8cb5af9adac3f141af69bd515bd2ba031522df09b97dd72b100")) + + // Testing for panic + if tt.name == "TriggerPanic" { + defer func() { + if r := recover(); r != nil { + t.Errorf("AddPendingTxs caused a panic: %v", r) + } + }() + + // Add transactions to trigger panic + // Initial batch to set the stage + for i := 0; i < 4; i++ { + f.AddPendingTxs(txID, []types.Transaction{tx}) + } + + // Adding more transactions in smaller increments to ensure the panic + for i := 0; i < 2; i++ { + f.AddPendingTxs(txID, []types.Transaction{tx}) + } + + // Adding another large batch to ensure it exceeds the limit and triggers the panic + largeBatch := make([]types.Transaction, 10) + for i := range largeBatch { + largeBatch[i] = tx + } + f.AddPendingTxs(txID, largeBatch) + } else { + for i := 0; i < tt.numToAdd; i++ { + f.AddPendingTxs(txID, []types.Transaction{tx}) + } + + txs, found := f.ReadPendingTxs(txID) + if !found { + t.Fatal("Expected to find transactions in the store") + } + totalTxs := 0 + for _, batch := range txs { + totalTxs += len(batch) + } + if totalTxs != tt.expectedLen { + t.Fatalf("Expected %d transactions, but got %d", tt.expectedLen, totalTxs) + } + } + }) } } From 93ec6d96deeab3d88ba96fc5ac5cd4e03da11ee7 Mon Sep 17 00:00:00 2001 From: milen <94537774+taratorio@users.noreply.github.com> Date: Fri, 6 Sep 2024 15:44:17 +0100 Subject: [PATCH 39/49] rpchelper: limits for filters by default (#11911) (#11912) cherry pick from E3 to E2 https://github.com/erigontech/erigon/commit/d7d9ded0ea3f5f887206f6c7f5e976eb58405154 relates to https://github.com/erigontech/erigon/issues/11890 --- turbo/rpchelper/config.go | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/turbo/rpchelper/config.go b/turbo/rpchelper/config.go index 21610b6a508..f0d17f7d590 100644 --- a/turbo/rpchelper/config.go +++ 
b/turbo/rpchelper/config.go @@ -14,9 +14,9 @@ type FiltersConfig struct { // These default values set no limits on the number of logs, block headers, transactions, // addresses, or topics that can be stored per subscription. var DefaultFiltersConfig = FiltersConfig{ - RpcSubscriptionFiltersMaxLogs: 0, // No limit on the number of logs per subscription - RpcSubscriptionFiltersMaxHeaders: 0, // No limit on the number of block headers per subscription - RpcSubscriptionFiltersMaxTxs: 0, // No limit on the number of transactions per subscription - RpcSubscriptionFiltersMaxAddresses: 0, // No limit on the number of addresses per subscription to filter logs by - RpcSubscriptionFiltersMaxTopics: 0, // No limit on the number of topics per subscription to filter logs by + RpcSubscriptionFiltersMaxLogs: 60, // Limit on the number of logs per subscription, 0 for no limit + RpcSubscriptionFiltersMaxHeaders: 60, // Limit on the number of block headers per subscription, 0 for no limit + RpcSubscriptionFiltersMaxTxs: 10_000, // Limit on the number of transactions per subscription, 0 for no limit + RpcSubscriptionFiltersMaxAddresses: 1_000, // Limit on the number of addresses per subscription to filter logs by, 0 for no limit + RpcSubscriptionFiltersMaxTopics: 1_000, // Limit on the number of topics per subscription to filter logs by, 0 for no limit } From 6b703df817288f500102023fa599e694c90e9390 Mon Sep 17 00:00:00 2001 From: Pratik Patil Date: Fri, 6 Sep 2024 20:17:32 +0530 Subject: [PATCH 40/49] [Polygon] Bor: Added Ahmedabad HF related configs and block number for amoy network (#11901) [Polygon] Bor: Added Ahmedabad HF related configs and block number for amoy network This is the Ahmedabad block number - [11865856](https://amoy.polygonscan.com/block/countdown/11865856) PR in bor - [bor#1324](https://github.com/maticnetwork/bor/pull/1324) --- params/chainspecs/amoy.json | 19 ++++++++++++++++++- 1 file changed, 18 insertions(+), 1 deletion(-) diff --git 
a/params/chainspecs/amoy.json b/params/chainspecs/amoy.json index b753f752858..70a9b17f588 100644 --- a/params/chainspecs/amoy.json +++ b/params/chainspecs/amoy.json @@ -39,7 +39,24 @@ "delhiBlock": 73100, "indoreBlock": 73100, "agraBlock": 73100, - "napoliBlock": 5423600 + "napoliBlock": 5423600, + "ahmedabadBlock": 11865856, + "blockAlloc": { + "11865856": { + "0000000000000000000000000000000000001001": { + "balance": "0x0", + "code": "0x608060405234801561001057600080fd5b50600436106100cf5760003560e01c80635407ca671161008c578063abca220411610066578063abca2204146102fa578063d72a0b6714610302578063ee3a87f21461031f578063f165053614610342576100cf565b80635407ca67146102585780636757e5d914610260578063942af179146102f2576100cf565b806303112a17146100d457806319494a17146100f357806330e69fc31461017e578063318926f7146101985780633434735f146101bc57806351950cd9146101c4575b600080fd5b6100f1600480360360208110156100ea57600080fd5b503561034a565b005b61016a6004803603604081101561010957600080fd5b8135919081019060408101602082013564010000000081111561012b57600080fd5b82018360208201111561013d57600080fd5b8035906020019184600183028401116401000000008311171561015f57600080fd5b50909250905061060d565b604080519115158252519081900360200190f35b61018661093e565b60408051918252519081900360200190f35b6101a0610944565b604080516001600160a01b039092168252519081900360200190f35b6101a0610968565b6100f160048036036102808110156101db57600080fd5b61020082013590610220830135906001600160a01b036102408501351690840184610280810161026082013564010000000081111561021957600080fd5b82018360208201111561022b57600080fd5b8035906020019184600183028401116401000000008311171561024d57600080fd5b509092509050610973565b610186610c78565b61027d6004803603602081101561027657600080fd5b5035610c7e565b6040805160208082528351818301528351919283929083019185019080838360005b838110156102b757818101518382015260200161029f565b50505050905090810190601f1680156102e45780820380516001836020036101000a031916815260200191505b509250505060405180910390f35b610186610d19565b610186610d1f565b61016a60048036
03602081101561031857600080fd5b5035610d25565b6100f16004803603604081101561033557600080fd5b5080359060200135610d3a565b610186610db0565b60008181526003602090815260409182902080548351601f60026000196101006001861615020190931692909204918201849004840281018401909452808452606093928301828280156103df5780601f106103b4576101008083540402835291602001916103df565b820191906000526020600020905b8154815290600101906020018083116103c257829003601f168201915b50505050509050805160001415610426576040805162461bcd60e51b815260206004820152600660248201526508599bdd5b9960d21b604482015290519081900360640190fd5b600082815260036020526040812061043d916112bb565b6000606082806020019051604081101561045657600080fd5b81516020830180516040519294929383019291908464010000000082111561047d57600080fd5b90830190602082018581111561049257600080fd5b82516401000000008111828201881017156104ac57600080fd5b82525081516020918201929091019080838360005b838110156104d95781810151838201526020016104c1565b50505050905090810190601f1680156105065780820380516001836020036101000a031916815260200191505b5060405250505091509150837f8797144948782adcede8e04bfa0bd8fd56941e0df7508bd02a629b477f7b073a60405160405180910390a2604080516313629df560e11b815260048101868152602482019283528351604483015283516001600160a01b038616936326c53bea938993879390929160640190602085019080838360005b838110156105a257818101518382015260200161058a565b50505050905090810190601f1680156105cf5780820380516001836020036101000a031916815260200191505b509350505050600060405180830381600087803b1580156105ef57600080fd5b505af1158015610603573d6000803e3d6000fd5b5050505050505050565b6000336002600160a01b031461065f576040805162461bcd60e51b81526020600482015260126024820152714e6f742053797374656d204164646573732160701b604482015290519081900360640190fd5b60606106a86106a385858080601f016020809104026020016040519081016040528093929190818152602001838380828437600092019190915250610db592505050565b610ddb565b905060006106c9826000815181106106bc57fe5b6020026020010151610f14565b90508060005460010114610724576040805162461bcd60e51b815260206004820152601b60248201
527f537461746549647320617265206e6f742073657175656e7469616c0000000000604482015290519081900360640190fd5b6000805460019081018255835161074c918591811061073f57fe5b6020026020010151610f62565b9050606061076d8460028151811061076057fe5b6020026020010151610f82565b905061077882610fff565b15610933576000624c4b409050606084836040516024018083815260200180602001828103825283818151815260200191508051906020019080838360005b838110156107cf5781810151838201526020016107b7565b50505050905090810190601f1680156107fc5780820380516001836020036101000a031916815260200191505b5060408051601f198184030181529190526020810180516001600160e01b03166313629df560e11b1781528151919650600095508594509092509050828887f1604080518215158152905191985086917f5a22725590b0a51c923940223f7458512164b1113359a735e86e7f27f44791ee9181900360200190a28661093057838360405160200180836001600160a01b0316815260200180602001828103825283818151815260200191508051906020019080838360005b838110156108cc5781810151838201526020016108b4565b50505050905090810190601f1680156108f95780820380516001836020036101000a031916815260200191505b5060408051601f1981840301815291815260008b815260036020908152919020825161092e9750909550910192509050611302565b505b50505b505050509392505050565b60045481565b7f000000000000000000000000be971fef2bb60f709e1daf3e55d00914e230cd9481565b6002600160a01b0381565b6201000085106109be576040805162461bcd60e51b81526020600482015260116024820152700d2dcecc2d8d2c840d8cac2cc92dcc8caf607b1b604482015290519081900360640190fd5b60045460058054600101908190551115610a05576040805162461bcd60e51b8152602060048201526003602482015262195b9960ea1b604482015290519081900360640190fd5b60015480610a42576040805162461bcd60e51b8152602060048201526005602482015264085c9bdbdd60da1b604482015290519081900360640190fd5b60008585858560405160200180858152602001846001600160a01b0316815260200180602001828103825284848281815260200192508082843760008184015260408051601f19601f909301831690940184810390920184525250805160209091012096507f28cf91ac064e179f8a42e4b7a20ba080187781da55fd4f3f18870b7a25bacb559550505050828414801592509050610aef
575060008281526002602052604090205460ff16155b610b29576040805162461bcd60e51b815260206004808301919091526024820152631d5cd95960e21b604482015290519081900360640190fd5b60008281526002602052604090819020805460ff191660011790558051610200818101909252610b74918b9060109083908390808284376000920191909152508b915085905061100b565b8314610bb0576040805162461bcd60e51b815260206004820152600660248201526510b83937b7b360d11b604482015290519081900360640190fd5b60405187907f8797144948782adcede8e04bfa0bd8fd56941e0df7508bd02a629b477f7b073a90600090a2604080516313629df560e11b81526004810189815260248201928352604482018790526001600160a01b038916926326c53bea928b928a928a92606401848480828437600081840152601f19601f820116905080830192505050945050505050600060405180830381600087803b158015610c5557600080fd5b505af1158015610c69573d6000803e3d6000fd5b50505050505050505050505050565b60005481565b60036020908152600091825260409182902080548351601f600260001961010060018616150201909316929092049182018490048402810184019094528084529091830182828015610d115780601f10610ce657610100808354040283529160200191610d11565b820191906000526020600020905b815481529060010190602001808311610cf457829003601f168201915b505050505081565b60055481565b60015481565b60026020526000908152604090205460ff1681565b336001600160a01b037f000000000000000000000000be971fef2bb60f709e1daf3e55d00914e230cd941614610da5576040805162461bcd60e51b815260206004820152600b60248201526a10b937b7ba29b2ba3a32b960a91b604482015290519081900360640190fd5b600191909155600455565b601081565b610dbd611380565b5060408051808201909152815181526020828101908201525b919050565b6060610de6826110b6565b610def57600080fd5b6000610dfa836110f0565b905060608167ffffffffffffffff81118015610e1557600080fd5b50604051908082528060200260200182016040528015610e4f57816020015b610e3c611380565b815260200190600190039081610e345790505b5090506000610e618560200151611148565b60208601510190506000805b84811015610eb857610e7e836111ab565b9150604051806040016040528083815260200184815250848281518110610ea157fe5b602090810291909101015291810191600101610e6d565b508551602087015183
0314610f0a576040805162461bcd60e51b81526020600482015260136024820152722bb937b733903a37ba30b6103632b733ba341760691b604482015290519081900360640190fd5b5090949350505050565b805160009015801590610f2957508151602110155b610f3257600080fd5b600080610f3e84611244565b815191935091506020821015610f5a5760208290036101000a90045b949350505050565b8051600090601514610f7357600080fd5b610f7c82610f14565b92915050565b8051606090610f9057600080fd5b600080610f9c84611244565b9150915060608167ffffffffffffffff81118015610fb957600080fd5b506040519080825280601f01601f191660200182016040528015610fe4576020820181803683370190505b50905060208101610ff684828561126a565b50949350505050565b3b63ffffffff16151590565b600081815b6010811015610ff657600185821c8116141561106c5785816010811061103257fe5b60200201518260405160200180838152602001828152602001925050506040516020818303038152906040528051906020012091506110ae565b8186826010811061107957fe5b602002015160405160200180838152602001828152602001925050506040516020818303038152906040528051906020012091505b600101611010565b80516000906110c757506000610dd6565b6020820151805160001a9060c08210156110e657600092505050610dd6565b5060019392505050565b805160009061110157506000610dd6565b6000806111118460200151611148565b602085015185519181019250015b8082101561113f57611130826111ab565b6001909301929091019061111f565b50909392505050565b8051600090811a6080811015611162576000915050610dd6565b60b881108061117d575060c0811080159061117d575060f881105b1561118c576001915050610dd6565b60c08110156111a05760b519019050610dd6565b60f519019050610dd6565b80516000908190811a60808110156111c6576001915061123d565b60b88110156111db57607e198101915061123d565b60c08110156112085760b78103600185019450806020036101000a8551046001820181019350505061123d565b60f881101561121d5760be198101915061123d565b60f78103600185019450806020036101000a855104600182018101935050505b5092915050565b60008060006112568460200151611148565b602085015194519481019594039392505050565b80611274576112b6565b5b60208110611294578251825260209283019290910190601f1901611275565b80156112b6578251825160208390036101000a6000
1901801990921691161782525b505050565b50805460018160011615610100020316600290046000825580601f106112e157506112ff565b601f0160209004906000526020600020908101906112ff919061139a565b50565b828054600181600116156101000203166002900490600052602060002090601f016020900481019282601f1061134357805160ff1916838001178555611370565b82800160010185558215611370579182015b82811115611370578251825591602001919060010190611355565b5061137c92915061139a565b5090565b604051806040016040528060008152602001600081525090565b5b8082111561137c576000815560010161139b56fea164736f6c634300060c000a" + }, + "0000000000000000000000000000000000001010": { + "balance": "0x0", + "code": "0x6080604052600436106101b75760003560e01c80638da5cb5b116100ec578063b789543c1161008a578063e614d0d611610064578063e614d0d614610695578063ed9ef524146106aa578063f2fde38b146106dd578063fc0c546a14610710576101b7565b8063b789543c14610626578063cc79f97b1461066b578063e306f77914610680576101b7565b806395d89b41116100c657806395d89b41146105a6578063a9059cbb146105bb578063abceeba2146105e7578063acd06cb3146105fc576101b7565b80638da5cb5b146105535780638f32d59b146105685780639025e64c14610591576101b7565b806347e7ef241161015957806370a082311161013357806370a082311461043c578063715018a61461046f578063771282f61461048457806377d32e9414610499576101b7565b806347e7ef24146103b3578063485cc955146103ec57806360f96a8f14610427576101b7565b806319d27d9c1161019557806319d27d9c146102a25780632e1a7d4d14610356578063313ce5671461037357806342fc47fb1461039e576101b7565b806306fdde03146101bc5780631499c5921461024657806318160ddd1461027b575b600080fd5b3480156101c857600080fd5b506101d1610725565b6040805160208082528351818301528351919283929083019185019080838360005b8381101561020b5781810151838201526020016101f3565b50505050905090810190601f1680156102385780820380516001836020036101000a031916815260200191505b509250505060405180910390f35b34801561025257600080fd5b506102796004803603602081101561026957600080fd5b50356001600160a01b031661075c565b005b34801561028757600080fd5b5061029061079c565b60408051918252519081900360200190f35b3480156102ae57
600080fd5b5061033a600480360360a08110156102c557600080fd5b8101906020810181356401000000008111156102e057600080fd5b8201836020820111156102f257600080fd5b8035906020019184600183028401116401000000008311171561031457600080fd5b9193509150803590602081013590604081013590606001356001600160a01b03166107ac565b604080516001600160a01b039092168252519081900360200190f35b6102796004803603602081101561036c57600080fd5b50356107ee565b34801561037f57600080fd5b506103886108c6565b6040805160ff9092168252519081900360200190f35b3480156103aa57600080fd5b5061033a6108cb565b3480156103bf57600080fd5b50610279600480360360408110156103d657600080fd5b506001600160a01b0381351690602001356108da565b3480156103f857600080fd5b506102796004803603604081101561040f57600080fd5b506001600160a01b03813581169160200135166109a8565b34801561043357600080fd5b5061033a610a21565b34801561044857600080fd5b506102906004803603602081101561045f57600080fd5b50356001600160a01b0316610a30565b34801561047b57600080fd5b50610279610a3d565b34801561049057600080fd5b50610290610a98565b3480156104a557600080fd5b5061033a600480360360408110156104bc57600080fd5b813591908101906040810160208201356401000000008111156104de57600080fd5b8201836020820111156104f057600080fd5b8035906020019184600183028401116401000000008311171561051257600080fd5b91908080601f016020809104026020016040519081016040528093929190818152602001838380828437600092019190915250929550610a9e945050505050565b34801561055f57600080fd5b5061033a610bc2565b34801561057457600080fd5b5061057d610bd1565b604080519115158252519081900360200190f35b34801561059d57600080fd5b506101d1610be2565b3480156105b257600080fd5b506101d1610c00565b61057d600480360360408110156105d157600080fd5b506001600160a01b038135169060200135610c1d565b3480156105f357600080fd5b50610290610c40565b34801561060857600080fd5b5061057d6004803603602081101561061f57600080fd5b5035610cc9565b34801561063257600080fd5b506102906004803603608081101561064957600080fd5b506001600160a01b038135169060208101359060408101359060600135610cde565b34801561067757600080fd5b50610290610cfd565b34801561068c57600080fd5b5061029061
0d03565b3480156106a157600080fd5b50610290610d09565b3480156106b657600080fd5b50610279600480360360208110156106cd57600080fd5b50356001600160a01b0316610d53565b3480156106e957600080fd5b506102796004803603602081101561070057600080fd5b50356001600160a01b0316610e05565b34801561071c57600080fd5b5061033a610e22565b60408051808201909152601781527f506f6c79676f6e2045636f73797374656d20546f6b656e000000000000000000602082015290565b6040805162461bcd60e51b815260206004820152601060248201526f44697361626c6564206665617475726560801b604482015290519081900360640190fd5b6b204fce5e3e2502611000000090565b6040805162461bcd60e51b815260206004820152601060248201526f44697361626c6564206665617475726560801b6044820152905160009181900360640190fd5b3360006107fa82610a30565b600654909150610810908463ffffffff610e3116565b600655821580159061082157508234145b610868576040805162461bcd60e51b8152602060048201526013602482015272125b9cdd59999a58da595b9d08185b5bdd5b9d606a1b604482015290519081900360640190fd5b6002546001600160a01b0380841691167febff2602b3f468259e1e99f613fed6691f3a6526effe6ef3e768ba7ae7a36c4f85846108a487610a30565b60408051938452602084019290925282820152519081900360600190a3505050565b601290565b6003546001600160a01b031681565b6108e2610bd1565b6108eb57600080fd5b60008111801561090357506001600160a01b03821615155b61093e5760405162461bcd60e51b815260040180806020018281038252602381526020018061138d6023913960400191505060405180910390fd5b600061094983610a30565b60065490915061095f908363ffffffff610e4616565b60065561096c8383610e58565b6002546001600160a01b0380851691167f4e2ca0515ed1aef1395f66b5303bb5d6f1bf9d61a353fa53f73f8ac9973fa9f684846108a488610a30565b60075460ff16156109ea5760405162461bcd60e51b815260040180806020018281038252602381526020018061136a6023913960400191505060405180910390fd5b6007805460ff19166001179055600280546001600160a01b0383166001600160a01b0319909116179055610a1d82610f17565b5050565b6004546001600160a01b031681565b6001600160a01b03163190565b610a45610bd1565b610a4e57600080fd5b600080546040516001600160a01b03909116907f8be0079c531659141344cd1fd0a4f28419497f9722a3da
afe3b4186f6b6457e0908390a3600080546001600160a01b0319169055565b60065481565b6000806000808451604114610ab95760009350505050610bbc565b50505060208201516040830151604184015160ff16601b811015610adb57601b015b8060ff16601b14158015610af357508060ff16601c14155b15610b045760009350505050610bbc565b6040805160008152602080820180845289905260ff8416828401526060820186905260808201859052915160019260a0808401939192601f1981019281900390910190855afa158015610b5b573d6000803e3d6000fd5b5050604051601f1901519450506001600160a01b038416610bb8576040805162461bcd60e51b815260206004820152601260248201527122b93937b91034b71032b1b932b1b7bb32b960711b604482015290519081900360640190fd5b5050505b92915050565b6000546001600160a01b031690565b6000546001600160a01b0316331490565b604051806040016040528060028152602001613a9960f01b81525081565b6040805180820190915260038152621413d360ea1b602082015290565b6000813414610c2e57506000610bbc565b610c39338484610f85565b9392505050565b6040518060800160405280605b8152602001611434605b91396040516020018082805190602001908083835b60208310610c8b5780518252601f199092019160209182019101610c6c565b6001836020036101000a0380198251168184511680821785525050505050509050019150506040516020818303038152906040528051906020012081565b60056020526000908152604090205460ff1681565b6000610cf4610cef868686866111d2565b61128b565b95945050505050565b613a9981565b60015481565b6040518060800160405280605281526020016113b06052913960405160200180828051906020019080838360208310610c8b5780518252601f199092019160209182019101610c6c565b610d5b610bd1565b610d6457600080fd5b6001600160a01b038116610da95760405162461bcd60e51b81526004018080602001828103825260328152602001806114026032913960400191505060405180910390fd5b6003546040516001600160a01b038084169216907f1f9f3556dd336016cdf20adaead7d5c73665dba664b60e8c17e9a4eb91ce1d3990600090a3600380546001600160a01b0319166001600160a01b0392909216919091179055565b610e0d610bd1565b610e1657600080fd5b610e1f81610f17565b50565b6002546001600160a01b031681565b600082821115610e4057600080fd5b50900390565b600082820183811015610c3957600080fd5b60085415610e9a5760
40805162461bcd60e51b815260206004820152600a6024820152697265656e7472616e637960b01b604482015290519081900360640190fd5b6001600855604051611388906000906060906001600160a01b038616908490869085818181858888f193505050503d8060008114610ef4576040519150601f19603f3d011682016040523d82523d6000602084013e610ef9565b606091505b509150915081610f0b57805160208201fd5b50506000600855505050565b6001600160a01b038116610f2a57600080fd5b600080546040516001600160a01b03808516939216917f8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e091a3600080546001600160a01b0319166001600160a01b0392909216919091179055565b604080516370a0823160e01b81526001600160a01b03851660048201529051600091829130916370a08231916024808301926020929190829003018186803b158015610fd057600080fd5b505afa158015610fe4573d6000803e3d6000fd5b505050506040513d6020811015610ffa57600080fd5b5051604080516370a0823160e01b81526001600160a01b0387166004820152905191925060009130916370a08231916024808301926020929190829003018186803b15801561104857600080fd5b505afa15801561105c573d6000803e3d6000fd5b505050506040513d602081101561107257600080fd5b50519050611081868686611299565b600254604080516370a0823160e01b81526001600160a01b03898116600483018190529251818a1694909116917fe6497e3ee548a3372136af2fcb0696db31fc6cf20260707645068bd3fe97f3c49189918891889130916370a0823191602480820192602092909190829003018186803b1580156110fe57600080fd5b505afa158015611112573d6000803e3d6000fd5b505050506040513d602081101561112857600080fd5b5051604080516370a0823160e01b81526001600160a01b038f166004820152905130916370a08231916024808301926020929190829003018186803b15801561117057600080fd5b505afa158015611184573d6000803e3d6000fd5b505050506040513d602081101561119a57600080fd5b50516040805195865260208601949094528484019290925260608401526080830152519081900360a00190a450600195945050505050565b6000806040518060800160405280605b8152602001611434605b91396040516020018082805190602001908083835b602083106112205780518252601f199092019160209182019101611201565b51815160209384036101000a60001901801990921691161790526040805192909401828103601f1901
835280855282519282019290922082526001600160a01b039b909b169a81019a909a5250880196909652505050606084019190915260808301525060a0902090565b6000610bbc82600154611347565b6001600160a01b0382163014156112ed576040805162461bcd60e51b8152602060048201526013602482015272063616e27742073656e6420746f204d5243323606c1b604482015290519081900360640190fd5b6112f78282610e58565b816001600160a01b0316836001600160a01b03167fddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef836040518082815260200191505060405180910390a3505050565b60405161190160f01b815260028101919091526022810191909152604290209056fe54686520636f6e747261637420697320616c726561647920696e697469616c697a6564496e73756666696369656e7420616d6f756e74206f7220696e76616c69642075736572454950373132446f6d61696e28737472696e67206e616d652c737472696e672076657273696f6e2c75696e7432353620636861696e49642c6164647265737320766572696679696e67436f6e7472616374294368696c6420746f6b656e3a206e6577206368696c64206164647265737320697320746865207a65726f2061646472657373546f6b656e5472616e736665724f726465722861646472657373207370656e6465722c75696e7432353620746f6b656e49644f72416d6f756e742c6279746573333220646174612c75696e743235362065787069726174696f6e29a265627a7a723158205f23be7574e70cfc01d0cfd6803b871f92465e9ae4a10fe95ed31ccb810bda3e64736f6c63430005110032" + }, + "360ad4f9a9A8EFe9A8DCB5f461c4Cc1047E1Dcf9": { + "balance": "0x0", + "code": 
"0x6080604052600436106100af576000357c0100000000000000000000000000000000000000000000000000000000900463ffffffff16806306fdde03146100b9578063095ea7b31461014957806318160ddd146101ae57806323b872dd146101d95780632e1a7d4d1461025e578063313ce5671461028b57806370a08231146102bc57806395d89b4114610313578063a9059cbb146103a3578063d0e30db014610408578063dd62ed3e14610412575b6100b7610489565b005b3480156100c557600080fd5b506100ce610526565b6040518080602001828103825283818151815260200191508051906020019080838360005b8381101561010e5780820151818401526020810190506100f3565b50505050905090810190601f16801561013b5780820380516001836020036101000a031916815260200191505b509250505060405180910390f35b34801561015557600080fd5b50610194600480360381019080803573ffffffffffffffffffffffffffffffffffffffff169060200190929190803590602001909291905050506105c4565b604051808215151515815260200191505060405180910390f35b3480156101ba57600080fd5b506101c36106b6565b6040518082815260200191505060405180910390f35b3480156101e557600080fd5b50610244600480360381019080803573ffffffffffffffffffffffffffffffffffffffff169060200190929190803573ffffffffffffffffffffffffffffffffffffffff169060200190929190803590602001909291905050506106d5565b604051808215151515815260200191505060405180910390f35b34801561026a57600080fd5b5061028960048036038101908080359060200190929190505050610a22565b005b34801561029757600080fd5b506102a0610b55565b604051808260ff1660ff16815260200191505060405180910390f35b3480156102c857600080fd5b506102fd600480360381019080803573ffffffffffffffffffffffffffffffffffffffff169060200190929190505050610b68565b6040518082815260200191505060405180910390f35b34801561031f57600080fd5b50610328610b80565b6040518080602001828103825283818151815260200191508051906020019080838360005b8381101561036857808201518184015260208101905061034d565b50505050905090810190601f1680156103955780820380516001836020036101000a031916815260200191505b509250505060405180910390f35b3480156103af57600080fd5b506103ee600480360381019080803573ffffffffffffffffffffffffffffffffffffffff1690602001909291908035906020019092919
0505050610c1e565b604051808215151515815260200191505060405180910390f35b610410610489565b005b34801561041e57600080fd5b50610473600480360381019080803573ffffffffffffffffffffffffffffffffffffffff169060200190929190803573ffffffffffffffffffffffffffffffffffffffff169060200190929190505050610c33565b6040518082815260200191505060405180910390f35b34600360003373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff168152602001908152602001600020600082825401925050819055503373ffffffffffffffffffffffffffffffffffffffff167fe1fffcc4923d04b559f4d29a8bfc6cda04eb5b0d3c460751c2402c5c5cc9109c346040518082815260200191505060405180910390a2565b60008054600181600116156101000203166002900480601f0160208091040260200160405190810160405280929190818152602001828054600181600116156101000203166002900480156105bc5780601f10610591576101008083540402835291602001916105bc565b820191906000526020600020905b81548152906001019060200180831161059f57829003601f168201915b505050505081565b600081600460003373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002060008573ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff168152602001908152602001600020819055508273ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff167f8c5be1e5ebec7d5bd14f71427d1e84f3dd0314c0f7b2291e5b200ac8c7c3b925846040518082815260200191505060405180910390a36001905092915050565b60003073ffffffffffffffffffffffffffffffffffffffff1631905090565b600081600360008673ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff168152602001908152602001600020541015151561072557600080fd5b3373ffffffffffffffffffffffffffffffffffffffff168473ffffffffffffffffffffffffffffffffffffffff16141580156107fd57507fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff600460008673ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002060003373fffffffffffffffffffffffffff
fffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1681526020019081526020016000205414155b156109185781600460008673ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002060003373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff168152602001908152602001600020541015151561088d57600080fd5b81600460008673ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002060003373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff168152602001908152602001600020600082825403925050819055505b81600360008673ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1681526020019081526020016000206000828254039250508190555081600360008573ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff168152602001908152602001600020600082825401925050819055508273ffffffffffffffffffffffffffffffffffffffff168473ffffffffffffffffffffffffffffffffffffffff167fddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef846040518082815260200191505060405180910390a3600190509392505050565b80600360003373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1681526020019081526020016000205410151515610a7057600080fd5b80600360003373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff168152602001908152602001600020600082825403925050819055503373ffffffffffffffffffffffffffffffffffffffff166108fc829081150290604051600060405180830381858888f19350505050158015610b03573d6000803e3d6000fd5b503373ffffffffffffffffffffffffffffffffffffffff167f7fcf532c15f0a6db0bd6d0e038bea71d30d808c7d98cb3bf7268a95bf5081b65826040518082815260200191505060405180910390a250565b600260009054906101000a900460ff1681565b60036020528060005260406000206000915090505481565b60018054600181600116156101000203166002900480601f01602080910402602001604051908101604052809291908181526020018
2805460018160011615610100020316600290048015610c165780601f10610beb57610100808354040283529160200191610c16565b820191906000526020600020905b815481529060010190602001808311610bf957829003601f168201915b505050505081565b6000610c2b3384846106d5565b905092915050565b60046020528160005260406000206020528060005260406000206000915091505054815600a165627a7a72305820ea7b3a90a89969eb00d2a56f58b0f80481944475908acf25438759b53be73e5b0029" + } + } + } } } \ No newline at end of file From 5b0fd0956947c8afc58241febb027fef32826e90 Mon Sep 17 00:00:00 2001 From: milen <94537774+taratorio@users.noreply.github.com> Date: Sat, 7 Sep 2024 02:51:17 +0100 Subject: [PATCH 41/49] Revert "rpchelper: limits for filters by default (#11911) (#11912)" (#11913) This reverts commit 93ec6d96deeab3d88ba96fc5ac5cd4e03da11ee7. based on advice from @bretep https://discord.com/channels/687972960811745322/983710221308416010/1281647424569344141 relates to https://github.com/erigontech/erigon/issues/11890 --- turbo/rpchelper/config.go | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/turbo/rpchelper/config.go b/turbo/rpchelper/config.go index f0d17f7d590..21610b6a508 100644 --- a/turbo/rpchelper/config.go +++ b/turbo/rpchelper/config.go @@ -14,9 +14,9 @@ type FiltersConfig struct { // These default values set no limits on the number of logs, block headers, transactions, // addresses, or topics that can be stored per subscription. 
var DefaultFiltersConfig = FiltersConfig{ - RpcSubscriptionFiltersMaxLogs: 60, // Limit on the number of logs per subscription, 0 for no limit - RpcSubscriptionFiltersMaxHeaders: 60, // Limit on the number of block headers per subscription, 0 for no limit - RpcSubscriptionFiltersMaxTxs: 10_000, // Limit on the number of transactions per subscription, 0 for no limit - RpcSubscriptionFiltersMaxAddresses: 1_000, // Limit on the number of addresses per subscription to filter logs by, 0 for no limit - RpcSubscriptionFiltersMaxTopics: 1_000, // Limit on the number of topics per subscription to filter logs by, 0 for no limit + RpcSubscriptionFiltersMaxLogs: 0, // No limit on the number of logs per subscription + RpcSubscriptionFiltersMaxHeaders: 0, // No limit on the number of block headers per subscription + RpcSubscriptionFiltersMaxTxs: 0, // No limit on the number of transactions per subscription + RpcSubscriptionFiltersMaxAddresses: 0, // No limit on the number of addresses per subscription to filter logs by + RpcSubscriptionFiltersMaxTopics: 0, // No limit on the number of topics per subscription to filter logs by } From 8ddff2afb811e4a4f82427a67d97757c822cf38f Mon Sep 17 00:00:00 2001 From: lupin012 <58134934+lupin012@users.noreply.github.com> Date: Mon, 9 Sep 2024 03:47:06 +0200 Subject: [PATCH 42/49] erigon_getBlockByTimestamp() add check on header == null (#11918) --- turbo/jsonrpc/erigon_block.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/turbo/jsonrpc/erigon_block.go b/turbo/jsonrpc/erigon_block.go index 7e4a1190665..1e9ae7a6389 100644 --- a/turbo/jsonrpc/erigon_block.go +++ b/turbo/jsonrpc/erigon_block.go @@ -89,6 +89,9 @@ func (api *ErigonImpl) GetBlockByTimestamp(ctx context.Context, timeStamp rpc.Ti uintTimestamp := timeStamp.TurnIntoUint64() currentHeader := rawdb.ReadCurrentHeader(tx) + if currentHeader == nil { + return nil, fmt.Errorf("current header not found") + } currentHeaderTime := currentHeader.Time highestNumber := 
currentHeader.Number.Uint64() From 9d9f22cb05211949dc02e10972f82b628335e109 Mon Sep 17 00:00:00 2001 From: Dmytro Vovk Date: Mon, 9 Sep 2024 02:50:23 +0100 Subject: [PATCH 43/49] diagnostics: all pprofs (#11891) (#11917) Extended pprof read API to include: goroutine, threadcreate, heap, allocs, block, mutex --- diagnostics/profile.go | 53 ++++++++++++++++++++++++++++++++++++++++++ diagnostics/setup.go | 1 + diagnostics/sysinfo.go | 16 ------------- 3 files changed, 54 insertions(+), 16 deletions(-) create mode 100644 diagnostics/profile.go diff --git a/diagnostics/profile.go b/diagnostics/profile.go new file mode 100644 index 00000000000..20ed970102d --- /dev/null +++ b/diagnostics/profile.go @@ -0,0 +1,53 @@ +// Copyright 2024 The Erigon Authors +// This file is part of Erigon. +// +// Erigon is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// Erigon is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with Erigon. If not, see . 
+ +package diagnostics + +import ( + "fmt" + "net/http" + "runtime/pprof" + "strings" + + diaglib "github.com/ledgerwatch/erigon-lib/diagnostics" +) + +func SetupProfileAccess(metricsMux *http.ServeMux, diag *diaglib.DiagnosticClient) { + if metricsMux == nil { + return + } + + //handle all pprof, supported: goroutine, threadcreate, heap, allocs, block, mutex + metricsMux.HandleFunc("/pprof/", func(w http.ResponseWriter, r *http.Request) { + profile := strings.TrimPrefix(r.URL.Path, "/pprof/") + writePprofProfile(w, profile) + }) +} + +func writePprofProfile(w http.ResponseWriter, profile string) { + p := pprof.Lookup(profile) + if p == nil { + http.Error(w, "Unknown profile: "+profile, http.StatusNotFound) + return + } + + w.Header().Set("Content-Type", "aplication/profile") + err := p.WriteTo(w, 0) + if err != nil { + http.Error(w, fmt.Sprintf("Failed to write profile: %v", err), http.StatusInternalServerError) + return + } +} diff --git a/diagnostics/setup.go b/diagnostics/setup.go index 38ee64d1cf6..241afb03301 100644 --- a/diagnostics/setup.go +++ b/diagnostics/setup.go @@ -134,4 +134,5 @@ func SetupEndpoints(ctx *cli.Context, node *node.ErigonNode, diagMux *http.Serve SetupHeadersAccess(diagMux, diagnostic) SetupBodiesAccess(diagMux, diagnostic) SetupSysInfoAccess(diagMux, diagnostic) + SetupProfileAccess(diagMux, diagnostic) } diff --git a/diagnostics/sysinfo.go b/diagnostics/sysinfo.go index aae54428ed7..bf1d9498ecb 100644 --- a/diagnostics/sysinfo.go +++ b/diagnostics/sysinfo.go @@ -18,9 +18,7 @@ package diagnostics import ( "encoding/json" - "fmt" "net/http" - "runtime/pprof" diaglib "github.com/ledgerwatch/erigon-lib/diagnostics" "github.com/ledgerwatch/erigon-lib/sysutils" @@ -54,20 +52,6 @@ func SetupSysInfoAccess(metricsMux *http.ServeMux, diag *diaglib.DiagnosticClien w.Header().Set("Content-Type", "application/json") writeMemoryInfo(w) }) - - metricsMux.HandleFunc("/heap-profile", func(w http.ResponseWriter, r *http.Request) { - 
w.Header().Set("Access-Control-Allow-Origin", "*") - w.Header().Set("Content-Type", "aplication/profile") - writeHeapProfile(w) - }) -} - -func writeHeapProfile(w http.ResponseWriter) { - err := pprof.Lookup("heap").WriteTo(w, 0) - if err != nil { - http.Error(w, fmt.Sprintf("Failed to write profile: %v", err), http.StatusInternalServerError) - return - } } func writeHardwareInfo(w http.ResponseWriter, diag *diaglib.DiagnosticClient) { From a50da8ee20168bff9b616d0da22e08f8f8fa6916 Mon Sep 17 00:00:00 2001 From: Dmytro Vovk Date: Mon, 9 Sep 2024 09:23:27 +0100 Subject: [PATCH 44/49] diagnostics: updated serving ui port (#11740) (#11916) - Changed UI serving port to 5137 as 6060 is busy with diag API - Refactored request headers to be set in middleware --- cmd/diag/ui/ui.go | 2 +- diagnostics/block_body_download_stats.go | 1 - diagnostics/bodies_info.go | 1 - diagnostics/bootnodes.go | 1 - diagnostics/cmd_line.go | 1 - diagnostics/db.go | 2 -- diagnostics/flags.go | 1 - diagnostics/header_downloader_stats.go | 1 - diagnostics/headers.go | 1 - diagnostics/logs.go | 2 -- diagnostics/mem.go | 1 - diagnostics/nodeinfo.go | 1 - diagnostics/peers.go | 1 - diagnostics/setup.go | 12 ++++++++++++ diagnostics/snapshot_sync.go | 5 ----- diagnostics/sysinfo.go | 19 +++++++++++++++---- diagnostics/version.go | 1 - 17 files changed, 28 insertions(+), 25 deletions(-) diff --git a/cmd/diag/ui/ui.go b/cmd/diag/ui/ui.go index 1620747b5d9..8ee1df2a006 100644 --- a/cmd/diag/ui/ui.go +++ b/cmd/diag/ui/ui.go @@ -23,7 +23,7 @@ var ( Name: "ui.addr", Usage: "URL to serve UI web application", Required: false, - Value: "127.0.0.1:6060", + Value: "127.0.0.1:5137", } ) diff --git a/diagnostics/block_body_download_stats.go b/diagnostics/block_body_download_stats.go index ba72e90490b..8b9bde12910 100644 --- a/diagnostics/block_body_download_stats.go +++ b/diagnostics/block_body_download_stats.go @@ -31,7 +31,6 @@ func SetupBlockBodyDownload(metricsMux *http.ServeMux) { } 
metricsMux.HandleFunc("/block_body_download", func(w http.ResponseWriter, r *http.Request) { - w.Header().Set("Access-Control-Allow-Origin", "*") writeBlockBodyDownload(w, r) }) } diff --git a/diagnostics/bodies_info.go b/diagnostics/bodies_info.go index 6a656a0c339..5cb84d44cac 100644 --- a/diagnostics/bodies_info.go +++ b/diagnostics/bodies_info.go @@ -28,7 +28,6 @@ func SetupBodiesAccess(metricsMux *http.ServeMux, diag *diaglib.DiagnosticClient } metricsMux.HandleFunc("/bodies", func(w http.ResponseWriter, r *http.Request) { - w.Header().Set("Access-Control-Allow-Origin", "*") w.Header().Set("Content-Type", "application/json") writeBodies(w, diag) }) diff --git a/diagnostics/bootnodes.go b/diagnostics/bootnodes.go index 06e9a766cce..6aeab140103 100644 --- a/diagnostics/bootnodes.go +++ b/diagnostics/bootnodes.go @@ -29,7 +29,6 @@ func SetupBootnodesAccess(metricsMux *http.ServeMux, node *node.ErigonNode) { } metricsMux.HandleFunc("/bootnodes", func(w http.ResponseWriter, r *http.Request) { - w.Header().Set("Access-Control-Allow-Origin", "*") w.Header().Set("Content-Type", "application/json") bootnodes := node.Node().Config().P2P.BootstrapNodesV5 diff --git a/diagnostics/cmd_line.go b/diagnostics/cmd_line.go index 6dd68cb92ff..7b623150866 100644 --- a/diagnostics/cmd_line.go +++ b/diagnostics/cmd_line.go @@ -29,7 +29,6 @@ func SetupCmdLineAccess(metricsMux *http.ServeMux) { } metricsMux.HandleFunc("/cmdline", func(w http.ResponseWriter, r *http.Request) { - w.Header().Set("Access-Control-Allow-Origin", "*") w.Header().Set("Content-Type", "application/json") var space []byte diff --git a/diagnostics/db.go b/diagnostics/db.go index b91cb3682ea..6eaae544cd3 100644 --- a/diagnostics/db.go +++ b/diagnostics/db.go @@ -43,12 +43,10 @@ func SetupDbAccess(ctx *cli.Context, metricsMux *http.ServeMux) { dataDir = paths.DataDirForNetwork(paths.DefaultDataDir(), ctx.String("chain")) } metricsMux.HandleFunc("/dbs", func(w http.ResponseWriter, r *http.Request) { - 
w.Header().Set("Access-Control-Allow-Origin", "*") w.Header().Set("Content-Type", "application/json") writeDbList(w, dataDir) }) metricsMux.HandleFunc("/dbs/", func(w http.ResponseWriter, r *http.Request) { - w.Header().Set("Access-Control-Allow-Origin", "*") urlPath := r.URL.Path diff --git a/diagnostics/flags.go b/diagnostics/flags.go index d2902078423..5d2d0841486 100644 --- a/diagnostics/flags.go +++ b/diagnostics/flags.go @@ -29,7 +29,6 @@ func SetupFlagsAccess(ctx *cli.Context, metricsMux *http.ServeMux) { } metricsMux.HandleFunc("/flags", func(w http.ResponseWriter, r *http.Request) { - w.Header().Set("Access-Control-Allow-Origin", "*") w.Header().Set("Content-Type", "application/json") flags := map[string]interface{}{} diff --git a/diagnostics/header_downloader_stats.go b/diagnostics/header_downloader_stats.go index 74c84b114e2..7a38d8b19e5 100644 --- a/diagnostics/header_downloader_stats.go +++ b/diagnostics/header_downloader_stats.go @@ -31,7 +31,6 @@ func SetupHeaderDownloadStats(metricsMux *http.ServeMux) { } metricsMux.HandleFunc("/headers_download", func(w http.ResponseWriter, r *http.Request) { - w.Header().Set("Access-Control-Allow-Origin", "*") writeHeaderDownload(w, r) }) } diff --git a/diagnostics/headers.go b/diagnostics/headers.go index da861ed9902..65a2cff29f5 100644 --- a/diagnostics/headers.go +++ b/diagnostics/headers.go @@ -28,7 +28,6 @@ func SetupHeadersAccess(metricsMux *http.ServeMux, diag *diaglib.DiagnosticClien } metricsMux.HandleFunc("/headers", func(w http.ResponseWriter, r *http.Request) { - w.Header().Set("Access-Control-Allow-Origin", "*") w.Header().Set("Content-Type", "application/json") writeHeaders(w, diag) }) diff --git a/diagnostics/logs.go b/diagnostics/logs.go index e953f4bc0fe..46b54a61de3 100644 --- a/diagnostics/logs.go +++ b/diagnostics/logs.go @@ -51,11 +51,9 @@ func SetupLogsAccess(ctx *cli.Context, metricsMux *http.ServeMux) { return } metricsMux.HandleFunc("/logs", func(w http.ResponseWriter, r *http.Request) { - 
w.Header().Set("Access-Control-Allow-Origin", "*") writeLogsList(w, dirPath) }) metricsMux.HandleFunc("/logs/", func(w http.ResponseWriter, r *http.Request) { - w.Header().Set("Access-Control-Allow-Origin", "*") writeLogsRead(w, r, dirPath) }) } diff --git a/diagnostics/mem.go b/diagnostics/mem.go index 32e2c8639ae..d788d1cc06d 100644 --- a/diagnostics/mem.go +++ b/diagnostics/mem.go @@ -29,7 +29,6 @@ func SetupMemAccess(metricsMux *http.ServeMux) { } metricsMux.HandleFunc("/mem", func(w http.ResponseWriter, r *http.Request) { - w.Header().Set("Access-Control-Allow-Origin", "*") w.Header().Set("Content-Type", "application/json") writeMem(w) }) diff --git a/diagnostics/nodeinfo.go b/diagnostics/nodeinfo.go index 9fd2bb49d8a..7931463e10a 100644 --- a/diagnostics/nodeinfo.go +++ b/diagnostics/nodeinfo.go @@ -29,7 +29,6 @@ func SetupNodeInfoAccess(metricsMux *http.ServeMux, node *node.ErigonNode) { } metricsMux.HandleFunc("/nodeinfo", func(w http.ResponseWriter, r *http.Request) { - w.Header().Set("Access-Control-Allow-Origin", "*") writeNodeInfo(w, node) }) } diff --git a/diagnostics/peers.go b/diagnostics/peers.go index 2a5122a95c0..8dba8c4d626 100644 --- a/diagnostics/peers.go +++ b/diagnostics/peers.go @@ -58,7 +58,6 @@ func SetupPeersAccess(ctxclient *cli.Context, metricsMux *http.ServeMux, node *n } metricsMux.HandleFunc("/peers", func(w http.ResponseWriter, r *http.Request) { - w.Header().Set("Access-Control-Allow-Origin", "*") w.Header().Set("Content-Type", "application/json") writePeers(w, ctxclient, node, diag) }) diff --git a/diagnostics/setup.go b/diagnostics/setup.go index 241afb03301..4cfa2e68180 100644 --- a/diagnostics/setup.go +++ b/diagnostics/setup.go @@ -114,6 +114,18 @@ func SetupMiddleMuxHandler(mux *http.ServeMux, middleMux *http.ServeMux, path st middleMux.HandleFunc(path+"/", func(w http.ResponseWriter, r *http.Request) { r.URL.Path = strings.TrimPrefix(r.URL.Path, path) r.URL.RawPath = strings.TrimPrefix(r.URL.RawPath, path) + + // Set CORS 
headers + w.Header().Set("Access-Control-Allow-Origin", "*") + w.Header().Set("Access-Control-Allow-Methods", "GET, POST, PUT, DELETE, OPTIONS") + w.Header().Set("Access-Control-Allow-Headers", "Content-Type, Authorization") + + // Handle preflight requests + if r.Method == "OPTIONS" { + w.WriteHeader(http.StatusOK) + return + } + mux.ServeHTTP(w, r) }) } diff --git a/diagnostics/snapshot_sync.go b/diagnostics/snapshot_sync.go index b80d1093ab4..69e5018b5af 100644 --- a/diagnostics/snapshot_sync.go +++ b/diagnostics/snapshot_sync.go @@ -28,31 +28,26 @@ func SetupStagesAccess(metricsMux *http.ServeMux, diag *diaglib.DiagnosticClient } metricsMux.HandleFunc("/snapshot-sync", func(w http.ResponseWriter, r *http.Request) { - w.Header().Set("Access-Control-Allow-Origin", "*") w.Header().Set("Content-Type", "application/json") writeStages(w, diag) }) metricsMux.HandleFunc("/snapshot-files-list", func(w http.ResponseWriter, r *http.Request) { - w.Header().Set("Access-Control-Allow-Origin", "*") w.Header().Set("Content-Type", "application/json") writeFilesList(w, diag) }) metricsMux.HandleFunc("/resources-usage", func(w http.ResponseWriter, r *http.Request) { - w.Header().Set("Access-Control-Allow-Origin", "*") w.Header().Set("Content-Type", "application/json") writeResourcesUsage(w, diag) }) metricsMux.HandleFunc("/network-speed", func(w http.ResponseWriter, r *http.Request) { - w.Header().Set("Access-Control-Allow-Origin", "*") w.Header().Set("Content-Type", "application/json") writeNetworkSpeed(w, diag) }) metricsMux.HandleFunc("/sync-stages", func(w http.ResponseWriter, r *http.Request) { - w.Header().Set("Access-Control-Allow-Origin", "*") w.Header().Set("Content-Type", "application/json") writeSyncStages(w, diag) }) diff --git a/diagnostics/sysinfo.go b/diagnostics/sysinfo.go index bf1d9498ecb..25bf705bd9e 100644 --- a/diagnostics/sysinfo.go +++ b/diagnostics/sysinfo.go @@ -18,7 +18,9 @@ package diagnostics import ( "encoding/json" + "fmt" "net/http" + 
"runtime/pprof" diaglib "github.com/ledgerwatch/erigon-lib/diagnostics" "github.com/ledgerwatch/erigon-lib/sysutils" @@ -30,28 +32,37 @@ func SetupSysInfoAccess(metricsMux *http.ServeMux, diag *diaglib.DiagnosticClien } metricsMux.HandleFunc("/hardware-info", func(w http.ResponseWriter, r *http.Request) { - w.Header().Set("Access-Control-Allow-Origin", "*") w.Header().Set("Content-Type", "application/json") writeHardwareInfo(w, diag) }) metricsMux.HandleFunc("/cpu-usage", func(w http.ResponseWriter, r *http.Request) { - w.Header().Set("Access-Control-Allow-Origin", "*") w.Header().Set("Content-Type", "application/json") writeCPUUsage(w) }) metricsMux.HandleFunc("/processes-info", func(w http.ResponseWriter, r *http.Request) { - w.Header().Set("Access-Control-Allow-Origin", "*") w.Header().Set("Content-Type", "application/json") writeProcessesInfo(w) }) metricsMux.HandleFunc("/memory-info", func(w http.ResponseWriter, r *http.Request) { - w.Header().Set("Access-Control-Allow-Origin", "*") w.Header().Set("Content-Type", "application/json") writeMemoryInfo(w) }) + + metricsMux.HandleFunc("/heap-profile", func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "aplication/profile") + writeHeapProfile(w) + }) +} + +func writeHeapProfile(w http.ResponseWriter) { + err := pprof.Lookup("heap").WriteTo(w, 0) + if err != nil { + http.Error(w, fmt.Sprintf("Failed to write profile: %v", err), http.StatusInternalServerError) + return + } } func writeHardwareInfo(w http.ResponseWriter, diag *diaglib.DiagnosticClient) { diff --git a/diagnostics/version.go b/diagnostics/version.go index 742056b9430..520ea05dc01 100644 --- a/diagnostics/version.go +++ b/diagnostics/version.go @@ -31,7 +31,6 @@ func SetupVersionAccess(metricsMux *http.ServeMux) { } metricsMux.HandleFunc("/version", func(w http.ResponseWriter, r *http.Request) { - w.Header().Set("Access-Control-Allow-Origin", "*") w.Header().Set("Content-Type", "application/json") 
json.NewEncoder(w).Encode(struct { Node int `json:"nodeVersion"` From 93016a97fca65c2376980da19f3944d15e4aacf4 Mon Sep 17 00:00:00 2001 From: VBulikov Date: Mon, 9 Sep 2024 15:12:26 +0200 Subject: [PATCH 45/49] Update version.go (#11928) --- params/version.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/params/version.go b/params/version.go index 947146d3602..2e9f7e4c495 100644 --- a/params/version.go +++ b/params/version.go @@ -33,7 +33,7 @@ var ( const ( VersionMajor = 2 // Major version component of the current release VersionMinor = 60 // Minor version component of the current release - VersionMicro = 6 // Patch version component of the current release + VersionMicro = 7 // Patch version component of the current release VersionModifier = "" // Modifier component of the current release VersionKeyCreated = "ErigonVersionCreated" VersionKeyFinished = "ErigonVersionFinished" From 547c1a232af16cce31adf0c739f71967506729f5 Mon Sep 17 00:00:00 2001 From: Minhyuk Kim Date: Fri, 13 Sep 2024 13:47:37 +0900 Subject: [PATCH 46/49] Fix linting --- erigon-lib/go.sum | 2 -- 1 file changed, 2 deletions(-) diff --git a/erigon-lib/go.sum b/erigon-lib/go.sum index e56e3b36ee4..c2c7ad02e10 100644 --- a/erigon-lib/go.sum +++ b/erigon-lib/go.sum @@ -270,8 +270,6 @@ github.com/leanovate/gopter v0.2.9 h1:fQjYxZaynp97ozCzfOyOuAGOU4aU/z37zf/tOujFk7 github.com/leanovate/gopter v0.2.9/go.mod h1:U2L/78B+KVFIx2VmW6onHJQzXtFb+p5y3y2Sh+Jxxv8= github.com/ledgerwatch/erigon-snapshot v1.3.1-0.20240805114253-42da880260bb h1:bsoVxjnQGxhOODRmkdrbkRTB9+sIduguoNMSZPRRoTI= github.com/ledgerwatch/erigon-snapshot v1.3.1-0.20240805114253-42da880260bb/go.mod h1:3AuPxZc85jkehh/HA9h8gabv5MSi3kb/ddtzBsTVJFo= -github.com/ledgerwatch/interfaces v0.0.0-20240320062914-b57f05746087 h1:Y59HUAT/+02Qbm6g7MuY7i8E0kUihPe7+ftDnR8oQzQ= -github.com/ledgerwatch/interfaces v0.0.0-20240320062914-b57f05746087/go.mod h1:ugQv1QllJzBny3cKZKxUrSnykkjkBgm27eQM6dnGAcc= github.com/ledgerwatch/log/v3 v3.9.0 
h1:iDwrXe0PVwBC68Dd94YSsHbMgQ3ufsgjzXtFNFVZFRk= github.com/ledgerwatch/log/v3 v3.9.0/go.mod h1:EiAY6upmI/6LkNhOVxb4eVsmsP11HZCnZ3PlJMjYiqE= github.com/ledgerwatch/secp256k1 v1.0.0 h1:Usvz87YoTG0uePIV8woOof5cQnLXGYa162rFf3YnwaQ= From 90b97424a2143b36547e4ad06b0fffbb26d3a993 Mon Sep 17 00:00:00 2001 From: Minhyuk Kim Date: Sat, 14 Sep 2024 21:41:21 +0900 Subject: [PATCH 47/49] Fix compile error --- turbo/jsonrpc/eth_receipts_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/turbo/jsonrpc/eth_receipts_test.go b/turbo/jsonrpc/eth_receipts_test.go index 0d43b03ba49..a5ea49f5c2c 100644 --- a/turbo/jsonrpc/eth_receipts_test.go +++ b/turbo/jsonrpc/eth_receipts_test.go @@ -27,7 +27,7 @@ func TestGetReceipts(t *testing.T) { stateCache := kvcache.New(kvcache.DefaultCoherentConfig) ctx, conn := rpcdaemontest.CreateTestGrpcConn(t, mock.Mock(t)) mining := txpool.NewMiningClient(conn) - ff := rpchelper.New(ctx, nil, nil, mining, func() {}, m.Log) + ff := rpchelper.New(ctx, rpchelper.DefaultFiltersConfig, nil, nil, mining, func() {}, m.Log) api := NewEthAPI(NewBaseApi(ff, stateCache, m.BlockReader, agg, false, rpccfg.DefaultEvmCallTimeout, m.Engine, m.Dirs, nil, nil), m.DB, nil, nil, nil, 5000000, 1e18, 100_000, false, 100_000, 128, log.New()) db := m.DB From 096df32fa995cbac17b0038707e55f7ea6a5db8e Mon Sep 17 00:00:00 2001 From: Minhyuk Kim Date: Sat, 14 Sep 2024 21:52:04 +0900 Subject: [PATCH 48/49] Use original release workflow for op-erigon --- .github/workflows/release.yml | 248 ++++++++++++++++++---------------- 1 file changed, 129 insertions(+), 119 deletions(-) diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index e46211b9048..536ce1d238f 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -86,111 +86,121 @@ jobs: - name: Set up QEMU uses: docker/setup-qemu-action@49b3bc8e6bdd4a60e6116a5414239cba5943d3cf ## v3.2.0 - - name: Set up Docker Buildx - uses: 
docker/setup-buildx-action@988b5a0280414f521da01fcc63a27aeeb4b104db ## v3.6.1 - - - name: Build binaries with goreleaser - env: - BUILD_VERSION: ${{ inputs.release_version }} - DOCKER_URL: ${{ env.DOCKERHUB_REPOSITORY }} + - name: Run GoReleaser run: | - docker run --rm \ - -w /${{ env.APPLICATION }}/ \ - -e BUILD_VERSION=${{ env.BUILD_VERSION }} \ - -e GIT_COMMIT=${{ steps.getCommitId.outputs.id }} \ - -e GIT_BRANCH=${{ inputs.checkout_ref }} \ - -e GIT_TAG=${{ inputs.release_version }} \ - -e PACKAGE=${{ env.PACKAGE }} \ - -e APPLICATION=${{ env.APPLICATION }} \ - -v $(pwd):/${{ env.APPLICATION}} \ - -v /var/run/docker.sock:/var/run/docker.sock \ - ${{ env.BUILDER_IMAGE }} release --clean --skip=validate,announce,publish - echo "DEBUG: ls -lao in the working directory" - ls -lao - echo "DEBUG: content of the dist/ directory" - find dist/ -ls - - - name: Build and push multi-platform docker images (${{ env.BUILD_VERSION }} and latest) in case perform_release is true - if: ${{ inputs.perform_release }} + make release + docker images env: - BUILD_VERSION: ${{ inputs.release_version }} - DOCKER_URL: ${{ env.DOCKERHUB_REPOSITORY }} - DOCKER_PUBLISH_LATEST_CONDITION: ${{ inputs.publish_latest_tag && format('--tag {0}:latest ',env.DOCKERHUB_REPOSITORY) || '' }} - run: | - docker buildx build \ - --file ${{ env.DOCKERFILE_PATH }} \ - --build-arg DOCKER_BASE_IMAGE=${{ env.DOCKER_BASE_IMAGE }} \ - --build-arg VERSION=${{ env.BUILD_VERSION }} \ - --build-arg APPLICATION=${{ env.APPLICATION }} \ - --tag ${{ env.DOCKER_URL }}:${{ env.BUILD_VERSION }} \ - ${{ env.DOCKER_PUBLISH_LATEST_CONDITION }} \ - --label org.opencontainers.image.created=$(date -u +'%Y-%m-%dT%H:%M:%SZ') \ - --label org.opencontainers.image.authors="https://github.com/testinprod-io/op-erigon/graphs/contributors" \ - --label org.opencontainers.image.url="https://github.com/testinprod-io/op-erigon/blob/main/Dockerfile" \ - --label 
org.opencontainers.image.documentation="https://github.com/testinprod-io/op-erigon/blob/main/Dockerfile" \ - --label org.opencontainers.image.source="https://github.com/testinprod-io/op-erigon/blob/main/Dockerfile" \ - --label org.opencontainers.image.version=${{ inputs.release_version }} \ - --label org.opencontainers.image.revision=${{ steps.getCommitId.outputs.id }} \ - --label org.opencontainers.image.vcs-ref-short=${{ steps.getCommitId.outputs.short_commit_id }} \ - --label org.opencontainers.image.vendor="${{ github.repository_owner }}" \ - --label org.opencontainers.image.description="${{ env.LABEL_DESCRIPTION }}" \ - --label org.opencontainers.image.base.name="${{ env.DOCKER_BASE_IMAGE }}" \ - --push \ - --platform linux/amd64/v2,linux/arm64 . - - - name: Upload artifact -- linux/arm64 - uses: actions/upload-artifact@834a144ee995460fba8ed112a2fc961b36a5ec5a ## v4.3.6 - with: - name: ${{ env.APPLICATION }}_${{ inputs.release_version }}_linux_arm64.tar.gz - path: ./dist/${{ env.APPLICATION }}_${{ inputs.release_version }}_linux_arm64.tar.gz - retention-days: 1 - compression-level: 0 - if-no-files-found: error - - - name: Upload artifact -- linux/amd64 - uses: actions/upload-artifact@834a144ee995460fba8ed112a2fc961b36a5ec5a ## v4.3.6 - with: - name: ${{ env.APPLICATION }}_${{ inputs.release_version }}_linux_amd64.tar.gz - path: ./dist/${{ env.APPLICATION }}_${{ inputs.release_version }}_linux_amd64.tar.gz - retention-days: 1 - compression-level: 0 - if-no-files-found: error - - - name: Upload artifact -- darwin/arm64 - uses: actions/upload-artifact@834a144ee995460fba8ed112a2fc961b36a5ec5a ## v4.3.6 - with: - name: ${{ env.APPLICATION }}_${{ inputs.release_version }}_darwin_arm64.tar.gz - path: ./dist/${{ env.APPLICATION }}_${{ inputs.release_version }}_darwin_arm64.tar.gz - retention-days: 1 - compression-level: 0 - if-no-files-found: error - - - name: Upload artifact -- darwin/amd64 - uses: actions/upload-artifact@834a144ee995460fba8ed112a2fc961b36a5ec5a ## 
v4.3.6 - with: - name: ${{ env.APPLICATION }}_${{ inputs.release_version }}_darwin_amd64.tar.gz - path: ./dist/${{ env.APPLICATION }}_${{ inputs.release_version }}_darwin_amd64.tar.gz - retention-days: 1 - compression-level: 0 - if-no-files-found: error - - - name: Upload artifact -- windows/amd64 - uses: actions/upload-artifact@834a144ee995460fba8ed112a2fc961b36a5ec5a ## v4.3.6 - with: - name: ${{ env.APPLICATION }}_${{ inputs.release_version }}_windows_amd64.zip - path: ./dist/${{ env.APPLICATION }}_${{ inputs.release_version }}_windows_amd64.zip - retention-days: 1 - compression-level: 0 - if-no-files-found: error - - - name: Upload artifact -- checksum - uses: actions/upload-artifact@834a144ee995460fba8ed112a2fc961b36a5ec5a ## v4.3.6 - with: - name: ${{ env.APPLICATION }}_${{ inputs.release_version }}_checksums.txt - path: ./dist/${{ env.APPLICATION }}_${{ inputs.release_version }}_checksums.txt - retention-days: 1 - compression-level: 0 - if-no-files-found: error + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + VERSION: ${{ steps.prepare.outputs.tag_name }} + DOCKER_USERNAME: ${{ secrets.DOCKERHUB }} + DOCKER_PASSWORD: ${{ secrets.DOCKERHUB_KEY }} + + # - name: Set up Docker Buildx + # uses: docker/setup-buildx-action@988b5a0280414f521da01fcc63a27aeeb4b104db ## v3.6.1 + + # - name: Build binaries with goreleaser + # env: + # BUILD_VERSION: ${{ inputs.release_version }} + # DOCKER_URL: ${{ env.DOCKERHUB_REPOSITORY }} + # run: | + # docker run --rm \ + # -w /${{ env.APPLICATION }}/ \ + # -e BUILD_VERSION=${{ env.BUILD_VERSION }} \ + # -e GIT_COMMIT=${{ steps.getCommitId.outputs.id }} \ + # -e GIT_BRANCH=${{ inputs.checkout_ref }} \ + # -e GIT_TAG=${{ inputs.release_version }} \ + # -e PACKAGE=${{ env.PACKAGE }} \ + # -e APPLICATION=${{ env.APPLICATION }} \ + # -v $(pwd):/${{ env.APPLICATION}} \ + # -v /var/run/docker.sock:/var/run/docker.sock \ + # ${{ env.BUILDER_IMAGE }} release --clean --skip=validate,announce,publish + # echo "DEBUG: ls -lao in the working 
directory" + # ls -lao + # echo "DEBUG: content of the dist/ directory" + # find dist/ -ls + + # - name: Build and push multi-platform docker images (${{ env.BUILD_VERSION }} and latest) in case perform_release is true + # if: ${{ inputs.perform_release }} + # env: + # BUILD_VERSION: ${{ inputs.release_version }} + # DOCKER_URL: ${{ env.DOCKERHUB_REPOSITORY }} + # DOCKER_PUBLISH_LATEST_CONDITION: ${{ inputs.publish_latest_tag && format('--tag {0}:latest ',env.DOCKERHUB_REPOSITORY) || '' }} + # run: | + # docker buildx build \ + # --file ${{ env.DOCKERFILE_PATH }} \ + # --build-arg DOCKER_BASE_IMAGE=${{ env.DOCKER_BASE_IMAGE }} \ + # --build-arg VERSION=${{ env.BUILD_VERSION }} \ + # --build-arg APPLICATION=${{ env.APPLICATION }} \ + # --tag ${{ env.DOCKER_URL }}:${{ env.BUILD_VERSION }} \ + # ${{ env.DOCKER_PUBLISH_LATEST_CONDITION }} \ + # --label org.opencontainers.image.created=$(date -u +'%Y-%m-%dT%H:%M:%SZ') \ + # --label org.opencontainers.image.authors="https://github.com/testinprod-io/op-erigon/graphs/contributors" \ + # --label org.opencontainers.image.url="https://github.com/testinprod-io/op-erigon/blob/main/Dockerfile" \ + # --label org.opencontainers.image.documentation="https://github.com/testinprod-io/op-erigon/blob/main/Dockerfile" \ + # --label org.opencontainers.image.source="https://github.com/testinprod-io/op-erigon/blob/main/Dockerfile" \ + # --label org.opencontainers.image.version=${{ inputs.release_version }} \ + # --label org.opencontainers.image.revision=${{ steps.getCommitId.outputs.id }} \ + # --label org.opencontainers.image.vcs-ref-short=${{ steps.getCommitId.outputs.short_commit_id }} \ + # --label org.opencontainers.image.vendor="${{ github.repository_owner }}" \ + # --label org.opencontainers.image.description="${{ env.LABEL_DESCRIPTION }}" \ + # --label org.opencontainers.image.base.name="${{ env.DOCKER_BASE_IMAGE }}" \ + # --push \ + # --platform linux/amd64/v2,linux/arm64 . 
+ + # - name: Upload artifact -- linux/arm64 + # uses: actions/upload-artifact@834a144ee995460fba8ed112a2fc961b36a5ec5a ## v4.3.6 + # with: + # name: ${{ env.APPLICATION }}_${{ inputs.release_version }}_linux_arm64.tar.gz + # path: ./dist/${{ env.APPLICATION }}_${{ inputs.release_version }}_linux_arm64.tar.gz + # retention-days: 1 + # compression-level: 0 + # if-no-files-found: error + + # - name: Upload artifact -- linux/amd64 + # uses: actions/upload-artifact@834a144ee995460fba8ed112a2fc961b36a5ec5a ## v4.3.6 + # with: + # name: ${{ env.APPLICATION }}_${{ inputs.release_version }}_linux_amd64.tar.gz + # path: ./dist/${{ env.APPLICATION }}_${{ inputs.release_version }}_linux_amd64.tar.gz + # retention-days: 1 + # compression-level: 0 + # if-no-files-found: error + + # - name: Upload artifact -- darwin/arm64 + # uses: actions/upload-artifact@834a144ee995460fba8ed112a2fc961b36a5ec5a ## v4.3.6 + # with: + # name: ${{ env.APPLICATION }}_${{ inputs.release_version }}_darwin_arm64.tar.gz + # path: ./dist/${{ env.APPLICATION }}_${{ inputs.release_version }}_darwin_arm64.tar.gz + # retention-days: 1 + # compression-level: 0 + # if-no-files-found: error + + # - name: Upload artifact -- darwin/amd64 + # uses: actions/upload-artifact@834a144ee995460fba8ed112a2fc961b36a5ec5a ## v4.3.6 + # with: + # name: ${{ env.APPLICATION }}_${{ inputs.release_version }}_darwin_amd64.tar.gz + # path: ./dist/${{ env.APPLICATION }}_${{ inputs.release_version }}_darwin_amd64.tar.gz + # retention-days: 1 + # compression-level: 0 + # if-no-files-found: error + + # - name: Upload artifact -- windows/amd64 + # uses: actions/upload-artifact@834a144ee995460fba8ed112a2fc961b36a5ec5a ## v4.3.6 + # with: + # name: ${{ env.APPLICATION }}_${{ inputs.release_version }}_windows_amd64.zip + # path: ./dist/${{ env.APPLICATION }}_${{ inputs.release_version }}_windows_amd64.zip + # retention-days: 1 + # compression-level: 0 + # if-no-files-found: error + + # - name: Upload artifact -- checksum + # uses: 
actions/upload-artifact@834a144ee995460fba8ed112a2fc961b36a5ec5a ## v4.3.6 + # with: + # name: ${{ env.APPLICATION }}_${{ inputs.release_version }}_checksums.txt + # path: ./dist/${{ env.APPLICATION }}_${{ inputs.release_version }}_checksums.txt + # retention-days: 1 + # compression-level: 0 + # if-no-files-found: error ## not required for now -- commented: # - name: Create and push a git tag for the released version in case perform_release is set @@ -200,19 +210,19 @@ jobs: # git config --global user.name "${{ env.GITHUB_AUTOMATION_NAME }}" # git tag -a ${{ inputs.release_version }} -m "Release ${{ inputs.release_version }}" # git push origin ${{ inputs.release_version }} - - - name: Publish draft of the Release notes with assets (without windows .zip) in case perform_release is set - if: ${{ inputs.perform_release }} - env: - GH_TOKEN: ${{ github.token }} - GH_REPO: ${{ github.repository }} - DOCKER_TAGS: ${{ env.DOCKERHUB_REPOSITORY }}:${{ inputs.release_version }} - GITHUB_RELEASE_TARGET: ${{ inputs.checkout_ref }} - run: | - cd dist - gh release create ${{ inputs.release_version }} *.tar.gz *_checksums.txt \ - --generate-notes \ - --target ${GITHUB_RELEASE_TARGET} \ - --draft=true \ - --title "${{ inputs.release_version }}" \ - --notes "**Improvements:**
- ...coming soon

**Bugfixes:**

- ...coming soon

**Docker images:**

Docker image released:
${{ env.DOCKER_TAGS }}

... coming soon
" \ No newline at end of file +# + # - name: Publish draft of the Release notes with assets (without windows .zip) in case perform_release is set + # if: ${{ inputs.perform_release }} + # env: + # GH_TOKEN: ${{ github.token }} + # GH_REPO: ${{ github.repository }} + # DOCKER_TAGS: ${{ env.DOCKERHUB_REPOSITORY }}:${{ inputs.release_version }} + # GITHUB_RELEASE_TARGET: ${{ inputs.checkout_ref }} + # run: | + # cd dist + # gh release create ${{ inputs.release_version }} *.tar.gz *_checksums.txt \ + # --generate-notes \ + # --target ${GITHUB_RELEASE_TARGET} \ + # --draft=true \ + # --title "${{ inputs.release_version }}" \ + # --notes "**Improvements:**
- ...coming soon

**Bugfixes:**

- ...coming soon

**Docker images:**

Docker image released:
${{ env.DOCKER_TAGS }}

... coming soon
" \ No newline at end of file From 2e191dffb25bc41df4f58c9c19e867a6bb327401 Mon Sep 17 00:00:00 2001 From: Minhyuk Kim Date: Wed, 18 Sep 2024 14:46:44 -0600 Subject: [PATCH 49/49] Update release.yml --- .github/workflows/release.yml | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 536ce1d238f..717f6a1c160 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -4,7 +4,7 @@ env: APPLICATION: "op-erigon" BUILDER_IMAGE: "ghcr.io/goreleaser/goreleaser-cross:v1.21.13" DOCKER_BASE_IMAGE: "alpine:3.20.2" - APP_REPO: "testinprod/op-erigon" + APP_REPO: "testinprod-io/op-erigon" PACKAGE: "github.com/testinprod-io/op-erigon" DOCKERHUB_REPOSITORY: "testinprod/op-erigon" DOCKERFILE_PATH: "./Dockerfile.release" @@ -82,9 +82,17 @@ jobs: with: username: ${{ secrets.DOCKERHUB }} password: ${{ secrets.DOCKERHUB_KEY }} - + + - name: Prepare + id: prepare + run: | + TAG=${GITHUB_REF#refs/tags/} + echo "tag_name=${TAG}" >> $GITHUB_OUTPUT + - name: Set up QEMU uses: docker/setup-qemu-action@49b3bc8e6bdd4a60e6116a5414239cba5943d3cf ## v3.2.0 + + - run: echo ${{ steps.prepare.outputs.tag_name }} - name: Run GoReleaser run: |