diff --git a/.github/workflows/qa-rpc-integration-tests.yml b/.github/workflows/qa-rpc-integration-tests.yml index 1a6e0a0db74..415d0cdf961 100644 --- a/.github/workflows/qa-rpc-integration-tests.yml +++ b/.github/workflows/qa-rpc-integration-tests.yml @@ -87,41 +87,29 @@ jobs: # Run RPC integration test runner via http python3 ./run_tests.py -p 8545 --continue -f --json-diff -x \ - debug_accountRange,debug_getModifiedAccountsByHash,debug_getModifiedAccountsByNumber,debug_storageRangeAt,debug_traceBlockByHash,\ - debug_traceCallMany/test_02.tar,debug_traceCallMany/test_04.tar,debug_traceCallMany/test_05.tar,debug_traceCallMany/test_06.tar,debug_traceCallMany/test_07.tar,debug_traceCallMany/test_09.json,debug_traceCallMany/test_10.tar,\ +# false positives: Erigon returns the expected response, but rpc-test somehow doesn't see 1 field. + erigon_getHeaderByHash,erigon_getHeaderByNumber,eth_feeHistory,\ +# total difficulty field was removed, then added back + eth_getBlockByHash,eth_getBlockByNumber,\ +# Erigon bugs + debug_accountRange,debug_storageRangeAt,\ +# need to update rpc-test, because Erigon is correct (@AskAlexSharov will do after https://github.com/erigontech/erigon/pull/12634) + debug_getModifiedAccountsByHash,debug_getModifiedAccountsByNumber,\ +# Erigon bug https://github.com/erigontech/erigon/issues/12603 + erigon_getLatestLogs,erigon_getLogsByHash/test_04.json,\ +# Erigon bug https://github.com/erigontech/erigon/issues/12637 debug_traceBlockByNumber/test_05.tar,debug_traceBlockByNumber/test_08.tar,debug_traceBlockByNumber/test_09.tar,debug_traceBlockByNumber/test_10.tar,debug_traceBlockByNumber/test_11.tar,debug_traceBlockByNumber/test_12.tar,\ +# remove this line after https://github.com/erigontech/rpc-tests/pull/281 + parity_getBlockReceipts,\ +# to investigate + debug_traceBlockByHash,\ + debug_traceCallMany/test_02.tar,debug_traceCallMany/test_04.tar,debug_traceCallMany/test_05.tar,debug_traceCallMany/test_06.tar,debug_traceCallMany/test_07.tar,debug_traceCallMany/test_09.json,debug_traceCallMany/test_10.tar,\ debug_traceTransaction,\ engine_exchangeCapabilities/test_1.json,\ engine_exchangeTransitionConfigurationV1/test_01.json,\ engine_getClientVersionV1/test_1.json,\ - erigon_getLogsByHash/test_04.json,\ - erigon_getHeaderByHash/test_02.json,\ - erigon_getHeaderByHash/test_03.json,\ - erigon_getHeaderByHash/test_04.json,\ - erigon_getHeaderByHash/test_06.json,\ - erigon_getHeaderByNumber/test_01.json,\ - erigon_getHeaderByNumber/test_02.json,\ - erigon_getHeaderByNumber/test_03.json,\ - erigon_getHeaderByNumber/test_04.json,\ - erigon_getHeaderByNumber/test_05.json,\ - erigon_getHeaderByNumber/test_06.json,\ - erigon_getHeaderByNumber/test_07.json,\ - erigon_getHeaderByNumber/test_08.json,\ - erigon_getLatestLogs/test_01.json,\ - erigon_getLatestLogs/test_02.json,\ - erigon_getLatestLogs/test_03.json,\ - erigon_getLatestLogs/test_04.json,\ - erigon_getLatestLogs/test_05.json,\ - erigon_getLatestLogs/test_06.json,\ - erigon_getLatestLogs/test_08.json,\ - erigon_getLatestLogs/test_09.json,\ - erigon_getLatestLogs/test_10.json,\ - erigon_getLatestLogs/test_11.json,\ - erigon_getLatestLogs/test_12.json,\ erigon_getBalanceChangesInBlock,\ eth_createAccessList/test_16.json,\ -# parity_getBlockReceipts was renamet to eth_getBlockReceipts - parity_getBlockReceipts,\ trace_filter/test_16.json,\ trace_rawTransaction/test_01.json,\ trace_rawTransaction/test_03.json,\ @@ -129,31 +117,6 @@ jobs: admin_peers/test_01.json,\ erigon_nodeInfo/test_1.json,\ eth_coinbase/test_01.json,\ - 
eth_feeHistory/test_01.json,\ - eth_feeHistory/test_02.json,\ - eth_feeHistory/test_03.json,\ - eth_feeHistory/test_04.json,\ - eth_feeHistory/test_05.json,\ - eth_feeHistory/test_06.json,\ - eth_feeHistory/test_08.json,\ - eth_feeHistory/test_09.json,\ - eth_feeHistory/test_10.json,\ - eth_feeHistory/test_11.json,\ - eth_getBlockByHash/test_01.json,\ - eth_getBlockByHash/test_02.json,\ - eth_getBlockByHash/test_05.json,\ - eth_getBlockByHash/test_06.json,\ - eth_getBlockByHash/test_07.json,\ - eth_getBlockByHash/test_08.json,\ - eth_getBlockByNumber/test_01.json,\ - eth_getBlockByNumber/test_02.json,\ - eth_getBlockByNumber/test_04.json,\ - eth_getBlockByNumber/test_05.json,\ - eth_getBlockByNumber/test_06.json,\ - eth_getBlockByNumber/test_07.json,\ - eth_getBlockByNumber/test_08.json,\ - eth_getBlockByNumber/test_12.json,\ - eth_getBlockByNumber/test_13.json,\ eth_getTransactionByHash/test_02.json,\ eth_getWork/test_01.json,\ eth_mining/test_01.json,\ diff --git a/README.md b/README.md index a863e843bb4..64f9ed319c5 100644 --- a/README.md +++ b/README.md @@ -203,18 +203,20 @@ du -hsc /erigon/snapshots/* ### Erigon3 changes from Erigon2 -- Initial sync does download LatestState and it's history - no re-exec from 0 anymore. -- ExecutionStage included many E2 stages: stage_hash_state, stage_trie, log_index, history_index, trace_index -- E3 can execute 1 historical transaction - without executing it's block - because history/indices have - transaction-granularity, instead of block-granularity. -- E3 doesn't store Logs (aka Receipts) - it always re-executing historical txn (but it's cheaper then in E2 - see point - above). -- Restart doesn't loose much partial progress: `--sync.loop.block.limit=5_000` enabled by default -- `chaindata` is less than `15gb`. It's ok to `rm -rf chaindata`. To prevent it's grow: recommend `--batchSize <= 1G` -- can symlink/mount latest state to fast drive and history to cheap drive -- `--internalcl` is enabled by default. to disable use `--externalcl` -- `--prune` flags changed: default `--prune.mode=archive`, FullNode: `--prune.mode=full`, MinimalNode (EIP-4444): - `--prune.mode=minimal`. +- **Initial sync doesn't re-exec from 0:** it downloads 99% of LatestState and History +- **Per-Transaction granularity of history** (Erigon2 had per-block). This means: + - Can execute 1 historical transaction - without executing its block + - If account X changes V1->V2->V1 within 1 block (different transactions): `debug_getModifiedAccountsByNumber` returns + it + - Erigon3 doesn't store Logs (aka Receipts) - it always re-executes the historical txn (but it's cheaper) +- **Validator mode**: added. `--internalcl` is enabled by default. To disable it, use `--externalcl`. +- **Store most of the data in immutable files (segments/snapshots):** + - can symlink/mount the latest state to a fast drive and history to a cheap drive + - `chaindata` is less than `15gb`. It's ok to `rm -rf chaindata`. 
(to prevent growth: recommend `--batchSize <= 1G`) +- **`--prune` flags changed**: see `--prune.mode` (default: `archive`, full: `full`, EIP-4444: `minimal`) +- **Other changes:** + - ExecutionStage now includes many E2 stages: stage_hash_state, stage_trie, log_index, history_index, trace_index + - Restart doesn't lose much partial progress: `--sync.loop.block.limit=5_000` is enabled by default ### Logging diff --git a/cmd/devnet/devnetutils/utils.go b/cmd/devnet/devnetutils/utils.go index 4b9e3fe6c97..c88cbce96b2 100644 --- a/cmd/devnet/devnetutils/utils.go +++ b/cmd/devnet/devnetutils/utils.go @@ -117,14 +117,14 @@ func UniqueIDFromEnode(enode string) (string, error) { return enode[:i], nil } -func RandomInt(max int) int { - if max == 0 { +func RandomInt(_max int) int { + if _max == 0 { return 0 } var n uint16 binary.Read(rand.Reader, binary.LittleEndian, &n) - return int(n) % (max + 1) + return int(n) % (_max + 1) } // NamespaceAndSubMethodFromMethod splits a parent method into namespace and the actual method @@ -142,10 +142,10 @@ func GenerateTopic(signature string) []libcommon.Hash { } // RandomNumberInRange returns a random number between min and max NOT inclusive -func RandomNumberInRange(min, max uint64) (uint64, error) { - if max <= min { - return 0, fmt.Errorf("Invalid range: upper bound %d less or equal than lower bound %d", max, min) +func RandomNumberInRange(_min, _max uint64) (uint64, error) { + if _max <= _min { + return 0, fmt.Errorf("Invalid range: upper bound %d less or equal than lower bound %d", _max, _min) } - return uint64(RandomInt(int(max-min)) + int(min)), nil + return uint64(RandomInt(int(_max-_min)) + int(_min)), nil } diff --git a/common/fdlimit/fdlimit_darwin.go b/common/fdlimit/fdlimit_darwin.go index c59be293476..7d8b7f2fd5c 100644 --- a/common/fdlimit/fdlimit_darwin.go +++ b/common/fdlimit/fdlimit_darwin.go @@ -27,7 +27,7 @@ const hardlimit = 10240 // Raise tries to maximize the file descriptor allowance of this process // to the maximum hard-limit allowed by the OS. // Returns the size it was set to (may differ from the desired 'max') -func Raise(max uint64) (uint64, error) { +func Raise(_max uint64) (uint64, error) { // Get the current limit var limit syscall.Rlimit if err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &limit); err != nil { @@ -35,8 +35,8 @@ func Raise(max uint64) (uint64, error) { } // Try to update the limit to the max allowance limit.Cur = limit.Max - if limit.Cur > max { - limit.Cur = max + if limit.Cur > _max { + limit.Cur = _max } if err := syscall.Setrlimit(syscall.RLIMIT_NOFILE, &limit); err != nil { return 0, err diff --git a/common/fdlimit/fdlimit_unix.go b/common/fdlimit/fdlimit_unix.go index 2f3ac908cc8..eebb72fde2e 100644 --- a/common/fdlimit/fdlimit_unix.go +++ b/common/fdlimit/fdlimit_unix.go @@ -26,7 +26,7 @@ import "syscall" // Raise tries to maximize the file descriptor allowance of this process // to the maximum hard-limit allowed by the OS. 
// Returns the size it was set to (may differ from the desired 'max') -func Raise(max uint64) (uint64, error) { +func Raise(_max uint64) (uint64, error) { // Get the current limit var limit syscall.Rlimit if err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &limit); err != nil { @@ -34,8 +34,8 @@ func Raise(max uint64) (uint64, error) { } // Try to update the limit to the max allowance limit.Cur = limit.Max - if limit.Cur > max { - limit.Cur = max + if limit.Cur > _max { + limit.Cur = _max } if err := syscall.Setrlimit(syscall.RLIMIT_NOFILE, &limit); err != nil { return 0, err diff --git a/common/fdlimit/fdlimit_windows.go b/common/fdlimit/fdlimit_windows.go index c7897072626..5a1137050bc 100644 --- a/common/fdlimit/fdlimit_windows.go +++ b/common/fdlimit/fdlimit_windows.go @@ -26,17 +26,17 @@ const hardlimit = 16384 // Raise tries to maximize the file descriptor allowance of this process // to the maximum hard-limit allowed by the OS. -func Raise(max uint64) (uint64, error) { +func Raise(_max uint64) (uint64, error) { // This method is NOP by design: // * Linux/Darwin counterparts need to manually increase per process limits // * On Windows Go uses the CreateFile API, which is limited to 16K files, non // changeable from within a running process // This way we can always "request" raising the limits, which will either have // or not have effect based on the platform we're running on. - if max > hardlimit { + if _max > hardlimit { return hardlimit, fmt.Errorf("file descriptor limit (%d) reached", hardlimit) } - return max, nil + return _max, nil } // Current retrieves the number of file descriptors allowed to be opened by this diff --git a/consensus/ethash/consensus_test.go b/consensus/ethash/consensus_test.go index 7ac1e5d5dd0..4436a0bfa46 100644 --- a/consensus/ethash/consensus_test.go +++ b/consensus/ethash/consensus_test.go @@ -95,11 +95,11 @@ func TestCalcDifficulty(t *testing.T) { } } -func randSlice(min, max uint32) []byte { +func randSlice(_min, _max uint32) []byte { var b = make([]byte, 4) rand.Read(b) a := binary.LittleEndian.Uint32(b) - size := min + a%(max-min) + size := _min + a%(_max-_min) out := make([]byte, size) rand.Read(out) return out diff --git a/core/types/encdec_test.go b/core/types/encdec_test.go index 69a19c5a0c1..296f5467fec 100644 --- a/core/types/encdec_test.go +++ b/core/types/encdec_test.go @@ -44,8 +44,8 @@ func NewTRand() *TRand { return &TRand{rnd: rand.New(src)} } -func (tr *TRand) RandIntInRange(min, max int) int { - return (tr.rnd.Intn(max-min) + min) +func (tr *TRand) RandIntInRange(_min, _max int) int { + return (tr.rnd.Intn(_max-_min) + _min) } func (tr *TRand) RandUint64() *uint64 { diff --git a/core/types/transaction_test.go b/core/types/transaction_test.go index b5aeb0f98b6..fde05e8680e 100644 --- a/core/types/transaction_test.go +++ b/core/types/transaction_test.go @@ -549,8 +549,8 @@ const N = 50 var dummyBlobTxs = [N]*BlobTx{} var dummyBlobWrapperTxs = [N]*BlobTxWrapper{} -func randIntInRange(min, max int) int { - return (rand.Intn(max-min) + min) +func randIntInRange(_min, _max int) int { + return (rand.Intn(_max-_min) + _min) } func randAddr() *libcommon.Address { diff --git a/erigon-lib/chain/snapcfg/util.go b/erigon-lib/chain/snapcfg/util.go index f60de31aaff..6a13bff1e6a 100644 --- a/erigon-lib/chain/snapcfg/util.go +++ b/erigon-lib/chain/snapcfg/util.go @@ -245,7 +245,7 @@ func (p Preverified) Versioned(preferredVersion snaptype.Version, minVersion sna } func (p Preverified) MaxBlock(version snaptype.Version) (uint64, error) { - max := 
uint64(0) + _max := uint64(0) for _, p := range p { _, fileName := filepath.Split(p.Name) ext := filepath.Ext(fileName) @@ -261,16 +261,16 @@ func (p Preverified) MaxBlock(version snaptype.Version) (uint64, error) { return 0, err } - if max < to { - max = to + if _max < to { + _max = to } } - if max == 0 { // to prevent underflow + if _max == 0 { // to prevent underflow return 0, nil } - return max*1_000 - 1, nil + return _max*1_000 - 1, nil } var errWrongVersion = errors.New("wrong version") @@ -464,17 +464,17 @@ func MergeLimitFromCfg(cfg *Cfg, snapType snaptype.Enum, fromBlock uint64) uint6 } func MaxSeedableSegment(chain string, dir string) uint64 { - var max uint64 + var _max uint64 if list, err := snaptype.Segments(dir); err == nil { for _, info := range list { - if Seedable(chain, info) && info.Type.Enum() == snaptype.MinCoreEnum && info.To > max { - max = info.To + if Seedable(chain, info) && info.Type.Enum() == snaptype.MinCoreEnum && info.To > _max { + _max = info.To } } } - return max + return _max } var oldMergeSteps = append([]uint64{snaptype.Erigon2OldMergeLimit}, snaptype.MergeSteps...) @@ -498,14 +498,14 @@ func KnownCfg(networkName string) *Cfg { return newCfg(networkName, c.Typed(knownTypes[networkName])) } -func VersionedCfg(networkName string, preferred snaptype.Version, min snaptype.Version) *Cfg { +func VersionedCfg(networkName string, preferred snaptype.Version, _min snaptype.Version) *Cfg { c, ok := knownPreverified[networkName] if !ok { return newCfg(networkName, Preverified{}) } - return newCfg(networkName, c.Versioned(preferred, min)) + return newCfg(networkName, c.Versioned(preferred, _min)) } var KnownWebseeds = map[string][]string{ diff --git a/erigon-lib/common/cmp/cmp.go b/erigon-lib/common/cmp/cmp.go index 8ee45182c17..db832450987 100644 --- a/erigon-lib/common/cmp/cmp.go +++ b/erigon-lib/common/cmp/cmp.go @@ -21,12 +21,12 @@ import ( ) // InRange - ensure val is in [min,max] range -func InRange[T cmp.Ordered](min, max, val T) T { - if min >= val { - return min +func InRange[T cmp.Ordered](_min, _max, val T) T { + if _min >= val { + return _min } - if max <= val { - return max + if _max <= val { + return _max } return val } diff --git a/erigon-lib/crypto/bn256/cloudflare/gfp_decl.go b/erigon-lib/crypto/bn256/cloudflare/gfp_decl.go index 072e32b0888..23df6f186f4 100644 --- a/erigon-lib/crypto/bn256/cloudflare/gfp_decl.go +++ b/erigon-lib/crypto/bn256/cloudflare/gfp_decl.go @@ -11,7 +11,7 @@ import ( var hasBMI2 = cpu.X86.HasBMI2 -// go:noescape +//go:noescape func gfpNeg(c, a *gfP) //go:noescape diff --git a/erigon-lib/downloader/downloader.go b/erigon-lib/downloader/downloader.go index cf89315feb3..6fb5eb1cbfc 100644 --- a/erigon-lib/downloader/downloader.go +++ b/erigon-lib/downloader/downloader.go @@ -174,7 +174,7 @@ func insertCloudflareHeaders(req *http.Request) { // It also tries to parse Retry-After response header when a http.StatusTooManyRequests // (HTTP Code 429) is found in the resp parameter. Hence it will return the number of // seconds the server states it may be ready to process more requests from this client. 
-func calcBackoff(min, max time.Duration, attemptNum int, resp *http.Response) time.Duration { +func calcBackoff(_min, _max time.Duration, attemptNum int, resp *http.Response) time.Duration { if resp != nil { if resp.StatusCode == http.StatusTooManyRequests || resp.StatusCode == http.StatusServiceUnavailable { if s, ok := resp.Header["Retry-After"]; ok { @@ -185,10 +185,10 @@ func calcBackoff(min, max time.Duration, attemptNum int, resp *http.Response) ti } } - mult := math.Pow(2, float64(attemptNum)) * float64(min) + mult := math.Pow(2, float64(attemptNum)) * float64(_min) sleep := time.Duration(mult) - if float64(sleep) != mult || sleep > max { - sleep = max + if float64(sleep) != mult || sleep > _max { + sleep = _max } return sleep diff --git a/erigon-lib/rlp2/util.go b/erigon-lib/rlp2/util.go index 7cb1b78ed10..c3d1de93d81 100644 --- a/erigon-lib/rlp2/util.go +++ b/erigon-lib/rlp2/util.go @@ -76,7 +76,7 @@ func identifyToken(b byte) Token { return TokenLongBlob case b >= 192 && b <= 247: return TokenShortList - case b >= 248 && b <= 255: + case b >= 248: return TokenLongList } return TokenUnknown diff --git a/erigon-lib/seg/decompress_test.go b/erigon-lib/seg/decompress_test.go index 9e6ba470323..1568b06f4d9 100644 --- a/erigon-lib/seg/decompress_test.go +++ b/erigon-lib/seg/decompress_test.go @@ -566,10 +566,6 @@ func generateRandWords() { WORDS[N-1] = []byte{} } -func randIntInRange(min, max int) int { - return (rand.Intn(max-min) + min) -} - func clearPrevDict() { WORDS = [N][]byte{} WORD_FLAGS = [N]bool{} diff --git a/erigon-lib/state/inverted_index.go b/erigon-lib/state/inverted_index.go index fab8c6dd9ea..9146e971b20 100644 --- a/erigon-lib/state/inverted_index.go +++ b/erigon-lib/state/inverted_index.go @@ -1323,9 +1323,9 @@ func (it *InvertedIterator1) advanceInFiles() { } if !bytes.Equal(key, it.key) { ef, _ := eliasfano32.ReadEliasFano(val) - min := ef.Get(0) - max := ef.Max() - if min < it.endTxNum && max >= it.startTxNum { // Intersection of [min; max) and [it.startTxNum; it.endTxNum) + _min := ef.Get(0) + _max := ef.Max() + if _min < it.endTxNum && _max >= it.startTxNum { // Intersection of [min; max) and [it.startTxNum; it.endTxNum) it.key = key it.nextFileKey = key return diff --git a/erigon-lib/types/ssz/ssz.go b/erigon-lib/types/ssz/ssz.go index 60800543ae1..40d5ad3a19a 100644 --- a/erigon-lib/types/ssz/ssz.go +++ b/erigon-lib/types/ssz/ssz.go @@ -85,7 +85,7 @@ func UnmarshalUint64SSZ(x []byte) uint64 { return binary.LittleEndian.Uint64(x) } -func DecodeDynamicList[T Unmarshaler](bytes []byte, start, end uint32, max uint64, version int) ([]T, error) { +func DecodeDynamicList[T Unmarshaler](bytes []byte, start, end uint32, _max uint64, version int) ([]T, error) { if start > end || len(bytes) < int(end) { return nil, ErrBadOffset } @@ -96,7 +96,7 @@ func DecodeDynamicList[T Unmarshaler](bytes []byte, start, end uint32, max uint6 elementsNum = currentOffset / 4 } inPos := 4 - if uint64(elementsNum) > max { + if uint64(elementsNum) > _max { return nil, ErrTooBigList } objs := make([]T, elementsNum) @@ -121,7 +121,7 @@ func DecodeDynamicList[T Unmarshaler](bytes []byte, start, end uint32, max uint6 return objs, nil } -func DecodeStaticList[T Unmarshaler](bytes []byte, start, end, bytesPerElement uint32, max uint64, version int) ([]T, error) { +func DecodeStaticList[T Unmarshaler](bytes []byte, start, end, bytesPerElement uint32, _max uint64, version int) ([]T, error) { if start > end || len(bytes) < int(end) { return nil, ErrBadOffset } @@ -131,7 +131,7 @@ func 
DecodeStaticList[T Unmarshaler](bytes []byte, start, end, bytesPerElement u if uint32(len(buf))%bytesPerElement != 0 { return nil, ErrBufferNotRounded } - if elementsNum > max { + if elementsNum > _max { return nil, ErrTooBigList } objs := make([]T, elementsNum) @@ -144,7 +144,7 @@ func DecodeStaticList[T Unmarshaler](bytes []byte, start, end, bytesPerElement u return objs, nil } -func DecodeHashList(bytes []byte, start, end, max uint32) ([]common.Hash, error) { +func DecodeHashList(bytes []byte, start, end, _max uint32) ([]common.Hash, error) { if start > end || len(bytes) < int(end) { return nil, ErrBadOffset } @@ -154,7 +154,7 @@ func DecodeHashList(bytes []byte, start, end, max uint32) ([]common.Hash, error) if uint32(len(buf))%length.Hash != 0 { return nil, ErrBufferNotRounded } - if elementsNum > max { + if elementsNum > _max { return nil, ErrTooBigList } objs := make([]common.Hash, elementsNum) @@ -164,7 +164,7 @@ func DecodeHashList(bytes []byte, start, end, max uint32) ([]common.Hash, error) return objs, nil } -func DecodeNumbersList(bytes []byte, start, end uint32, max uint64) ([]uint64, error) { +func DecodeNumbersList(bytes []byte, start, end uint32, _max uint64) ([]uint64, error) { if start > end || len(bytes) < int(end) { return nil, ErrBadOffset } @@ -174,7 +174,7 @@ func DecodeNumbersList(bytes []byte, start, end uint32, max uint64) ([]uint64, e if uint64(len(buf))%length.BlockNum != 0 { return nil, ErrBufferNotRounded } - if elementsNum > max { + if elementsNum > _max { return nil, ErrTooBigList } objs := make([]uint64, elementsNum) @@ -195,12 +195,12 @@ func CalculateIndiciesLimit(maxCapacity, numItems, size uint64) uint64 { return numItems } -func DecodeString(bytes []byte, start, end, max uint64) ([]byte, error) { +func DecodeString(bytes []byte, start, end, _max uint64) ([]byte, error) { if start > end || len(bytes) < int(end) { return nil, ErrBadOffset } buf := bytes[start:end] - if uint64(len(buf)) > max { + if uint64(len(buf)) > _max { return nil, ErrTooBigList } return buf, nil diff --git a/eth/stagedsync/exec3_parallel.go b/eth/stagedsync/exec3_parallel.go index c65c83c863c..79aa06c6bcb 100644 --- a/eth/stagedsync/exec3_parallel.go +++ b/eth/stagedsync/exec3_parallel.go @@ -4,11 +4,12 @@ import ( "context" "errors" "fmt" - chaos_monkey "github.com/erigontech/erigon/tests/chaos-monkey" "sync" "sync/atomic" "time" + chaos_monkey "github.com/erigontech/erigon/tests/chaos-monkey" + "github.com/erigontech/erigon-lib/common" "github.com/erigontech/erigon-lib/kv" "github.com/erigontech/erigon-lib/log/v3" @@ -335,7 +336,7 @@ func (pe *parallelExecutor) rwLoop(ctx context.Context, maxTxNum uint64, logger defer tx.Rollback() pe.doms.SetTx(tx) - applyCtx, cancelApplyCtx = context.WithCancel(ctx) + applyCtx, cancelApplyCtx = context.WithCancel(ctx) //nolint:fatcontext defer cancelApplyCtx() pe.applyLoopWg.Add(1) go pe.applyLoop(applyCtx, maxTxNum, &blockComplete, pe.rwLoopErrCh) diff --git a/eth/stagedsync/stage_snapshots.go b/eth/stagedsync/stage_snapshots.go index d4a12250055..f8e056623e9 100644 --- a/eth/stagedsync/stage_snapshots.go +++ b/eth/stagedsync/stage_snapshots.go @@ -758,22 +758,22 @@ func (u *snapshotUploader) init(ctx context.Context, logger log.Logger) { } func (u *snapshotUploader) maxUploadedHeader() uint64 { - var max uint64 + var _max uint64 if len(u.files) > 0 { for _, state := range u.files { if state.local && state.remote { if state.info != nil { if state.info.Type.Enum() == coresnaptype.Enums.Headers { - if state.info.To > max { - max = state.info.To + 
if state.info.To > _max { + _max = state.info.To } } } else { if info, _, ok := snaptype.ParseFileName(u.cfg.dirs.Snap, state.file); ok { if info.Type.Enum() == coresnaptype.Enums.Headers { - if info.To > max { - max = info.To + if info.To > _max { + _max = info.To } } state.info = &info @@ -783,7 +783,7 @@ func (u *snapshotUploader) maxUploadedHeader() uint64 { } } - return max + return _max } type dirEntry struct { @@ -1040,25 +1040,25 @@ func (u *snapshotUploader) downloadLatestSnapshots(ctx context.Context, blockNum } } - var min uint64 + var _min uint64 for _, info := range lastSegments { if lastInfo, ok := info.Sys().(downloader.SnapInfo); ok { - if min == 0 || lastInfo.From() < min { - min = lastInfo.From() + if _min == 0 || lastInfo.From() < _min { + _min = lastInfo.From() } } } for segType, info := range lastSegments { if lastInfo, ok := info.Sys().(downloader.SnapInfo); ok { - if lastInfo.From() > min { + if lastInfo.From() > _min { for _, ent := range entries { if info, err := ent.Info(); err == nil { snapInfo, ok := info.Sys().(downloader.SnapInfo) if ok && snapInfo.Type().Enum() == segType && - snapInfo.From() == min { + snapInfo.From() == _min { lastSegments[segType] = info } } @@ -1088,17 +1088,17 @@ func (u *snapshotUploader) maxSeedableHeader() uint64 { } func (u *snapshotUploader) minBlockNumber() uint64 { - var min uint64 + var _min uint64 if list, err := snaptype.Segments(u.cfg.dirs.Snap); err == nil { for _, info := range list { - if u.seedable(info) && min == 0 || info.From < min { - min = info.From + if u.seedable(info) && _min == 0 || info.From < _min { + _min = info.From } } } - return min + return _min } func expandHomeDir(dirpath string) string { diff --git a/p2p/discover/table.go b/p2p/discover/table.go index 3fb9a240d31..330fd7ac484 100644 --- a/p2p/discover/table.go +++ b/p2p/discover/table.go @@ -745,8 +745,8 @@ func contains(ns []*node, id enode.ID) bool { } // pushNode adds n to the front of list, keeping at most max items. -func pushNode(list []*node, n *node, max int) ([]*node, *node) { - if len(list) < max { +func pushNode(list []*node, n *node, _max int) ([]*node, *node) { + if len(list) < _max { list = append(list, nil) } removed := list[len(list)-1] diff --git a/p2p/netutil/iptrack.go b/p2p/netutil/iptrack.go index c902bf97dbc..36b3a1df5b9 100644 --- a/p2p/netutil/iptrack.go +++ b/p2p/netutil/iptrack.go @@ -82,15 +82,15 @@ func (it *IPTracker) PredictEndpoint() string { // The current strategy is simple: find the endpoint with most statements. counts := make(map[string]int) - maxcount, max := 0, "" + maxcount, _max := 0, "" for _, s := range it.statements { c := counts[s.endpoint] + 1 counts[s.endpoint] = c if c > maxcount && c >= it.minStatements { - maxcount, max = c, s.endpoint + maxcount, _max = c, s.endpoint } } - return max + return _max } // AddStatement records that a certain host thinks our external endpoint is the one given. 
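Note on the pervasive `min`/`max` renames in this diff: the patch itself doesn't state the motivation, but `min` and `max` are predeclared builtin functions since Go 1.21, so parameters and locals with those names shadow the builtins (and trip shadowing linters). Below is a minimal sketch of the shadowing issue, using a hypothetical `clamp` helper that is not part of this change:

package main

import "fmt"

// clamp is a hypothetical example (not from this patch) that bounds val to [_min, _max].
// If its parameters were named min and max, the calls to the builtins below would not
// compile: the int parameters would shadow the Go 1.21 builtin functions min and max.
func clamp(_min, _max, val int) int {
	return max(_min, min(_max, val))
}

func main() {
	fmt.Println(clamp(0, 10, 42)) // 10
	fmt.Println(clamp(0, 10, -3)) // 0
}

Renaming to `_min`/`_max` (rather than, say, `lo`/`hi`) keeps the rename purely mechanical; none of the touched functions change behavior.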
diff --git a/polygon/bor/valset/validator_set.go b/polygon/bor/valset/validator_set.go index 6599c9f8f6c..1cc5e9e7c29 100644 --- a/polygon/bor/valset/validator_set.go +++ b/polygon/bor/valset/validator_set.go @@ -190,20 +190,20 @@ func computeMaxMinPriorityDiff(vals *ValidatorSet) int64 { panic("empty validator set") } - max := int64(math.MinInt64) - min := int64(math.MaxInt64) + _max := int64(math.MinInt64) + _min := int64(math.MaxInt64) for _, v := range vals.Validators { - if v.ProposerPriority < min { - min = v.ProposerPriority + if v.ProposerPriority < _min { + _min = v.ProposerPriority } - if v.ProposerPriority > max { - max = v.ProposerPriority + if v.ProposerPriority > _max { + _max = v.ProposerPriority } } - diff := max - min + diff := _max - _min if diff < 0 { return -1 * diff diff --git a/tests/fuzzers/difficulty/difficulty-fuzz.go b/tests/fuzzers/difficulty/difficulty-fuzz.go index 360d8581bd6..9e7b82d96b5 100644 --- a/tests/fuzzers/difficulty/difficulty-fuzz.go +++ b/tests/fuzzers/difficulty/difficulty-fuzz.go @@ -45,11 +45,11 @@ func (f *fuzzer) read(size int) []byte { return out } -func (f *fuzzer) readSlice(min, max int) []byte { +func (f *fuzzer) readSlice(_min, _max int) []byte { var a uint16 //nolint:errcheck binary.Read(f.input, binary.LittleEndian, &a) - size := min + int(a)%(max-min) + size := _min + int(a)%(_max-_min) out := make([]byte, size) if _, err := f.input.Read(out); err != nil { f.exhausted = true @@ -57,15 +57,15 @@ func (f *fuzzer) readSlice(min, max int) []byte { return out } -func (f *fuzzer) readUint64(min, max uint64) uint64 { - if min == max { - return min +func (f *fuzzer) readUint64(_min, _max uint64) uint64 { + if _min == _max { + return _min } var a uint64 if err := binary.Read(f.input, binary.LittleEndian, &a); err != nil { f.exhausted = true } - a = min + a%(max-min) + a = _min + a%(_max-_min) return a } func (f *fuzzer) readBool() bool { diff --git a/turbo/jsonrpc/debug_api.go b/turbo/jsonrpc/debug_api.go index d932b50ccf9..78e96f62f5e 100644 --- a/turbo/jsonrpc/debug_api.go +++ b/turbo/jsonrpc/debug_api.go @@ -21,14 +21,13 @@ import ( "errors" "fmt" - jsoniter "github.com/json-iterator/go" - "github.com/erigontech/erigon-lib/common" "github.com/erigontech/erigon-lib/common/hexutil" "github.com/erigontech/erigon-lib/common/hexutility" "github.com/erigontech/erigon-lib/kv" "github.com/erigontech/erigon-lib/kv/order" "github.com/erigontech/erigon-lib/kv/rawdbv3" + jsoniter "github.com/json-iterator/go" "github.com/erigontech/erigon/core/state" "github.com/erigontech/erigon/core/types/accounts" @@ -205,48 +204,40 @@ func (api *PrivateDebugAPIImpl) GetModifiedAccountsByNumber(ctx context.Context, if startNum > endNum { return nil, fmt.Errorf("start block (%d) must be less than or equal to end block (%d)", startNum, endNum) } - //[from, to) startTxNum, err := txNumsReader.Min(tx, startNum) if err != nil { return nil, err } - endTxNum, err := txNumsReader.Max(tx, endNum-1) + endTxNum, err := txNumsReader.Min(tx, endNum) if err != nil { return nil, err } - return getModifiedAccountsV3(tx.(kv.TemporalTx), startTxNum, endTxNum) + return getModifiedAccounts(tx.(kv.TemporalTx), startTxNum, endTxNum) } -// getModifiedAccountsV3 returns a list of addresses that were modified in the block range +// getModifiedAccounts returns a list of addresses that were modified in the block range // [startNum:endNum) -func getModifiedAccountsV3(tx kv.TemporalTx, startTxNum, endTxNum uint64) ([]common.Address, error) { +func getModifiedAccounts(tx kv.TemporalTx, 
startTxNum, endTxNum uint64) ([]common.Address, error) { it, err := tx.HistoryRange(kv.AccountsHistory, int(startTxNum), int(endTxNum), order.Asc, kv.Unlim) if err != nil { return nil, err } defer it.Close() - changedAddrs := make(map[common.Address]struct{}) + var result []common.Address + saw := make(map[common.Address]struct{}) for it.HasNext() { k, _, err := it.Next() if err != nil { return nil, err } - changedAddrs[common.BytesToAddress(k)] = struct{}{} - } - - if len(changedAddrs) == 0 { - return nil, nil - } - - idx := 0 - result := make([]common.Address, len(changedAddrs)) - for addr := range changedAddrs { - copy(result[idx][:], addr[:]) - idx++ + //TODO: data is sorted, enough to compare with prevKey + if _, ok := saw[common.BytesToAddress(k)]; !ok { + saw[common.BytesToAddress(k)] = struct{}{} + result = append(result, common.BytesToAddress(k)) + } } - return result, nil } @@ -294,7 +285,7 @@ func (api *PrivateDebugAPIImpl) GetModifiedAccountsByHash(ctx context.Context, s if err != nil { return nil, err } - return getModifiedAccountsV3(tx.(kv.TemporalTx), startTxNum, endTxNum) + return getModifiedAccounts(tx.(kv.TemporalTx), startTxNum, endTxNum) } func (api *PrivateDebugAPIImpl) AccountAt(ctx context.Context, blockHash common.Hash, txIndex uint64, address common.Address) (*AccountResult, error) { diff --git a/turbo/snapshotsync/freezeblocks/bor_snapshots.go b/turbo/snapshotsync/freezeblocks/bor_snapshots.go index 744b6900d23..5b681954f2c 100644 --- a/turbo/snapshotsync/freezeblocks/bor_snapshots.go +++ b/turbo/snapshotsync/freezeblocks/bor_snapshots.go @@ -146,7 +146,7 @@ func (br *BlockRetire) retireBorBlocks(ctx context.Context, minBlockNum uint64, // this is one off code to fix an issue in 2.49.x->2.52.x which missed // removal of intermediate segments after a merge operation -func removeBorOverlaps(dir string, active []snaptype.FileInfo, max uint64) { +func removeBorOverlaps(dir string, active []snaptype.FileInfo, _max uint64) { list, err := snaptype.Segments(dir) if err != nil { @@ -165,12 +165,12 @@ func removeBorOverlaps(dir string, active []snaptype.FileInfo, max uint64) { // added overhead to make sure we don't delete in the // current 500k block segment - if max > 500_001 { - max -= 500_001 + if _max > 500_001 { + _max -= 500_001 } for _, f := range l { - if max < f.From { + if _max < f.From { continue } diff --git a/turbo/snapshotsync/snapshots.go b/turbo/snapshotsync/snapshots.go index 3fc4d054cc3..da86afbc070 100644 --- a/turbo/snapshotsync/snapshots.go +++ b/turbo/snapshotsync/snapshots.go @@ -838,7 +838,7 @@ func (s *RoSnapshots) dirtyIdxAvailability(segtype snaptype.Enum) uint64 { return 0 } - var max uint64 + var _max uint64 dirty.Walk(func(segments []*DirtySegment) bool { for _, seg := range segments { @@ -846,30 +846,30 @@ func (s *RoSnapshots) dirtyIdxAvailability(segtype snaptype.Enum) uint64 { break } - max = seg.to - 1 + _max = seg.to - 1 } return true }) - return max + return _max } func (s *RoSnapshots) visibleIdxAvailability(segtype snaptype.Enum) uint64 { tx := s.ViewType(segtype.Type()) defer tx.Close() - var max uint64 + var _max uint64 for _, seg := range tx.Segments { if !seg.IsIndexed() { break } - max = seg.to - 1 + _max = seg.to - 1 } - return max + return _max } func (s *RoSnapshots) Ls() {