Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

remove e2 stages constants #11358

Merged
merged 8 commits into from
Jul 29, 2024
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
57 changes: 0 additions & 57 deletions cmd/hack/hack.go
Original file line number Diff line number Diff line change
Expand Up @@ -52,7 +52,6 @@ import (
"github.com/erigontech/erigon/cmd/hack/flow"
"github.com/erigontech/erigon/cmd/hack/tool"
"github.com/erigontech/erigon/common"
"github.com/erigontech/erigon/common/paths"
"github.com/erigontech/erigon/core"
"github.com/erigontech/erigon/core/rawdb"
"github.com/erigontech/erigon/core/rawdb/blockio"
Expand Down Expand Up @@ -167,56 +166,6 @@ func printTxHashes(chaindata string, block uint64) error {
return nil
}

// repairCurrent copies every entry of the HashedStorage bucket from the
// local "statedb" database into the (hard-coded) Ropsten chaindata
// database, clearing the destination bucket first. One-off hack-tool
// command; paths are intentionally hard-coded.
func repairCurrent() {
	historyDb := mdbx.MustOpen("/Volumes/tb4/erigon/ropsten/geth/chaindata")
	defer historyDb.Close()
	currentDb := mdbx.MustOpen("statedb")
	defer currentDb.Close()
	// Drop the stale destination bucket before copying.
	tool.Check(historyDb.Update(context.Background(), func(tx kv.RwTx) error {
		return tx.ClearBucket(kv.HashedStorage)
	}))
	tool.Check(historyDb.Update(context.Background(), func(tx kv.RwTx) error {
		newB, err := tx.RwCursor(kv.HashedStorage)
		if err != nil {
			return err
		}
		count := 0
		if err := currentDb.View(context.Background(), func(ctx kv.Tx) error {
			c, err := ctx.Cursor(kv.HashedStorage)
			if err != nil {
				return err
			}
			// Check the cursor error BEFORE the key: the original loop
			// condition (k != nil) exited silently when First/Next
			// returned a nil key along with a non-nil error.
			for k, v, err := c.First(); ; k, v, err = c.Next() {
				if err != nil {
					return err
				}
				if k == nil {
					break
				}
				tool.Check(newB.Put(k, v))
				count++
				// Log progress every 10000 items; the original compared
				// with == and therefore printed at most once.
				if count%10000 == 0 {
					fmt.Printf("Copied %d storage items\n", count)
				}
			}
			return nil
		}); err != nil {
			return err
		}
		return nil
	}))
}

// dumpStorage walks the E2StorageHistory bucket of the default datadir's
// chaindata database and prints each key/value pair in hex, one pair per
// line. Panics on any database error (hack-tool convention).
func dumpStorage() {
	db := mdbx.MustOpen(paths.DefaultDataDir() + "/geth/chaindata")
	defer db.Close()
	walk := func(tx kv.Tx) error {
		return tx.ForEach(kv.E2StorageHistory, nil, func(k, v []byte) error {
			fmt.Printf("%x %x\n", k, v)
			return nil
		})
	}
	if err := db.View(context.Background(), walk); err != nil {
		panic(err)
	}
}

func printBucket(chaindata string) {
db := mdbx.MustOpen(chaindata)
defer db.Close()
Expand Down Expand Up @@ -930,9 +879,6 @@ func main() {
case "testBlockHashes":
testBlockHashes(*chaindata, *block, libcommon.HexToHash(*hash))

case "dumpStorage":
dumpStorage()

case "current":
printCurrentBlockNumber(*chaindata)

Expand All @@ -957,9 +903,6 @@ func main() {
case "extractBodies":
err = extractBodies(*chaindata)

case "repairCurrent":
repairCurrent()

case "printTxHashes":
printTxHashes(*chaindata, uint64(*block))

Expand Down
8 changes: 3 additions & 5 deletions core/rawdb/rawdbreset/reset_stages.go
Original file line number Diff line number Diff line change
Expand Up @@ -136,7 +136,7 @@ func ResetExec(ctx context.Context, db kv.RwDB, chain string, tmpDir string, log
cleanupList = append(cleanupList, stateV3Buckets...)

return db.Update(ctx, func(tx kv.RwTx) error {
if err := clearStageProgress(tx, stages.Execution, stages.HashState, stages.IntermediateHashes); err != nil {
if err := clearStageProgress(tx, stages.Execution); err != nil {
return err
}

Expand All @@ -162,10 +162,8 @@ func ResetTxLookup(tx kv.RwTx) error {
}

var Tables = map[stages.SyncStage][]string{
stages.HashState: {kv.HashedAccounts, kv.HashedStorage, kv.ContractCode},
stages.IntermediateHashes: {kv.TrieOfAccounts, kv.TrieOfStorage},
stages.CustomTrace: {},
stages.Finish: {},
stages.CustomTrace: {},
stages.Finish: {},
}
var stateBuckets = []string{
kv.Epoch, kv.PendingEpoch, kv.BorReceipts,
Expand Down
17 changes: 0 additions & 17 deletions erigon-lib/diagnostics/entities.go
Original file line number Diff line number Diff line change
Expand Up @@ -22,23 +22,6 @@ import (
"golang.org/x/exp/maps"
)

type SyncStageType string

const (
Snapshots SyncStageType = "Snapshots"
BlockHashes SyncStageType = "BlockHashes"
Senders SyncStageType = "Senders"
Execution SyncStageType = "Execution"
HashState SyncStageType = "HashState"
IntermediateHashes SyncStageType = "IntermediateHashes"
CallTraces SyncStageType = "CallTraces"
AccountHistoryIndex SyncStageType = "AccountHistoryIndex"
StorageHistoryIndex SyncStageType = "StorageHistoryIndex"
LogIndex SyncStageType = "LogIndex"
TxLookup SyncStageType = "TxLookup"
Finish SyncStageType = "Finish"
)

type PeerStatistics struct {
PeerType string
BytesIn uint64
Expand Down
40 changes: 14 additions & 26 deletions eth/stagedsync/stages/stages.go
Original file line number Diff line number Diff line change
Expand Up @@ -29,26 +29,20 @@ import (
type SyncStage string

var (
Snapshots SyncStage = "Snapshots" // Snapshots
Headers SyncStage = "Headers" // Headers are downloaded, their Proof-Of-Work validity and chaining is verified
BorHeimdall SyncStage = "BorHeimdall" // Downloading data from heimdall corresponding to the downloaded headers (validator sets and sync events)
PolygonSync SyncStage = "PolygonSync" // Use polygon sync component to sync headers, bodies and heimdall data
CumulativeIndex SyncStage = "CumulativeIndex" // Calculate how much gas has been used up to each block.
BlockHashes SyncStage = "BlockHashes" // Headers Number are written, fills blockHash => number bucket
Bodies SyncStage = "Bodies" // Block bodies are downloaded, TxHash and UncleHash are getting verified
Senders SyncStage = "Senders" // "From" recovered from signatures, bodies re-written
Execution SyncStage = "Execution" // Executing each block w/o buildinf a trie
CustomTrace SyncStage = "CustomTrace" // Executing each block w/o buildinf a trie
Translation SyncStage = "Translation" // Translation each marked for translation contract (from EVM to TEVM)
VerkleTrie SyncStage = "VerkleTrie"
IntermediateHashes SyncStage = "IntermediateHashes" // Generate intermediate hashes, calculate the state root hash
HashState SyncStage = "HashState" // Apply Keccak256 to all the keys in the state
AccountHistoryIndex SyncStage = "AccountHistoryIndex" // Generating history index for accounts
StorageHistoryIndex SyncStage = "StorageHistoryIndex" // Generating history index for storage
LogIndex SyncStage = "LogIndex" // Generating logs index (from receipts)
CallTraces SyncStage = "CallTraces" // Generating call traces index
TxLookup SyncStage = "TxLookup" // Generating transactions lookup index
Finish SyncStage = "Finish" // Nominal stage after all other stages
Snapshots SyncStage = "Snapshots" // Snapshots
Headers SyncStage = "Headers" // Headers are downloaded, their Proof-Of-Work validity and chaining is verified
BorHeimdall SyncStage = "BorHeimdall" // Downloading data from heimdall corresponding to the downloaded headers (validator sets and sync events)
PolygonSync SyncStage = "PolygonSync" // Use polygon sync component to sync headers, bodies and heimdall data
CumulativeIndex SyncStage = "CumulativeIndex" // Calculate how much gas has been used up to each block.
BlockHashes SyncStage = "BlockHashes" // Headers Number are written, fills blockHash => number bucket
Bodies SyncStage = "Bodies" // Block bodies are downloaded, TxHash and UncleHash are getting verified
Senders SyncStage = "Senders" // "From" recovered from signatures, bodies re-written
	Execution       SyncStage = "Execution"       // Executing each block w/o building a trie
	CustomTrace     SyncStage = "CustomTrace"     // Executing each block w/o building a trie
Translation SyncStage = "Translation" // Translation each marked for translation contract (from EVM to TEVM)
VerkleTrie SyncStage = "VerkleTrie"
TxLookup SyncStage = "TxLookup" // Generating transactions lookup index
Finish SyncStage = "Finish" // Nominal stage after all other stages

MiningCreateBlock SyncStage = "MiningCreateBlock"
MiningBorHeimdall SyncStage = "MiningBorHeimdall"
Expand All @@ -73,12 +67,6 @@ var AllStages = []SyncStage{
Execution,
CustomTrace,
Translation,
HashState,
IntermediateHashes,
AccountHistoryIndex,
StorageHistoryIndex,
LogIndex,
CallTraces,
TxLookup,
Finish,
}
Expand Down
36 changes: 3 additions & 33 deletions eth/stagedsync/sync_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -206,23 +206,8 @@ func TestUnwindSomeStagesBehindUnwindPoint(t *testing.T) {
return u.Done(txc.Tx)
},
},
{
ID: stages.IntermediateHashes,
Disabled: true,
Forward: func(badBlockUnwind bool, s *StageState, u Unwinder, txc wrap.TxContainer, logger log.Logger) error {
flow = append(flow, stages.IntermediateHashes)
if s.BlockNumber == 0 {
return s.Update(txc.Tx, 2000)
}
return nil
},
Unwind: func(u *UnwindState, s *StageState, txc wrap.TxContainer, logger log.Logger) error {
flow = append(flow, unwindOf(stages.IntermediateHashes))
return u.Done(txc.Tx)
},
},
}
state := New(ethconfig.Defaults.Sync, s, []stages.SyncStage{s[3].ID, s[2].ID, s[1].ID, s[0].ID}, nil, log.New())
state := New(ethconfig.Defaults.Sync, s, []stages.SyncStage{s[2].ID, s[1].ID, s[0].ID}, nil, log.New())
db, tx := memdb.NewTestTx(t)
_, err := state.Run(db, wrap.TxContainer{Tx: tx}, true /* initialCycle */, false)
assert.NoError(t, err)
Expand Down Expand Up @@ -299,23 +284,8 @@ func TestUnwind(t *testing.T) {
return u.Done(txc.Tx)
},
},
{
ID: stages.IntermediateHashes,
Disabled: true,
Forward: func(badBlockUnwind bool, s *StageState, u Unwinder, txc wrap.TxContainer, logger log.Logger) error {
flow = append(flow, stages.IntermediateHashes)
if s.BlockNumber == 0 {
return s.Update(txc.Tx, 2000)
}
return nil
},
Unwind: func(u *UnwindState, s *StageState, txc wrap.TxContainer, logger log.Logger) error {
flow = append(flow, unwindOf(stages.IntermediateHashes))
return u.Done(txc.Tx)
},
},
}
state := New(ethconfig.Defaults.Sync, s, []stages.SyncStage{s[3].ID, s[2].ID, s[1].ID, s[0].ID}, nil, log.New())
state := New(ethconfig.Defaults.Sync, s, []stages.SyncStage{s[2].ID, s[1].ID, s[0].ID}, nil, log.New())
db, tx := memdb.NewTestTx(t)
_, err := state.Run(db, wrap.TxContainer{Tx: tx}, true /* initialCycle */, false)
assert.NoError(t, err)
Expand All @@ -342,7 +312,7 @@ func TestUnwind(t *testing.T) {

//check that at unwind disabled stage not appear
flow = flow[:0]
state.unwindOrder = []*Stage{s[3], s[2], s[1], s[0]}
state.unwindOrder = []*Stage{s[2], s[1], s[0]}
_ = state.UnwindTo(100, UnwindReason{}, tx)
_, err = state.Run(db, wrap.TxContainer{Tx: tx}, true /* initialCycle */, false)
assert.NoError(t, err)
Expand Down
Loading