diff --git a/config/default.config.yml b/config/default.config.yml index 1aafdad2..38681baa 100644 --- a/config/default.config.yml +++ b/config/default.config.yml @@ -47,15 +47,9 @@ beaconapi: # indexer keeps track of the latest epochs in memory. indexer: - # number of epochs to load on startup - prepopulateEpochs: 2 - # max number of epochs to keep in memory inMemoryEpochs: 3 - # epoch processing delay (should be >= 2) - epochProcessingDelay: 2 - # disable synchronizing and everything that writes to the db (indexer just maintains local cache) disableIndexWriter: false diff --git a/db/db.go b/db/db.go index 6df81826..ee812a61 100644 --- a/db/db.go +++ b/db/db.go @@ -556,3 +556,139 @@ func GetSlotAssignmentsForSlots(firstSlot uint64, lastSlot uint64) []*dbtypes.Sl } return assignments } + +func GetBlockOrphanedRefs(blockRoots [][]byte) []*dbtypes.BlockOrphanedRef { + orphanedRefs := []*dbtypes.BlockOrphanedRef{} + if len(blockRoots) == 0 { + return orphanedRefs + } + var sql strings.Builder + fmt.Fprintf(&sql, ` + SELECT + root, orphaned + FROM blocks + WHERE root in (`) + argIdx := 0 + args := make([]any, len(blockRoots)) + for i, root := range blockRoots { + if i > 0 { + fmt.Fprintf(&sql, ", ") + } + fmt.Fprintf(&sql, "$%v", argIdx+1) + args[argIdx] = root + argIdx += 1 + } + fmt.Fprintf(&sql, ")") + err := ReaderDb.Select(&orphanedRefs, sql.String(), args...) + if err != nil { + logger.Errorf("Error while fetching blocks: %v", err) + return nil + } + return orphanedRefs +} + +func InsertUnfinalizedBlock(block *dbtypes.UnfinalizedBlock, tx *sqlx.Tx) error { + _, err := tx.Exec(EngineQuery(map[dbtypes.DBEngineType]string{ + dbtypes.DBEnginePgsql: ` + INSERT INTO unfinalized_blocks ( + root, slot, header, block + ) VALUES ($1, $2, $3, $4) + ON CONFLICT (root) DO NOTHING`, + dbtypes.DBEngineSqlite: ` + INSERT OR IGNORE INTO unfinalized_blocks ( + root, slot, header, block + ) VALUES ($1, $2, $3, $4)`, + }), + block.Root, block.Slot, block.Header, block.Block) + if err != nil { + return err + } + return nil +} + +func GetUnfinalizedBlockHeader() []*dbtypes.UnfinalizedBlockHeader { + blockRefs := []*dbtypes.UnfinalizedBlockHeader{} + err := ReaderDb.Select(&blockRefs, ` + SELECT + root, slot, header + FROM unfinalized_blocks + `) + if err != nil { + logger.Errorf("Error while fetching unfinalized block refs: %v", err) + return nil + } + return blockRefs +} + +func GetUnfinalizedBlock(root []byte) *dbtypes.UnfinalizedBlock { + block := dbtypes.UnfinalizedBlock{} + err := ReaderDb.Get(&block, ` + SELECT root, slot, header, block + FROM unfinalized_blocks + WHERE root = $1 + `, root) + if err != nil { + logger.Errorf("Error while fetching unfinalized block 0x%x: %v", root, err) + return nil + } + return &block +} + +func InsertUnfinalizedEpochDuty(epochDuty *dbtypes.UnfinalizedEpochDuty, tx *sqlx.Tx) error { + _, err := tx.Exec(EngineQuery(map[dbtypes.DBEngineType]string{ + dbtypes.DBEnginePgsql: ` + INSERT INTO unfinalized_duties ( + epoch, dependent_root, duties + ) VALUES ($1, $2, $3) + ON CONFLICT (epoch, dependent_root) DO NOTHING`, + dbtypes.DBEngineSqlite: ` + INSERT OR IGNORE INTO unfinalized_duties ( + epoch, dependent_root, duties + ) VALUES ($1, $2, $3)`, + }), + epochDuty.Epoch, epochDuty.DependentRoot, epochDuty.Duties) + if err != nil { + return err + } + return nil + } + +func GetUnfinalizedEpochDutyRefs() []*dbtypes.UnfinalizedEpochDutyRef { + dutyRefs := []*dbtypes.UnfinalizedEpochDutyRef{} + err := ReaderDb.Select(&dutyRefs, ` + SELECT + epoch, dependent_root + FROM unfinalized_duties + `) + if
err != nil { + logger.Errorf("Error while fetching unfinalized duty refs: %v", err) + return nil + } + return dutyRefs +} + +func GetUnfinalizedDuty(epoch uint64, dependentRoot []byte) *dbtypes.UnfinalizedEpochDuty { + epochDuty := dbtypes.UnfinalizedEpochDuty{} + err := ReaderDb.Get(&epochDuty, ` + SELECT epoch, dependent_root, duties + FROM unfinalized_duties + WHERE epoch = $1 AND dependent_root = $2 + `, epoch, dependentRoot) + if err != nil { + logger.Errorf("Error while fetching unfinalized duty %v/0x%x: %v", epoch, dependentRoot, err) + return nil + } + return &epochDuty +} + +func DeleteUnfinalizedBefore(slot uint64, tx *sqlx.Tx) error { + _, err := tx.Exec(`DELETE FROM unfinalized_blocks WHERE slot < $1`, slot) + if err != nil { + return err + } + _, err = tx.Exec(`DELETE FROM unfinalized_duties WHERE epoch < $1`, utils.EpochOfSlot(slot)) + if err != nil { + return err + } + return nil +} diff --git a/db/schema/pgsql/20230720234842_init.sql b/db/schema/pgsql/20230720234842_init.sql index f3dfa613..0439d1bd 100644 --- a/db/schema/pgsql/20230720234842_init.sql +++ b/db/schema/pgsql/20230720234842_init.sql @@ -43,7 +43,7 @@ CREATE INDEX IF NOT EXISTS "blocks_graffiti_idx" CREATE INDEX IF NOT EXISTS "blocks_slot_idx" ON public."blocks" - ("root" ASC NULLS LAST); + ("slot" ASC NULLS LAST); CREATE INDEX IF NOT EXISTS "blocks_state_root_idx" ON public."blocks" diff --git a/db/schema/pgsql/20230820050910_indexer-cache.sql b/db/schema/pgsql/20230820050910_indexer-cache.sql new file mode 100644 index 00000000..2062d1d0 --- /dev/null +++ b/db/schema/pgsql/20230820050910_indexer-cache.sql @@ -0,0 +1,29 @@ +-- +goose Up +-- +goose StatementBegin + +CREATE TABLE IF NOT EXISTS public."unfinalized_blocks" +( + "root" bytea NOT NULL, + "slot" bigint NOT NULL, + "header" text COLLATE pg_catalog."default" NOT NULL, + "block" text COLLATE pg_catalog."default" NOT NULL, + CONSTRAINT "unfinalized_blocks_pkey" PRIMARY KEY ("root") +); + +CREATE INDEX IF NOT EXISTS "unfinalized_blocks_slot_idx" + ON public."unfinalized_blocks" + ("slot" ASC NULLS LAST); + +CREATE TABLE IF NOT EXISTS public."unfinalized_duties" +( + "epoch" bigint NOT NULL, + "dependent_root" bytea NOT NULL, + "duties" bytea NOT NULL, + CONSTRAINT "unfinalized_duties_pkey" PRIMARY KEY ("epoch", "dependent_root") +); + +-- +goose StatementEnd +-- +goose Down +-- +goose StatementBegin +SELECT 'NOT SUPPORTED'; +-- +goose StatementEnd diff --git a/db/schema/sqlite/20230720234842_init.sql b/db/schema/sqlite/20230720234842_init.sql index 51fa4e57..2056e293 100644 --- a/db/schema/sqlite/20230720234842_init.sql +++ b/db/schema/sqlite/20230720234842_init.sql @@ -40,7 +40,7 @@ CREATE INDEX IF NOT EXISTS "blocks_graffiti_idx" CREATE INDEX IF NOT EXISTS "blocks_slot_idx" ON "blocks" - ("root" ASC); + ("slot" ASC); CREATE INDEX IF NOT EXISTS "blocks_state_root_idx" ON "blocks" diff --git a/db/schema/sqlite/20230820050910_indexer-cache.sql b/db/schema/sqlite/20230820050910_indexer-cache.sql new file mode 100644 index 00000000..dff82f93 --- /dev/null +++ b/db/schema/sqlite/20230820050910_indexer-cache.sql @@ -0,0 +1,29 @@ +-- +goose Up +-- +goose StatementBegin + +CREATE TABLE IF NOT EXISTS "unfinalized_blocks" +( + "root" BLOB NOT NULL, + "slot" bigint NOT NULL, + "header" text NOT NULL, + "block" text NOT NULL, + CONSTRAINT "unfinalized_blocks_pkey" PRIMARY KEY ("root") +); + +CREATE INDEX IF NOT EXISTS "unfinalized_blocks_slot_idx" + ON "unfinalized_blocks" + ("slot" ASC); + +CREATE TABLE IF NOT EXISTS "unfinalized_duties" +( + "epoch" bigint NOT 
NULL, + "dependent_root" BLOB NOT NULL, + "duties" BLOB NOT NULL, + CONSTRAINT "unfinalized_duties_pkey" PRIMARY KEY ("epoch", "dependent_root") +); + +-- +goose StatementEnd +-- +goose Down +-- +goose StatementBegin +SELECT 'NOT SUPPORTED'; +-- +goose StatementEnd diff --git a/dbtypes/dbtypes.go b/dbtypes/dbtypes.go index 011d1661..8ee7f1a7 100644 --- a/dbtypes/dbtypes.go +++ b/dbtypes/dbtypes.go @@ -28,6 +28,11 @@ type Block struct { SyncParticipation float32 `db:"sync_participation"` } +type BlockOrphanedRef struct { + Root []byte `db:"root"` + Orphaned bool `db:"orphaned"` +} + type Epoch struct { Epoch uint64 `db:"epoch"` ValidatorCount uint64 `db:"validator_count"` @@ -60,3 +65,27 @@ type SlotAssignment struct { Slot uint64 `db:"slot"` Proposer uint64 `db:"proposer"` } + +type UnfinalizedBlock struct { + Root []byte `db:"root"` + Slot uint64 `db:"slot"` + Header string `db:"header"` + Block string `db:"block"` +} + +type UnfinalizedBlockHeader struct { + Root []byte `db:"root"` + Slot uint64 `db:"slot"` + Header string `db:"header"` +} + +type UnfinalizedEpochDuty struct { + Epoch uint64 `db:"epoch"` + DependentRoot []byte `db:"dependent_root"` + Duties []byte `db:"duties"` +} + +type UnfinalizedEpochDutyRef struct { + Epoch uint64 `db:"epoch"` + DependentRoot []byte `db:"dependent_root"` +} diff --git a/handlers/epoch.go b/handlers/epoch.go index a91ab1d1..405d93f1 100644 --- a/handlers/epoch.go +++ b/handlers/epoch.go @@ -75,7 +75,7 @@ func buildEpochPageData(epoch uint64) (*models.EpochPageData, time.Duration) { return nil, -1 } - finalizedHead, _ := services.GlobalBeaconService.GetFinalizedBlockHead() + finalizedEpoch, _ := services.GlobalBeaconService.GetFinalizedEpoch() slotAssignments, syncedEpochs := services.GlobalBeaconService.GetProposerAssignments(epoch, epoch) nextEpoch := epoch + 1 @@ -90,10 +90,7 @@ func buildEpochPageData(epoch uint64) (*models.EpochPageData, time.Duration) { NextEpoch: nextEpoch, Ts: utils.EpochToTime(epoch), Synchronized: syncedEpochs[epoch], - } - - if finalizedHead != nil { - pageData.Finalized = uint64(finalizedHead.Data.Header.Message.Slot) >= lastSlot + Finalized: finalizedEpoch >= int64(epoch), } dbEpochs := services.GlobalBeaconService.GetDbEpochs(epoch, 1) diff --git a/handlers/epochs.go b/handlers/epochs.go index fc6824b1..0072102a 100644 --- a/handlers/epochs.go +++ b/handlers/epochs.go @@ -90,7 +90,7 @@ func buildEpochsPageData(firstEpoch uint64, pageSize uint64) (*models.EpochsPage } pageData.LastPageEpoch = pageSize - 1 - finalizedHead, _ := services.GlobalBeaconService.GetFinalizedBlockHead() + finalizedEpoch, _ := services.GlobalBeaconService.GetFinalizedEpoch() epochLimit := pageSize // load epochs @@ -102,10 +102,8 @@ func buildEpochsPageData(firstEpoch uint64, pageSize uint64) (*models.EpochsPage allFinalized := true for epochIdx := int64(firstEpoch); epochIdx >= 0 && epochCount < epochLimit; epochIdx-- { epoch := uint64(epochIdx) - finalized := false - if finalizedHead != nil && uint64(finalizedHead.Data.Header.Message.Slot) >= epoch*utils.Config.Chain.Config.SlotsPerEpoch { - finalized = true - } else { + finalized := finalizedEpoch >= epochIdx + if !finalized { allFinalized = false } epochData := &models.EpochsPageDataEpoch{ diff --git a/handlers/index.go b/handlers/index.go index dcbfa34a..d08078d5 100644 --- a/handlers/index.go +++ b/handlers/index.go @@ -40,7 +40,7 @@ func Index(w http.ResponseWriter, r *http.Request) { func getIndexPageData() *models.IndexPageData { pageData := &models.IndexPageData{} - pageCacheKey := 
fmt.Sprintf("index") + pageCacheKey := "index" pageData = services.GlobalFrontendCache.ProcessCachedPage(pageCacheKey, true, pageData, func(pageCall *services.FrontendCacheProcessingPage) interface{} { pageData, cacheTimeout := buildIndexPageData() pageCall.CacheTimeout = cacheTimeout @@ -62,24 +62,15 @@ func buildIndexPageData() (*models.IndexPageData, time.Duration) { currentEpoch = 0 } currentSlot := utils.TimeToSlot(uint64(now.Unix())) - if currentSlot < 0 { - currentSlot = 0 - } currentSlotIndex := (currentSlot % utils.Config.Chain.Config.SlotsPerEpoch) + 1 - finalizedHead, _ := services.GlobalBeaconService.GetFinalizedBlockHead() - var finalizedSlot uint64 - if finalizedHead != nil { - finalizedSlot = uint64(finalizedHead.Data.Header.Message.Slot) - } else { - finalizedSlot = 0 - } + finalizedEpoch, _ := services.GlobalBeaconService.GetFinalizedEpoch() syncState := dbtypes.IndexerSyncState{} db.GetExplorerState("indexer.syncstate", &syncState) var isSynced bool - if currentEpoch > int64(utils.Config.Indexer.EpochProcessingDelay) { - isSynced = syncState.Epoch >= uint64(currentEpoch-int64(utils.Config.Indexer.EpochProcessingDelay)) + if finalizedEpoch >= 1 { + isSynced = syncState.Epoch >= uint64(finalizedEpoch-1) } else { isSynced = true } @@ -89,7 +80,7 @@ func buildIndexPageData() (*models.IndexPageData, time.Duration) { DepositContract: utils.Config.Chain.Config.DepositContractAddress, ShowSyncingMessage: !isSynced, CurrentEpoch: uint64(currentEpoch), - CurrentFinalizedEpoch: utils.EpochOfSlot(finalizedSlot), + CurrentFinalizedEpoch: finalizedEpoch, CurrentSlot: currentSlot, CurrentSlotIndex: currentSlotIndex, CurrentScheduledCount: utils.Config.Chain.Config.SlotsPerEpoch - currentSlotIndex, @@ -176,10 +167,6 @@ func buildIndexPageData() (*models.IndexPageData, time.Duration) { if epochData == nil { continue } - finalized := false - if finalizedHead != nil && uint64(finalizedHead.Data.Header.Message.Slot) >= epochData.Epoch*utils.Config.Chain.Config.SlotsPerEpoch { - finalized = true - } voteParticipation := float64(1) if epochData.Eligible > 0 { voteParticipation = float64(epochData.VotedTarget) * 100.0 / float64(epochData.Eligible) @@ -187,7 +174,7 @@ func buildIndexPageData() (*models.IndexPageData, time.Duration) { pageData.RecentEpochs = append(pageData.RecentEpochs, &models.IndexPageDataEpochs{ Epoch: epochData.Epoch, Ts: utils.EpochToTime(epochData.Epoch), - Finalized: finalized, + Finalized: finalizedEpoch >= int64(epochData.Epoch), EligibleEther: epochData.Eligible, TargetVoted: epochData.VotedTarget, HeadVoted: epochData.VotedHead, diff --git a/handlers/search.go b/handlers/search.go index adbcc341..54cc6bd9 100644 --- a/handlers/search.go +++ b/handlers/search.go @@ -143,8 +143,8 @@ func SearchAhead(w http.ResponseWriter, r *http.Request) { if cachedBlock != nil { result = &[]models.SearchAheadSlotsResult{ { - Slot: fmt.Sprintf("%v", uint64(cachedBlock.Header.Data.Header.Message.Slot)), - Root: cachedBlock.Header.Data.Root, + Slot: fmt.Sprintf("%v", uint64(cachedBlock.Header.Message.Slot)), + Root: cachedBlock.Root, Orphaned: cachedBlock.Orphaned, }, } diff --git a/handlers/slot.go b/handlers/slot.go index e5a1401f..850221e8 100644 --- a/handlers/slot.go +++ b/handlers/slot.go @@ -142,14 +142,13 @@ func getSlotPageData(blockSlot int64, blockRoot []byte) *models.SlotPageData { func buildSlotPageData(blockSlot int64, blockRoot []byte) (*models.SlotPageData, time.Duration) { currentSlot := utils.TimeToSlot(uint64(time.Now().Unix())) - finalizedHead, err := 
services.GlobalBeaconService.GetFinalizedBlockHead() + finalizedEpoch, _ := services.GlobalBeaconService.GetFinalizedEpoch() var blockData *rpctypes.CombinedBlockResponse - if err == nil { - if blockSlot > -1 { - blockData, err = services.GlobalBeaconService.GetSlotDetailsBySlot(uint64(blockSlot), false) - } else { - blockData, err = services.GlobalBeaconService.GetSlotDetailsByBlockroot(blockRoot, false) - } + var err error + if blockSlot > -1 { + blockData, err = services.GlobalBeaconService.GetSlotDetailsBySlot(uint64(blockSlot), false) + } else { + blockData, err = services.GlobalBeaconService.GetSlotDetailsByBlockroot(blockRoot, false) } if blockData == nil && err == nil { @@ -167,7 +166,7 @@ func buildSlotPageData(blockSlot int64, blockRoot []byte) (*models.SlotPageData, var slot uint64 if blockData != nil { - slot = uint64(blockData.Header.Data.Header.Message.Slot) + slot = uint64(blockData.Header.Message.Slot) } else if blockSlot > -1 { slot = uint64(blockSlot) } else { @@ -176,16 +175,13 @@ func buildSlotPageData(blockSlot int64, blockRoot []byte) (*models.SlotPageData, logrus.Printf("slot page called: %v", slot) pageData := &models.SlotPageData{ - Slot: slot, - Epoch: utils.EpochOfSlot(slot), - Ts: utils.SlotToTime(slot), - NextSlot: slot + 1, - PreviousSlot: slot - 1, - Future: slot >= currentSlot, - } - - if finalizedHead != nil { - pageData.EpochFinalized = uint64(finalizedHead.Data.Header.Message.Slot) >= slot + Slot: slot, + Epoch: utils.EpochOfSlot(slot), + Ts: utils.SlotToTime(slot), + NextSlot: slot + 1, + PreviousSlot: slot - 1, + Future: slot >= currentSlot, + EpochFinalized: finalizedEpoch >= int64(utils.EpochOfSlot(slot)), } assignments, err := services.GlobalBeaconService.GetEpochAssignments(utils.EpochOfSlot(slot)) @@ -196,7 +192,7 @@ func buildSlotPageData(blockSlot int64, blockRoot []byte) (*models.SlotPageData, var cacheTimeout time.Duration if pageData.Future { - timeDiff := pageData.Ts.Sub(time.Now()) + timeDiff := time.Until(pageData.Ts) if timeDiff > 10*time.Minute { cacheTimeout = 10 * time.Minute } else { @@ -223,7 +219,7 @@ func buildSlotPageData(blockSlot int64, blockRoot []byte) (*models.SlotPageData, } else { pageData.Status = uint16(models.SlotStatusFound) } - pageData.Proposer = uint64(blockData.Block.Data.Message.ProposerIndex) + pageData.Proposer = uint64(blockData.Block.Message.ProposerIndex) pageData.ProposerName = services.GlobalBeaconService.GetValidatorName(pageData.Proposer) pageData.Block = getSlotPageBlockData(blockData, assignments) } @@ -233,24 +229,24 @@ func buildSlotPageData(blockSlot int64, blockRoot []byte) (*models.SlotPageData, func getSlotPageBlockData(blockData *rpctypes.CombinedBlockResponse, assignments *rpctypes.EpochAssignments) *models.SlotPageBlockData { pageData := &models.SlotPageBlockData{ - BlockRoot: blockData.Header.Data.Root, - ParentRoot: blockData.Header.Data.Header.Message.ParentRoot, - StateRoot: blockData.Header.Data.Header.Message.StateRoot, - Signature: blockData.Header.Data.Header.Signature, - RandaoReveal: blockData.Block.Data.Message.Body.RandaoReveal, - Graffiti: blockData.Block.Data.Message.Body.Graffiti, - Eth1dataDepositroot: blockData.Block.Data.Message.Body.Eth1Data.DepositRoot, - Eth1dataDepositcount: uint64(blockData.Block.Data.Message.Body.Eth1Data.DepositCount), - Eth1dataBlockhash: blockData.Block.Data.Message.Body.Eth1Data.BlockHash, - ProposerSlashingsCount: uint64(len(blockData.Block.Data.Message.Body.ProposerSlashings)), - AttesterSlashingsCount: 
uint64(len(blockData.Block.Data.Message.Body.AttesterSlashings)), - AttestationsCount: uint64(len(blockData.Block.Data.Message.Body.Attestations)), - DepositsCount: uint64(len(blockData.Block.Data.Message.Body.Deposits)), - VoluntaryExitsCount: uint64(len(blockData.Block.Data.Message.Body.VoluntaryExits)), - SlashingsCount: uint64(len(blockData.Block.Data.Message.Body.ProposerSlashings)) + uint64(len(blockData.Block.Data.Message.Body.AttesterSlashings)), + BlockRoot: blockData.Root, + ParentRoot: blockData.Header.Message.ParentRoot, + StateRoot: blockData.Header.Message.StateRoot, + Signature: blockData.Header.Signature, + RandaoReveal: blockData.Block.Message.Body.RandaoReveal, + Graffiti: blockData.Block.Message.Body.Graffiti, + Eth1dataDepositroot: blockData.Block.Message.Body.Eth1Data.DepositRoot, + Eth1dataDepositcount: uint64(blockData.Block.Message.Body.Eth1Data.DepositCount), + Eth1dataBlockhash: blockData.Block.Message.Body.Eth1Data.BlockHash, + ProposerSlashingsCount: uint64(len(blockData.Block.Message.Body.ProposerSlashings)), + AttesterSlashingsCount: uint64(len(blockData.Block.Message.Body.AttesterSlashings)), + AttestationsCount: uint64(len(blockData.Block.Message.Body.Attestations)), + DepositsCount: uint64(len(blockData.Block.Message.Body.Deposits)), + VoluntaryExitsCount: uint64(len(blockData.Block.Message.Body.VoluntaryExits)), + SlashingsCount: uint64(len(blockData.Block.Message.Body.ProposerSlashings)) + uint64(len(blockData.Block.Message.Body.AttesterSlashings)), } - epoch := utils.EpochOfSlot(uint64(blockData.Header.Data.Header.Message.Slot)) + epoch := utils.EpochOfSlot(uint64(blockData.Header.Message.Slot)) assignmentsMap := make(map[uint64]*rpctypes.EpochAssignments) assignmentsLoaded := make(map[uint64]bool) assignmentsMap[epoch] = assignments @@ -258,7 +254,7 @@ func getSlotPageBlockData(blockData *rpctypes.CombinedBlockResponse, assignments pageData.Attestations = make([]*models.SlotPageAttestation, pageData.AttestationsCount) for i := uint64(0); i < pageData.AttestationsCount; i++ { - attestation := blockData.Block.Data.Message.Body.Attestations[i] + attestation := blockData.Block.Message.Body.Attestations[i] var attAssignments []uint64 attEpoch := utils.EpochOfSlot(uint64(attestation.Data.Slot)) if !assignmentsLoaded[attEpoch] { // load epoch duties if needed @@ -295,7 +291,7 @@ func getSlotPageBlockData(blockData *rpctypes.CombinedBlockResponse, assignments pageData.Deposits = make([]*models.SlotPageDeposit, pageData.DepositsCount) for i := uint64(0); i < pageData.DepositsCount; i++ { - deposit := blockData.Block.Data.Message.Body.Deposits[i] + deposit := blockData.Block.Message.Body.Deposits[i] pageData.Deposits[i] = &models.SlotPageDeposit{ PublicKey: deposit.Data.Pubkey, Withdrawalcredentials: deposit.Data.WithdrawalCredentials, @@ -306,7 +302,7 @@ func getSlotPageBlockData(blockData *rpctypes.CombinedBlockResponse, assignments pageData.VoluntaryExits = make([]*models.SlotPageVoluntaryExit, pageData.VoluntaryExitsCount) for i := uint64(0); i < pageData.VoluntaryExitsCount; i++ { - exit := blockData.Block.Data.Message.Body.VoluntaryExits[i] + exit := blockData.Block.Message.Body.VoluntaryExits[i] pageData.VoluntaryExits[i] = &models.SlotPageVoluntaryExit{ ValidatorIndex: uint64(exit.Message.ValidatorIndex), ValidatorName: services.GlobalBeaconService.GetValidatorName(uint64(exit.Message.ValidatorIndex)), @@ -317,7 +313,7 @@ func getSlotPageBlockData(blockData *rpctypes.CombinedBlockResponse, assignments pageData.AttesterSlashings = 
make([]*models.SlotPageAttesterSlashing, pageData.AttesterSlashingsCount) for i := uint64(0); i < pageData.AttesterSlashingsCount; i++ { - slashing := blockData.Block.Data.Message.Body.AttesterSlashings[i] + slashing := blockData.Block.Message.Body.AttesterSlashings[i] slashingData := &models.SlotPageAttesterSlashing{ Attestation1Indices: make([]uint64, len(slashing.Attestation1.AttestingIndices)), Attestation1Signature: slashing.Attestation1.Signature, @@ -358,7 +354,7 @@ func getSlotPageBlockData(blockData *rpctypes.CombinedBlockResponse, assignments pageData.ProposerSlashings = make([]*models.SlotPageProposerSlashing, pageData.ProposerSlashingsCount) for i := uint64(0); i < pageData.ProposerSlashingsCount; i++ { - slashing := blockData.Block.Data.Message.Body.ProposerSlashings[i] + slashing := blockData.Block.Message.Body.ProposerSlashings[i] pageData.ProposerSlashings[i] = &models.SlotPageProposerSlashing{ ProposerIndex: uint64(slashing.SignedHeader1.Message.ProposerIndex), ProposerName: services.GlobalBeaconService.GetValidatorName(uint64(slashing.SignedHeader1.Message.ProposerIndex)), @@ -376,7 +372,7 @@ func getSlotPageBlockData(blockData *rpctypes.CombinedBlockResponse, assignments } if epoch >= utils.Config.Chain.Config.AltairForkEpoch { - syncAggregate := blockData.Block.Data.Message.Body.SyncAggregate + syncAggregate := blockData.Block.Message.Body.SyncAggregate pageData.SyncAggregateBits = syncAggregate.SyncCommitteeBits pageData.SyncAggregateSignature = syncAggregate.SyncCommitteeSignature if assignments != nil { @@ -394,7 +390,7 @@ func getSlotPageBlockData(blockData *rpctypes.CombinedBlockResponse, assignments } if epoch >= utils.Config.Chain.Config.BellatrixForkEpoch { - executionPayload := blockData.Block.Data.Message.Body.ExecutionPayload + executionPayload := blockData.Block.Message.Body.ExecutionPayload pageData.ExecutionData = &models.SlotPageExecutionData{ ParentHash: executionPayload.ParentHash, FeeRecipient: executionPayload.FeeRecipient, @@ -415,10 +411,10 @@ func getSlotPageBlockData(blockData *rpctypes.CombinedBlockResponse, assignments } if epoch >= utils.Config.Chain.Config.CappellaForkEpoch { - pageData.BLSChangesCount = uint64(len(blockData.Block.Data.Message.Body.SignedBLSToExecutionChange)) + pageData.BLSChangesCount = uint64(len(blockData.Block.Message.Body.SignedBLSToExecutionChange)) pageData.BLSChanges = make([]*models.SlotPageBLSChange, pageData.BLSChangesCount) for i := uint64(0); i < pageData.BLSChangesCount; i++ { - blschange := blockData.Block.Data.Message.Body.SignedBLSToExecutionChange[i] + blschange := blockData.Block.Message.Body.SignedBLSToExecutionChange[i] pageData.BLSChanges[i] = &models.SlotPageBLSChange{ ValidatorIndex: uint64(blschange.Message.ValidatorIndex), ValidatorName: services.GlobalBeaconService.GetValidatorName(uint64(blschange.Message.ValidatorIndex)), @@ -428,10 +424,10 @@ func getSlotPageBlockData(blockData *rpctypes.CombinedBlockResponse, assignments } } - pageData.WithdrawalsCount = uint64(len(blockData.Block.Data.Message.Body.ExecutionPayload.Withdrawals)) + pageData.WithdrawalsCount = uint64(len(blockData.Block.Message.Body.ExecutionPayload.Withdrawals)) pageData.Withdrawals = make([]*models.SlotPageWithdrawal, pageData.WithdrawalsCount) for i := uint64(0); i < pageData.WithdrawalsCount; i++ { - withdrawal := blockData.Block.Data.Message.Body.ExecutionPayload.Withdrawals[i] + withdrawal := blockData.Block.Message.Body.ExecutionPayload.Withdrawals[i] pageData.Withdrawals[i] = &models.SlotPageWithdrawal{ Index: 
uint64(withdrawal.Index), ValidatorIndex: uint64(withdrawal.ValidatorIndex), @@ -443,12 +439,12 @@ func getSlotPageBlockData(blockData *rpctypes.CombinedBlockResponse, assignments } if epoch >= utils.Config.Chain.Config.DenebForkEpoch { - pageData.BlobsCount = uint64(len(blockData.Block.Data.Message.Body.BlobKzgCommitments)) + pageData.BlobsCount = uint64(len(blockData.Block.Message.Body.BlobKzgCommitments)) pageData.Blobs = make([]*models.SlotPageBlob, pageData.BlobsCount) for i := uint64(0); i < pageData.BlobsCount; i++ { blobData := &models.SlotPageBlob{ Index: i, - KzgCommitment: blockData.Block.Data.Message.Body.BlobKzgCommitments[i], + KzgCommitment: blockData.Block.Message.Body.BlobKzgCommitments[i], } pageData.Blobs[i] = blobData } diff --git a/handlers/slots.go b/handlers/slots.go index 610018b4..7ec6225e 100644 --- a/handlers/slots.go +++ b/handlers/slots.go @@ -106,7 +106,7 @@ func buildSlotsPageData(firstSlot uint64, pageSize uint64) (*models.SlotsPageDat } pageData.LastPageSlot = pageSize - 1 - finalizedHead, _ := services.GlobalBeaconService.GetFinalizedBlockHead() + finalizedEpoch, _ := services.GlobalBeaconService.GetFinalizedEpoch() slotLimit := pageSize - 1 var lastSlot uint64 if firstSlot > uint64(slotLimit) { @@ -129,10 +129,8 @@ func buildSlotsPageData(firstSlot uint64, pageSize uint64) (*models.SlotsPageDat allFinalized := true for slotIdx := int64(firstSlot); slotIdx >= int64(lastSlot); slotIdx-- { slot := uint64(slotIdx) - finalized := false - if finalizedHead != nil && uint64(finalizedHead.Data.Header.Message.Slot) >= slot { - finalized = true - } else { + finalized := finalizedEpoch >= int64(utils.EpochOfSlot(slot)) + if !finalized { allFinalized = false } haveBlock := false @@ -232,7 +230,7 @@ func buildSlotsPageDataWithGraffitiFilter(graffiti string, pageIdx uint64, pageS } pageData.LastPageSlot = 0 - finalizedHead, _ := services.GlobalBeaconService.GetFinalizedBlockHead() + finalizedEpoch, _ := services.GlobalBeaconService.GetFinalizedEpoch() // load slots pageData.Slots = make([]*models.SlotsPageDataSlot, 0) @@ -244,10 +242,6 @@ func buildSlotsPageDataWithGraffitiFilter(graffiti string, pageIdx uint64, pageS break } slot := dbBlock.Slot - finalized := false - if finalizedHead != nil && uint64(finalizedHead.Data.Header.Message.Slot) >= slot { - finalized = true - } blockStatus := uint8(1) if dbBlock.Orphaned { blockStatus = 2 @@ -257,7 +251,7 @@ func buildSlotsPageDataWithGraffitiFilter(graffiti string, pageIdx uint64, pageS Slot: slot, Epoch: utils.EpochOfSlot(slot), Ts: utils.SlotToTime(slot), - Finalized: finalized, + Finalized: finalizedEpoch >= int64(utils.EpochOfSlot(slot)), Status: blockStatus, Synchronized: true, Proposer: dbBlock.Proposer, diff --git a/handlers/validator_slots.go b/handlers/validator_slots.go index 812aeeb7..50d161d7 100644 --- a/handlers/validator_slots.go +++ b/handlers/validator_slots.go @@ -84,7 +84,7 @@ func buildValidatorSlotsPageData(validator uint64, pageIdx uint64, pageSize uint } pageData.LastPageSlot = 0 - finalizedHead, _ := services.GlobalBeaconService.GetFinalizedBlockHead() + finalizedEpoch, _ := services.GlobalBeaconService.GetFinalizedEpoch() // load slots pageData.Slots = make([]*models.ValidatorSlotsPageDataSlot, 0) @@ -96,17 +96,13 @@ func buildValidatorSlotsPageData(validator uint64, pageIdx uint64, pageSize uint break } slot := blockAssignment.Slot - finalized := false - if finalizedHead != nil && uint64(finalizedHead.Data.Header.Message.Slot) >= slot { - finalized = true - } blockStatus := uint8(0) slotData := 
&models.ValidatorSlotsPageDataSlot{ Slot: slot, Epoch: utils.EpochOfSlot(slot), Ts: utils.SlotToTime(slot), - Finalized: finalized, + Finalized: finalizedEpoch >= int64(utils.EpochOfSlot(slot)), Status: blockStatus, Proposer: validator, ProposerName: pageData.Name, diff --git a/handlers/validators.go b/handlers/validators.go index ec7bc389..a51954ea 100644 --- a/handlers/validators.go +++ b/handlers/validators.go @@ -73,9 +73,9 @@ func buildValidatorsPageData(firstValIdx uint64, pageSize uint64, stateFilter st validatorSet = validatorSetRsp.Data } - if stateFilter != "" { - // TODO: apply filter - } + //if stateFilter != "" { + // TODO: apply filter + //} totalValidatorCount := uint64(len(validatorSet)) if firstValIdx == 0 { diff --git a/indexer/blockInfo.go b/indexer/blockInfo.go deleted file mode 100644 index 273bcf46..00000000 --- a/indexer/blockInfo.go +++ /dev/null @@ -1,50 +0,0 @@ -package indexer - -import ( - "encoding/json" - - "github.com/pk910/light-beaconchain-explorer/dbtypes" - "github.com/pk910/light-beaconchain-explorer/rpctypes" -) - -type BlockInfo struct { - Header *rpctypes.StandardV1BeaconHeaderResponse - Block *rpctypes.StandardV2BeaconBlockResponse - Orphaned bool -} - -func BuildOrphanedBlock(block *BlockInfo) *dbtypes.OrphanedBlock { - headerJson, err := json.Marshal(block.Header) - if err != nil { - return nil - } - blockJson, err := json.Marshal(block.Block) - if err != nil { - return nil - } - return &dbtypes.OrphanedBlock{ - Root: block.Header.Data.Root, - Header: string(headerJson), - Block: string(blockJson), - } -} - -func ParseOrphanedBlock(blockData *dbtypes.OrphanedBlock) *BlockInfo { - var header rpctypes.StandardV1BeaconHeaderResponse - err := json.Unmarshal([]byte(blockData.Header), &header) - if err != nil { - logger.Warnf("Error parsing orphaned block header from db: %v", err) - return nil - } - var block rpctypes.StandardV2BeaconBlockResponse - err = json.Unmarshal([]byte(blockData.Block), &block) - if err != nil { - logger.Warnf("Error parsing orphaned block body from db: %v", err) - return nil - } - return &BlockInfo{ - Header: &header, - Block: &block, - Orphaned: true, - } -} diff --git a/indexer/cache.go b/indexer/cache.go new file mode 100644 index 00000000..1b1527ed --- /dev/null +++ b/indexer/cache.go @@ -0,0 +1,223 @@ +package indexer + +import ( + "bytes" + "encoding/json" + "sync" + + "github.com/pk910/light-beaconchain-explorer/db" + "github.com/pk910/light-beaconchain-explorer/rpctypes" + "github.com/pk910/light-beaconchain-explorer/utils" +) + +type indexerCache struct { + indexer *Indexer + triggerChan chan bool + synchronizer *synchronizerState + cacheMutex sync.RWMutex + highestSlot int64 + lowestSlot int64 + finalizedEpoch int64 + finalizedRoot []byte + processedEpoch int64 + persistEpoch int64 + cleanupEpoch int64 + slotMap map[uint64][]*CacheBlock + rootMap map[string]*CacheBlock + epochStatsMutex sync.RWMutex + epochStatsMap map[uint64][]*EpochStats + lastValidatorsEpoch int64 + lastValidatorsResp *rpctypes.StandardV1StateValidatorsResponse + validatorLoadingLimiter chan int +} + +func newIndexerCache(indexer *Indexer) *indexerCache { + cache := &indexerCache{ + indexer: indexer, + triggerChan: make(chan bool, 10), + highestSlot: -1, + lowestSlot: -1, + finalizedEpoch: -1, + processedEpoch: -2, + persistEpoch: -1, + cleanupEpoch: -1, + slotMap: make(map[uint64][]*CacheBlock), + rootMap: make(map[string]*CacheBlock), + epochStatsMap: make(map[uint64][]*EpochStats), + lastValidatorsEpoch: -1, + validatorLoadingLimiter: make(chan 
int, 2), + } + cache.loadStoredUnfinalizedCache() + go cache.runCacheLoop() + return cache +} + +func (cache *indexerCache) startSynchronizer(startEpoch uint64) { + cache.cacheMutex.Lock() + defer cache.cacheMutex.Unlock() + + if cache.synchronizer == nil { + cache.synchronizer = newSynchronizer(cache.indexer) + } + if !cache.synchronizer.isEpochAhead(startEpoch) { + cache.synchronizer.startSync(startEpoch) + } +} + +func (cache *indexerCache) setFinalizedHead(epoch int64, root []byte) { + cache.cacheMutex.Lock() + defer cache.cacheMutex.Unlock() + if epoch > cache.finalizedEpoch { + cache.finalizedEpoch = epoch + cache.finalizedRoot = root + + // trigger processing + cache.triggerChan <- true + } +} + +func (cache *indexerCache) getFinalizedHead() (int64, []byte) { + cache.cacheMutex.RLock() + defer cache.cacheMutex.RUnlock() + return cache.finalizedEpoch, cache.finalizedRoot +} + +func (cache *indexerCache) setLastValidators(epoch uint64, validators *rpctypes.StandardV1StateValidatorsResponse) { + cache.cacheMutex.Lock() + defer cache.cacheMutex.Unlock() + if int64(epoch) > cache.lastValidatorsEpoch { + cache.lastValidatorsEpoch = int64(epoch) + cache.lastValidatorsResp = validators + } +} + +func (cache *indexerCache) loadStoredUnfinalizedCache() error { + blockHeaders := db.GetUnfinalizedBlockHeader() + for _, blockHeader := range blockHeaders { + var header rpctypes.SignedBeaconBlockHeader + err := json.Unmarshal([]byte(blockHeader.Header), &header) + if err != nil { + logger.Warnf("Error parsing unfinalized block header from db: %v", err) + continue + } + logger.Debugf("Restored unfinalized block header from db: %v", blockHeader.Slot) + cachedBlock, _ := cache.createOrGetCachedBlock(blockHeader.Root, blockHeader.Slot) + cachedBlock.mutex.Lock() + cachedBlock.header = &header + cachedBlock.isInDb = true + cachedBlock.mutex.Unlock() + } + epochDuties := db.GetUnfinalizedEpochDutyRefs() + for _, epochDuty := range epochDuties { + logger.Debugf("Restored unfinalized block duty ref from db: %v/0x%x", epochDuty.Epoch, epochDuty.DependentRoot) + epochStats, _ := cache.createOrGetEpochStats(epochDuty.Epoch, epochDuty.DependentRoot) + epochStats.dutiesInDb = true + } + return nil +} + +func (cache *indexerCache) resetLowestSlot() { + cache.cacheMutex.Lock() + defer cache.cacheMutex.Unlock() + var lowestSlot int64 = -1 + for slot := range cache.slotMap { + if lowestSlot == -1 || int64(slot) < lowestSlot { + lowestSlot = int64(slot) + } + } + if lowestSlot != cache.lowestSlot { + logger.Debugf("Reset lowest cached slot: %v", lowestSlot) + cache.lowestSlot = lowestSlot + } +} + +func (cache *indexerCache) isCanonicalBlock(blockRoot []byte, head []byte) bool { + res, _ := cache.getCanonicalDistance(blockRoot, head) + return res +} + +func (cache *indexerCache) getCanonicalDistance(blockRoot []byte, head []byte) (bool, uint64) { + if head == nil { + head = cache.finalizedRoot + } + block := cache.getCachedBlock(blockRoot) + var blockSlot uint64 + if block == nil { + blockSlot = uint64(cache.finalizedEpoch+1) * utils.Config.Chain.Config.SlotsPerEpoch + } else { + blockSlot = block.Slot + } + canonicalBlock := cache.getCachedBlock(head) + var distance uint64 = 0 + if bytes.Equal(canonicalBlock.Root, blockRoot) { + return true, distance + } + for canonicalBlock != nil { + if canonicalBlock.Slot < blockSlot { + return false, 0 + } + parentRoot := canonicalBlock.GetParentRoot() + if parentRoot == nil { + return false, 0 + } + distance++ + if bytes.Equal(parentRoot, blockRoot) { + return true, distance 
+ } + canonicalBlock = cache.getCachedBlock(parentRoot) + if canonicalBlock == nil { + return false, 0 + } + } + return false, 0 +} + +func (cache *indexerCache) getLastCanonicalBlock(epoch uint64, head []byte) *CacheBlock { + if head == nil { + head = cache.finalizedRoot + } + canonicalBlock := cache.getCachedBlock(head) + for canonicalBlock != nil && utils.EpochOfSlot(canonicalBlock.Slot) > epoch { + parentRoot := canonicalBlock.GetParentRoot() + if parentRoot == nil { + return nil + } + canonicalBlock = cache.getCachedBlock(parentRoot) + if canonicalBlock == nil { + return nil + } + } + if canonicalBlock != nil && utils.EpochOfSlot(canonicalBlock.Slot) == epoch { + return canonicalBlock + } else { + return nil + } +} + +func (cache *indexerCache) getFirstCanonicalBlock(epoch uint64, head []byte) *CacheBlock { + canonicalBlock := cache.getLastCanonicalBlock(epoch, head) + for canonicalBlock != nil { + canonicalBlock.mutex.RLock() + parentRoot := []byte(canonicalBlock.header.Message.ParentRoot) + canonicalBlock.mutex.RUnlock() + parentCanonicalBlock := cache.getCachedBlock(parentRoot) + if parentCanonicalBlock == nil || utils.EpochOfSlot(parentCanonicalBlock.Slot) != epoch { + return canonicalBlock + } + canonicalBlock = parentCanonicalBlock + } + return nil +} + +func (cache *indexerCache) getCanonicalBlockMap(epoch uint64, head []byte) map[uint64]*CacheBlock { + canonicalMap := make(map[uint64]*CacheBlock) + canonicalBlock := cache.getLastCanonicalBlock(epoch, head) + for canonicalBlock != nil && utils.EpochOfSlot(canonicalBlock.Slot) == epoch { + canonicalBlock.mutex.RLock() + parentRoot := []byte(canonicalBlock.header.Message.ParentRoot) + canonicalMap[canonicalBlock.Slot] = canonicalBlock + canonicalBlock.mutex.RUnlock() + canonicalBlock = cache.getCachedBlock(parentRoot) + } + return canonicalMap +} diff --git a/indexer/cacheBlock.go b/indexer/cacheBlock.go new file mode 100644 index 00000000..a71a7baa --- /dev/null +++ b/indexer/cacheBlock.go @@ -0,0 +1,146 @@ +package indexer + +import ( + "encoding/json" + "sync" + + "github.com/pk910/light-beaconchain-explorer/db" + "github.com/pk910/light-beaconchain-explorer/dbtypes" + "github.com/pk910/light-beaconchain-explorer/rpctypes" +) + +type CacheBlock struct { + Root []byte + Slot uint64 + mutex sync.RWMutex + seenBy uint64 + isInDb bool + header *rpctypes.SignedBeaconBlockHeader + block *rpctypes.SignedBeaconBlock + + dbBlockMutex sync.Mutex + dbBlockCache *dbtypes.Block +} + +func (cache *indexerCache) getCachedBlock(root []byte) *CacheBlock { + cache.cacheMutex.RLock() + defer cache.cacheMutex.RUnlock() + return cache.rootMap[string(root)] +} + +func (cache *indexerCache) createOrGetCachedBlock(root []byte, slot uint64) (*CacheBlock, bool) { + cache.cacheMutex.Lock() + defer cache.cacheMutex.Unlock() + rootKey := string(root) + if cache.rootMap[rootKey] != nil { + return cache.rootMap[rootKey], false + } + cacheBlock := &CacheBlock{ + Root: root, + Slot: slot, + } + cache.rootMap[rootKey] = cacheBlock + if cache.slotMap[slot] == nil { + cache.slotMap[slot] = []*CacheBlock{cacheBlock} + } else { + cache.slotMap[slot] = append(cache.slotMap[slot], cacheBlock) + } + if int64(slot) > cache.highestSlot { + cache.highestSlot = int64(slot) + } + if int64(slot) > cache.lowestSlot { + cache.lowestSlot = int64(slot) + } + return cacheBlock, true +} + +func (cache *indexerCache) removeCachedBlock(cachedBlock *CacheBlock) { + cache.cacheMutex.Lock() + defer cache.cacheMutex.Unlock() + logger.Debugf("Remove cached block: %v (0x%x)", 
cachedBlock.Slot, cachedBlock.Root) + + rootKey := string(cachedBlock.Root) + delete(cache.rootMap, rootKey) + + slotBlocks := cache.slotMap[cachedBlock.Slot] + if slotBlocks != nil { + var idx uint64 + len := uint64(len(slotBlocks)) + for idx = 0; idx < len; idx++ { + if slotBlocks[idx] == cachedBlock { + break + } + } + if idx < len { + if len == 1 { + delete(cache.slotMap, cachedBlock.Slot) + } else { + if idx < len-1 { + cache.slotMap[cachedBlock.Slot][idx] = cache.slotMap[cachedBlock.Slot][len-1] + } + cache.slotMap[cachedBlock.Slot] = cache.slotMap[cachedBlock.Slot][0 : len-1] + } + } + } +} + +func (block *CacheBlock) buildOrphanedBlock() *dbtypes.OrphanedBlock { + headerJson, err := json.Marshal(block.header) + if err != nil { + return nil + } + blockJson, err := json.Marshal(block.GetBlockBody()) + if err != nil { + return nil + } + return &dbtypes.OrphanedBlock{ + Root: block.Root, + Header: string(headerJson), + Block: string(blockJson), + } +} + +func (block *CacheBlock) GetParentRoot() []byte { + block.mutex.RLock() + defer block.mutex.RUnlock() + if block.header != nil { + return block.header.Message.ParentRoot + } + return nil +} + +func (block *CacheBlock) GetHeader() *rpctypes.SignedBeaconBlockHeader { + block.mutex.RLock() + defer block.mutex.RUnlock() + return block.header +} + +func (block *CacheBlock) GetBlockBody() *rpctypes.SignedBeaconBlock { + block.mutex.RLock() + defer block.mutex.RUnlock() + if block.block != nil { + return block.block + } + if !block.isInDb { + return nil + } + + logger.Debugf("loading unfinalized block body from db: %v", block.Slot) + blockData := db.GetUnfinalizedBlock(block.Root) + var blockBody rpctypes.SignedBeaconBlock + err := json.Unmarshal([]byte(blockData.Block), &blockBody) + if err != nil { + logger.Warnf("error parsing unfinalized block body from db: %v", err) + return nil + } + block.block = &blockBody + + return block.block +} + +func (block *CacheBlock) IsCanonical(indexer *Indexer, head []byte) bool { + if head == nil { + _, head = indexer.GetCanonicalHead() + } + return indexer.indexerCache.isCanonicalBlock(block.Root, head) +} diff --git a/indexer/cacheLogic.go b/indexer/cacheLogic.go new file mode 100644 index 00000000..47bc2fc9 --- /dev/null +++ b/indexer/cacheLogic.go @@ -0,0 +1,381 @@ +package indexer + +import ( + "time" + + "github.com/pk910/light-beaconchain-explorer/db" + "github.com/pk910/light-beaconchain-explorer/dbtypes" + "github.com/pk910/light-beaconchain-explorer/utils" +) + +func (cache *indexerCache) runCacheLoop() { + defer func() { + if err := recover(); err != nil { + logger.WithError(err.(error)).Errorf("uncaught panic in runCacheLoop subroutine: %v", err) + } + }() + + for { + select { + case <-cache.triggerChan: + case <-time.After(30 * time.Second): + } + logger.Debugf("run indexer cache logic") + err := cache.runCacheLogic() + if err != nil { + logger.Errorf("indexer cache error: %v, retrying in 10 sec...", err) + time.Sleep(10 * time.Second) + } + } +} + +func (cache *indexerCache) runCacheLogic() error { + if cache.highestSlot < 0 { + return nil + } + + var processingEpoch int64 + headEpoch := int64(utils.EpochOfSlot(uint64(cache.highestSlot))) + if cache.indexer.writeDb { + if cache.finalizedEpoch > 0 && cache.processedEpoch == -2 { + syncState := dbtypes.IndexerSyncState{} + _, err := db.GetExplorerState("indexer.syncstate", &syncState) + if err != nil { + cache.processedEpoch = -1 + } else { + cache.processedEpoch = int64(syncState.Epoch) + } + + if cache.processedEpoch < cache.finalizedEpoch { 
+ var syncStartEpoch uint64 + if cache.processedEpoch < 0 { + syncStartEpoch = 0 + } else { + syncStartEpoch = uint64(cache.processedEpoch) + } + cache.startSynchronizer(syncStartEpoch) + cache.processedEpoch = cache.finalizedEpoch + } + } + + logger.Debugf("check finalized processing %v < %v", cache.processedEpoch, cache.finalizedEpoch) + if cache.processedEpoch < cache.finalizedEpoch { + // process finalized epochs + err := cache.processFinalizedEpochs() + if err != nil { + return err + } + } + + if cache.lowestSlot >= 0 && int64(utils.EpochOfSlot(uint64(cache.lowestSlot))) < cache.processedEpoch { + // process cached blocks in already processed epochs (duplicates or new orphaned blocks) + err := cache.processOrphanedBlocks(cache.processedEpoch) + if err != nil { + return err + } + } + processingEpoch = cache.processedEpoch + } else { + processingEpoch = cache.finalizedEpoch + } + + if cache.persistEpoch < headEpoch { + // process cache persistence + err := cache.processCachePersistence() + if err != nil { + return err + } + cache.persistEpoch = headEpoch + } + + if cache.cleanupEpoch < processingEpoch { + // process cache cleanup + err := cache.processCacheCleanup(processingEpoch) + if err != nil { + return err + } + cache.cleanupEpoch = processingEpoch + } + + return nil +} + +func (cache *indexerCache) processFinalizedEpochs() error { + if cache.finalizedEpoch < 0 { + return nil + } + for cache.processedEpoch < cache.finalizedEpoch { + processEpoch := uint64(cache.processedEpoch + 1) + err := cache.processFinalizedEpoch(processEpoch) + if err != nil { + return err + } + cache.processedEpoch = int64(processEpoch) + } + return nil +} + +func (cache *indexerCache) processFinalizedEpoch(epoch uint64) error { + firstSlot := epoch * utils.Config.Chain.Config.SlotsPerEpoch + firstBlock := cache.getFirstCanonicalBlock(epoch, nil) + var epochTarget []byte + var epochDependentRoot []byte + if firstBlock == nil { + logger.Warnf("could not find epoch %v target (no block found)", epoch) + } else { + if firstBlock.Slot == firstSlot { + epochTarget = firstBlock.Root + } else { + epochTarget = firstBlock.header.Message.ParentRoot + } + epochDependentRoot = firstBlock.header.Message.ParentRoot + } + logger.Infof("processing finalized epoch %v: target: 0x%x, dependent: 0x%x", epoch, epochTarget, epochDependentRoot) + + // get epoch stats + epochStats, isNewStats := cache.createOrGetEpochStats(epoch, epochDependentRoot) + if isNewStats { + logger.Warnf("missing epoch stats during finalization processing (epoch: %v)", epoch) + client := cache.indexer.getReadyClient(true, nil) + if client != nil { + client.ensureEpochStats(epoch, client.lastHeadRoot) + time.Sleep(10 * time.Millisecond) + } + } + + // get canonical blocks + canonicalMap := cache.getCanonicalBlockMap(epoch, nil) + // append next epoch blocks (needed for vote aggregation) + for slot, block := range cache.getCanonicalBlockMap(epoch+1, nil) { + canonicalMap[slot] = block + } + + // calculate votes + epochVotes := aggregateEpochVotes(canonicalMap, epoch, epochStats, epochTarget, false) + + if epochStats.validatorStats != nil { + logger.Infof("epoch %v stats: %v validators (%v)", epoch, epochStats.validatorStats.ValidatorCount, epochStats.validatorStats.EligibleAmount) + } + logger.Infof("epoch %v votes: target %v + %v = %v", epoch, epochVotes.currentEpoch.targetVoteAmount, epochVotes.nextEpoch.targetVoteAmount, epochVotes.currentEpoch.targetVoteAmount+epochVotes.nextEpoch.targetVoteAmount) + logger.Infof("epoch %v votes: head %v + %v =
%v", epoch, epochVotes.currentEpoch.headVoteAmount, epochVotes.nextEpoch.headVoteAmount, epochVotes.currentEpoch.headVoteAmount+epochVotes.nextEpoch.headVoteAmount) + logger.Infof("epoch %v votes: total %v + %v = %v", epoch, epochVotes.currentEpoch.totalVoteAmount, epochVotes.nextEpoch.totalVoteAmount, epochVotes.currentEpoch.totalVoteAmount+epochVotes.nextEpoch.totalVoteAmount) + + // store canonical blocks to db and remove from cache + tx, err := db.WriterDb.Beginx() + if err != nil { + logger.Errorf("error starting db transactions: %v", err) + return err + } + defer tx.Rollback() + + err = persistEpochData(epoch, canonicalMap, epochStats, epochVotes, tx) + if err != nil { + logger.Errorf("error persisting epoch data to db: %v", err) + } + + if cache.synchronizer == nil || !cache.synchronizer.running { + err = db.SetExplorerState("indexer.syncstate", &dbtypes.IndexerSyncState{ + Epoch: epoch, + }, tx) + if err != nil { + logger.Errorf("error while updating sync state: %v", err) + } + } + + if err := tx.Commit(); err != nil { + logger.Errorf("error committing db transaction: %v", err) + return err + } + + // remove canonical blocks from cache + for slot, block := range canonicalMap { + if utils.EpochOfSlot(slot) == epoch { + cache.removeCachedBlock(block) + } + } + + return nil +} + +func (cache *indexerCache) processOrphanedBlocks(processedEpoch int64) error { + cachedBlocks := map[string]*CacheBlock{} + orphanedBlocks := map[string]*CacheBlock{} + blockRoots := [][]byte{} + cache.cacheMutex.RLock() + for slot, blocks := range cache.slotMap { + if int64(utils.EpochOfSlot(slot)) <= processedEpoch { + for _, block := range blocks { + cachedBlocks[string(block.Root)] = block + orphanedBlocks[string(block.Root)] = block + blockRoots = append(blockRoots, block.Root) + } + } + } + cache.cacheMutex.RUnlock() + + logger.Infof("processing %v non-canonical blocks (epoch <= %v)", len(cachedBlocks), processedEpoch) + if len(cachedBlocks) == 0 { + return nil + } + + // check if blocks are already in db + for _, blockRef := range db.GetBlockOrphanedRefs(blockRoots) { + if blockRef.Orphaned { + logger.Debugf("processed duplicate orphaned block: 0x%x", blockRef.Root) + } else { + logger.Warnf("processed duplicate canonical block in orphaned handler: 0x%x", blockRef.Root) + } + delete(orphanedBlocks, string(blockRef.Root)) + } + + // save orphaned blocks to db + tx, err := db.WriterDb.Beginx() + if err != nil { + logger.Errorf("error starting db transactions: %v", err) + return err + } + defer tx.Rollback() + + for _, block := range orphanedBlocks { + dbBlock := buildDbBlock(block, cache.getEpochStats(utils.EpochOfSlot(block.Slot), nil)) + dbBlock.Orphaned = true + db.InsertBlock(dbBlock, tx) + db.InsertOrphanedBlock(block.buildOrphanedBlock(), tx) + } + + if err := tx.Commit(); err != nil { + logger.Errorf("error committing db transaction: %v", err) + return err + } + + // remove blocks from cache + for _, block := range cachedBlocks { + cache.removeCachedBlock(block) + } + cache.resetLowestSlot() + + return nil +} + +func (cache *indexerCache) processCachePersistence() error { + pruneBlocks := []*CacheBlock{} + cache.cacheMutex.RLock() + headSlot := cache.highestSlot + var headEpoch uint64 + if headSlot >= 0 { + headEpoch = utils.EpochOfSlot(uint64(headSlot)) + } + if headEpoch > uint64(cache.indexer.inMemoryEpochs) { + pruneEpoch := headEpoch - uint64(cache.indexer.inMemoryEpochs) + for slot, blocks := range cache.slotMap { + if utils.EpochOfSlot(slot) <= pruneEpoch { + for _, block := range blocks { 
+ if block.block == nil { + continue + } + pruneBlocks = append(pruneBlocks, block) + } + } + } + } + cache.cacheMutex.RUnlock() + + pruneCount := len(pruneBlocks) + logger.Infof("processing cache persistence: prune %v blocks", pruneCount) + if pruneCount == 0 { + return nil + } + + if cache.indexer.writeDb { + tx, err := db.WriterDb.Beginx() + if err != nil { + logger.Errorf("error starting db transactions: %v", err) + return err + } + defer tx.Rollback() + + for _, block := range pruneBlocks { + if !block.isInDb { + orphanedBlock := block.buildOrphanedBlock() + err := db.InsertUnfinalizedBlock(&dbtypes.UnfinalizedBlock{ + Root: block.Root, + Slot: block.Slot, + Header: orphanedBlock.Header, + Block: orphanedBlock.Block, + }, tx) + if err != nil { + logger.Errorf("error inserting unfinalized block: %v", err) + return err + } + block.isInDb = true + } + } + + if err := tx.Commit(); err != nil { + logger.Errorf("error committing db transaction: %v", err) + return err + } + } + + for _, block := range pruneBlocks { + block.block = nil + } + + return nil +} + +func (cache *indexerCache) processCacheCleanup(processedEpoch int64) error { + cachedBlocks := map[string]*CacheBlock{} + clearStats := []*EpochStats{} + cache.cacheMutex.RLock() + for slot, blocks := range cache.slotMap { + if int64(utils.EpochOfSlot(slot)) <= processedEpoch { + for _, block := range blocks { + cachedBlocks[string(block.Root)] = block + } + } + } + cache.cacheMutex.RUnlock() + cache.epochStatsMutex.RLock() + for epoch, stats := range cache.epochStatsMap { + if int64(epoch) <= processedEpoch { + clearStats = append(clearStats, stats...) + } + } + cache.epochStatsMutex.RUnlock() + + logger.Infof("processing cache cleanup: remove %v blocks, %v epoch stats", len(cachedBlocks), len(clearStats)) + if len(cachedBlocks) > 0 { + // remove blocks from cache + for _, block := range cachedBlocks { + cache.removeCachedBlock(block) + } + } + + if len(clearStats) > 0 { + // remove blocks from cache + for _, stats := range clearStats { + cache.removeEpochStats(stats) + } + } + + if cache.indexer.writeDb { + tx, err := db.WriterDb.Beginx() + if err != nil { + logger.Errorf("error starting db transactions: %v", err) + return err + } + defer tx.Rollback() + + deleteBefore := uint64(processedEpoch+1) * utils.Config.Chain.Config.SlotsPerEpoch + logger.Debugf("delete persisted unfinalized cache before slot %v", deleteBefore) + db.DeleteUnfinalizedBefore(deleteBefore, tx) + + if err := tx.Commit(); err != nil { + logger.Errorf("error committing db transaction: %v", err) + return err + } + } + + return nil +} diff --git a/indexer/client.go b/indexer/client.go new file mode 100644 index 00000000..d7cd6167 --- /dev/null +++ b/indexer/client.go @@ -0,0 +1,411 @@ +package indexer + +import ( + "bytes" + "fmt" + "sync" + "time" + + "github.com/pk910/light-beaconchain-explorer/rpc" + "github.com/pk910/light-beaconchain-explorer/rpctypes" + "github.com/pk910/light-beaconchain-explorer/utils" +) + +type IndexerClient struct { + clientIdx uint8 + clientName string + rpcClient *rpc.BeaconClient + archive bool + priority int + versionStr string + indexerCache *indexerCache + cacheMutex sync.RWMutex + lastStreamEvent time.Time + isSynchronizing bool + isConnected bool + lastHeadSlot int64 + lastHeadRoot []byte + lastEpochStats int64 + lastFinalizedEpoch int64 + lastFinalizedRoot []byte +} + +func newIndexerClient(clientIdx uint8, clientName string, rpcClient *rpc.BeaconClient, indexerCache *indexerCache, archive bool, priority int) *IndexerClient { + 
client := IndexerClient{ + clientIdx: clientIdx, + clientName: clientName, + rpcClient: rpcClient, + archive: archive, + priority: priority, + indexerCache: indexerCache, + lastHeadSlot: -1, + lastEpochStats: -1, + lastFinalizedEpoch: -1, + } + go client.runIndexerClientLoop() + return &client +} + +func (client *IndexerClient) getLastHead() (int64, []byte) { + client.cacheMutex.RLock() + defer client.cacheMutex.RUnlock() + return client.lastHeadSlot, client.lastHeadRoot +} + +func (client *IndexerClient) runIndexerClientLoop() { + for { + err := client.runIndexerClient() + if err != nil { + logger.WithField("client", client.clientName).Errorf("Indexer client error: %v, retrying in 10 sec...", err) + time.Sleep(10 * time.Second) + } else { + return + } + } +} + +func (client *IndexerClient) runIndexerClient() error { + // get node version + nodeVersion, err := client.rpcClient.GetNodeVersion() + if err != nil { + return fmt.Errorf("error while fetching node version: %v", err) + } + client.versionStr = nodeVersion.Data.Version + + // check genesis + genesis, err := client.rpcClient.GetGenesis() + if err != nil { + return fmt.Errorf("error while fetching genesis: %v", err) + } + if genesis == nil { + return fmt.Errorf("no genesis block found") + } + genesisTime := uint64(genesis.Data.GenesisTime) + if genesisTime != utils.Config.Chain.GenesisTimestamp { + return fmt.Errorf("genesis time from RPC does not match the genesis time from the explorer configuration") + } + if genesis.Data.GenesisForkVersion.String() != utils.Config.Chain.Config.GenesisForkVersion { + return fmt.Errorf("genesis fork version from RPC does not match the genesis fork version from the explorer configuration") + } + + // check synchronization state + syncStatus, err := client.rpcClient.GetNodeSyncing() + if err != nil { + return fmt.Errorf("error while fetching synchronization status: %v", err) + } + if syncStatus == nil { + return fmt.Errorf("could not get synchronization status") + } + client.isSynchronizing = syncStatus.Data.IsSyncing + if syncStatus.Data.IsSyncing { + return fmt.Errorf("beacon node is synchronizing") + } + + logger.WithField("client", client.clientName).Debugf("endpoint %v ready: %v ", client.clientName, client.versionStr) + + // start event stream + blockStream := client.rpcClient.NewBlockStream(rpc.StreamBlockEvent | rpc.StreamHeadEvent | rpc.StreamFinalizedEvent) + defer blockStream.Close() + + // get finalized header + finalizedHeader, err := client.rpcClient.GetFinalizedBlockHead() + if err != nil { + logger.WithField("client", client.clientName).Warnf("could not get finalized header: %v", err) + } + var finalizedSlot uint64 + if finalizedHeader != nil { + client.cacheMutex.Lock() + finalizedSlot = uint64(finalizedHeader.Data.Header.Message.Slot) + client.lastFinalizedEpoch = int64(utils.EpochOfSlot(uint64(finalizedHeader.Data.Header.Message.Slot)) - 1) + client.lastFinalizedRoot = finalizedHeader.Data.Root + client.cacheMutex.Unlock() + } + + // prefill cache + err = client.prefillCache(finalizedSlot) + if err != nil { + return err + } + + // set finalized head and trigger epoch processing / synchronization + if finalizedHeader != nil { + client.indexerCache.setFinalizedHead(client.lastFinalizedEpoch, client.lastFinalizedRoot) + } + + // process events + client.lastStreamEvent = time.Now() + for { + var eventTimeout time.Duration = time.Since(client.lastStreamEvent) + if eventTimeout > 30*time.Second { + eventTimeout = 0 + } else { + eventTimeout = 30*time.Second - eventTimeout + } + select { + case evt :=
<-blockStream.EventChan: + now := time.Now() + switch evt.Event { + case rpc.StreamBlockEvent: + client.processBlockEvent(evt.Data.(*rpctypes.StandardV1StreamedBlockEvent)) + case rpc.StreamHeadEvent: + client.processHeadEvent(evt.Data.(*rpctypes.StandardV1StreamedHeadEvent)) + case rpc.StreamFinalizedEvent: + client.processFinalizedEvent(evt.Data.(*rpctypes.StandardV1StreamedFinalizedCheckpointEvent)) + } + logger.WithField("client", client.clientName).Debugf("event (%v) processing time: %v ms", evt.Event, time.Since(now).Milliseconds()) + client.lastStreamEvent = time.Now() + case ready := <-blockStream.ReadyChan: + if client.isConnected != ready { + client.isConnected = ready + if ready { + logger.WithField("client", client.clientName).Info("RPC event stream connected") + } else { + logger.WithField("client", client.clientName).Info("RPC event stream disconnected") + } + } + case <-time.After(eventTimeout): + logger.WithField("client", client.clientName).Info("no head event since 30 secs, polling chain head") + err := client.pollLatestBlocks() + if err != nil { + client.isConnected = false + return err + } + } + + currentEpoch := utils.TimeToEpoch(time.Now()) + if currentEpoch > client.lastEpochStats { + // ensure latest epoch stats are loaded for chain of this client + client.ensureEpochStats(uint64(currentEpoch), client.lastHeadRoot) + } + } +} + +func (client *IndexerClient) prefillCache(finalizedSlot uint64) error { + // get latest header + latestHeader, err := client.rpcClient.GetLatestBlockHead() + if err != nil { + return fmt.Errorf("could not get latest header: %v", err) + } + if latestHeader == nil { + return fmt.Errorf("could not find latest header") + } + client.setHeadBlock(latestHeader.Data.Root, uint64(latestHeader.Data.Header.Message.Slot)) + + currentBlock, isNewBlock := client.indexerCache.createOrGetCachedBlock(latestHeader.Data.Root, uint64(latestHeader.Data.Header.Message.Slot)) + if isNewBlock { + logger.WithField("client", client.clientName).Infof("received block %v:%v [0x%x] warmup, head", utils.EpochOfSlot(uint64(client.lastHeadSlot)), client.lastHeadSlot, client.lastHeadRoot) + } else { + logger.WithField("client", client.clientName).Debugf("received known block %v:%v [0x%x] warmup, head", utils.EpochOfSlot(uint64(client.lastHeadSlot)), client.lastHeadSlot, client.lastHeadRoot) + } + client.ensureBlock(currentBlock, &latestHeader.Data.Header) + + // walk backwards and load all blocks until we reach a finalized epoch + parentRoot := []byte(currentBlock.header.Message.ParentRoot) + for { + var parentHead *rpctypes.SignedBeaconBlockHeader + parentBlock := client.indexerCache.getCachedBlock(parentRoot) + if parentBlock != nil { + parentBlock.mutex.RLock() + parentHead = parentBlock.header + parentBlock.mutex.RUnlock() + } + if parentHead == nil { + headerRsp, err := client.rpcClient.GetBlockHeaderByBlockroot(parentRoot) + if err != nil { + return fmt.Errorf("could not load parent header: %v", err) + } + if headerRsp == nil { + return fmt.Errorf("could not find parent header 0x%x", parentRoot) + } + parentHead = &headerRsp.Data.Header + } + parentSlot := uint64(parentHead.Message.Slot) + var isNewBlock bool + if parentBlock == nil { + parentBlock, isNewBlock = client.indexerCache.createOrGetCachedBlock(parentRoot, parentSlot) + } + if isNewBlock { + logger.WithField("client", client.clientName).Infof("received block %v:%v [0x%x] warmup", utils.EpochOfSlot(parentSlot), parentSlot, parentRoot) + } else { + logger.WithField("client", client.clientName).Debugf("received 
known block %v:%v [0x%x] warmup", utils.EpochOfSlot(parentSlot), parentSlot, parentRoot) + } + client.ensureBlock(parentBlock, parentHead) + if parentSlot <= finalizedSlot { + logger.WithField("client", client.clientName).Debugf("prefill cache: reached finalized slot %v:%v [0x%x]", utils.EpochOfSlot(parentSlot), parentSlot, parentRoot) + break + } + if parentSlot == 0 { + logger.WithField("client", client.clientName).Debugf("prefill cache: reached genesis slot [0x%x]", parentRoot) + break + } + parentRoot = parentHead.Message.ParentRoot + } + + // ensure epoch stats + var firstEpoch uint64 + if finalizedSlot == 0 { + firstEpoch = 0 + } else { + firstEpoch = utils.EpochOfSlot(finalizedSlot) + } + currentEpoch := utils.TimeToEpoch(time.Now()) + if currentEpoch < 0 { + currentEpoch = -1 + } + for epoch := firstEpoch; int64(epoch) <= currentEpoch; epoch++ { + client.ensureEpochStats(epoch, currentBlock.Root) + } + + return nil +} + +func (client *IndexerClient) ensureBlock(block *CacheBlock, header *rpctypes.SignedBeaconBlockHeader) error { + // ensure the cached block is loaded (header & block body), load missing parts + block.mutex.Lock() + defer block.mutex.Unlock() + if block.header == nil { + if header == nil { + headerRsp, err := client.rpcClient.GetBlockHeaderByBlockroot(block.Root) + if err != nil { + return err + } + header = &headerRsp.Data.Header + } + block.header = header + } + if block.block == nil && !block.isInDb { + blockRsp, err := client.rpcClient.GetBlockBodyByBlockroot(block.Root) + if err != nil { + return err + } + block.block = &blockRsp.Data + } + // set seen flag + clientFlag := uint64(1) << client.clientIdx + block.seenBy |= clientFlag + return nil +} + +func (client *IndexerClient) pollLatestBlocks() error { + // get latest header + latestHeader, err := client.rpcClient.GetLatestBlockHead() + if err != nil { + return fmt.Errorf("could not get latest header: %v", err) + } + if latestHeader == nil { + return fmt.Errorf("could not find latest header") + } + client.setHeadBlock(latestHeader.Data.Root, uint64(latestHeader.Data.Header.Message.Slot)) + + currentBlock, isNewBlock := client.indexerCache.createOrGetCachedBlock(latestHeader.Data.Root, uint64(latestHeader.Data.Header.Message.Slot)) + if isNewBlock { + logger.WithField("client", client.clientName).Infof("received block %v:%v [0x%x] polled, head", utils.EpochOfSlot(uint64(client.lastHeadSlot)), client.lastHeadSlot, client.lastHeadRoot) + } else { + logger.WithField("client", client.clientName).Debugf("received known block %v:%v [0x%x] polled, head", utils.EpochOfSlot(uint64(client.lastHeadSlot)), client.lastHeadSlot, client.lastHeadRoot) + } + err = client.ensureBlock(currentBlock, &latestHeader.Data.Header) + if err != nil { + return err + } + err = client.ensureParentBlocks(currentBlock) + if err != nil { + return err + } + return nil +} + +func (client *IndexerClient) ensureParentBlocks(currentBlock *CacheBlock) error { + // walk backwards and load all blocks until we reach a block that is marked as seen by this client or is smaller than finalized + parentRoot := []byte(currentBlock.header.Message.ParentRoot) + for { + var parentHead *rpctypes.SignedBeaconBlockHeader + parentBlock := client.indexerCache.getCachedBlock(parentRoot) + if parentBlock != nil { + parentBlock.mutex.RLock() + parentHead = parentBlock.header + // check if already marked as seen by this client + clientFlag := uint64(1) << client.clientIdx + isSeen := parentBlock.seenBy&clientFlag > 0 + parentBlock.mutex.RUnlock() + if isSeen { + break + 
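The ensureBlock/ensureParentBlocks pair above tracks which clients have already reported a block through the seenBy bit field. A minimal, self-contained sketch of that bookkeeping (illustrative names, not part of the patch; one bit per client index, so it supports at most 64 clients):

```go
package main

import "fmt"

type cachedBlock struct {
	seenBy uint64 // bit i set => client with index i has seen this block
}

// markSeen records that the client with the given index reported this block.
func markSeen(block *cachedBlock, clientIdx uint8) {
	block.seenBy |= uint64(1) << clientIdx
}

// wasSeen reports whether the client with the given index already saw the block.
func wasSeen(block *cachedBlock, clientIdx uint8) bool {
	return block.seenBy&(uint64(1)<<clientIdx) > 0
}

func main() {
	block := &cachedBlock{}
	markSeen(block, 3)
	fmt.Println(wasSeen(block, 3), wasSeen(block, 1)) // true false
}
```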
} + } + if parentHead == nil { + headerRsp, err := client.rpcClient.GetBlockHeaderByBlockroot(parentRoot) + if err != nil { + return fmt.Errorf("could not load parent header [0x%x]: %v", parentRoot, err) + } + if headerRsp == nil { + return fmt.Errorf("could not find parent header [0x%x]", parentRoot) + } + parentHead = &headerRsp.Data.Header + } + parentSlot := uint64(parentHead.Message.Slot) + isNewBlock := false + if parentBlock == nil { + parentBlock, isNewBlock = client.indexerCache.createOrGetCachedBlock(parentRoot, parentSlot) + } + if isNewBlock { + logger.WithField("client", client.clientName).Infof("received block %v:%v [0x%x] backfill", utils.EpochOfSlot(parentSlot), parentSlot, parentRoot) + } else { + logger.WithField("client", client.clientName).Debugf("received known block %v:%v [0x%x] backfill", utils.EpochOfSlot(parentSlot), parentSlot, parentRoot) + } + client.ensureBlock(parentBlock, parentHead) + if int64(utils.EpochOfSlot(parentSlot)) <= client.lastFinalizedEpoch { + logger.WithField("client", client.clientName).Debugf("backfill cache: reached finalized slot %v:%v [0x%x]", utils.EpochOfSlot(parentSlot), parentSlot, parentRoot) + break + } + if parentSlot == 0 { + logger.WithField("client", client.clientName).Debugf("backfill cache: reached genesis slot [0x%x]", parentRoot) + break + } + parentRoot = parentHead.Message.ParentRoot + } + return nil +} + +func (client *IndexerClient) setHeadBlock(root []byte, slot uint64) error { + client.cacheMutex.Lock() + if bytes.Equal(client.lastHeadRoot, root) { + client.cacheMutex.Unlock() + return nil + } + client.lastHeadSlot = int64(slot) + client.lastHeadRoot = root + client.cacheMutex.Unlock() + + return nil +} + +func (client *IndexerClient) processBlockEvent(evt *rpctypes.StandardV1StreamedBlockEvent) error { + currentBlock, isNewBlock := client.indexerCache.createOrGetCachedBlock(evt.Block, uint64(evt.Slot)) + if isNewBlock { + logger.WithField("client", client.clientName).Infof("received block %v:%v [0x%x] stream", utils.EpochOfSlot(currentBlock.Slot), currentBlock.Slot, currentBlock.Root) + } else { + logger.WithField("client", client.clientName).Debugf("received known block %v:%v [0x%x] stream", utils.EpochOfSlot(currentBlock.Slot), currentBlock.Slot, currentBlock.Root) + } + err := client.ensureBlock(currentBlock, nil) + if err != nil { + return err + } + err = client.ensureParentBlocks(currentBlock) + if err != nil { + return err + } + return nil +} + +func (client *IndexerClient) processHeadEvent(evt *rpctypes.StandardV1StreamedHeadEvent) error { + currentBlock := client.indexerCache.getCachedBlock(evt.Block) + if currentBlock == nil { + return fmt.Errorf("received head event for non existing block: %v", evt.Block.String()) + } + return client.setHeadBlock(evt.Block, uint64(evt.Slot)) +} + +func (client *IndexerClient) processFinalizedEvent(evt *rpctypes.StandardV1StreamedFinalizedCheckpointEvent) error { + logger.WithField("client", client.clientName).Debugf("received finalization_checkpoint event: epoch %v [%s]", evt.Epoch, evt.Block.String()) + client.indexerCache.setFinalizedHead(int64(evt.Epoch)-1, evt.Block) + return nil +} diff --git a/indexer/epochStats.go b/indexer/epochStats.go new file mode 100644 index 00000000..60d1cd8a --- /dev/null +++ b/indexer/epochStats.go @@ -0,0 +1,334 @@ +package indexer + +import ( + "bytes" + "fmt" + "strconv" + "strings" + "sync" + + "github.com/pk910/light-beaconchain-explorer/dbtypes" + "github.com/pk910/light-beaconchain-explorer/rpctypes" + 
"github.com/pk910/light-beaconchain-explorer/utils" +) + +type EpochStats struct { + Epoch uint64 + DependentRoot []byte + dependentStateRef string + dutiesInDb bool + dutiesMutex sync.RWMutex + validatorsMutex sync.RWMutex + proposerAssignments map[uint64]uint64 + attestorAssignments map[string][]uint64 + syncAssignments []uint64 + validatorStats *EpochValidatorStats + dbEpochMutex sync.Mutex + dbEpochCache *dbtypes.Epoch +} + +type EpochValidatorStats struct { + ValidatorCount uint64 + ValidatorBalance uint64 + EligibleAmount uint64 + ValidatorBalances map[uint64]uint64 +} + +func (cache *indexerCache) getEpochStats(epoch uint64, dependendRoot []byte) *EpochStats { + cache.epochStatsMutex.RLock() + defer cache.epochStatsMutex.RUnlock() + if cache.epochStatsMap[epoch] != nil { + for _, epochStats := range cache.epochStatsMap[epoch] { + if dependendRoot == nil || bytes.Equal(epochStats.DependentRoot, dependendRoot) { + return epochStats + } + } + } + return nil +} + +func (cache *indexerCache) createOrGetEpochStats(epoch uint64, dependendRoot []byte) (*EpochStats, bool) { + cache.epochStatsMutex.Lock() + defer cache.epochStatsMutex.Unlock() + if cache.epochStatsMap[epoch] == nil { + cache.epochStatsMap[epoch] = make([]*EpochStats, 0) + } else { + for _, epochStats := range cache.epochStatsMap[epoch] { + if bytes.Equal(epochStats.DependentRoot, dependendRoot) { + return epochStats, false + } + } + } + epochStats := &EpochStats{ + Epoch: epoch, + DependentRoot: dependendRoot, + } + cache.epochStatsMap[epoch] = append(cache.epochStatsMap[epoch], epochStats) + return epochStats, true +} + +func (cache *indexerCache) removeEpochStats(epochStats *EpochStats) { + cache.epochStatsMutex.Lock() + defer cache.epochStatsMutex.Unlock() + logger.Debugf("remove cached epoch stats: %v", epochStats.Epoch) + + allEpochStats := cache.epochStatsMap[epochStats.Epoch] + if allEpochStats != nil { + var idx uint64 + len := uint64(len(allEpochStats)) + for idx = 0; idx < len; idx++ { + if allEpochStats[idx] == epochStats { + break + } + } + if idx < len { + if len == 1 { + delete(cache.epochStatsMap, epochStats.Epoch) + } else { + if idx < len-1 { + cache.epochStatsMap[epochStats.Epoch][idx] = cache.epochStatsMap[epochStats.Epoch][len-1] + } + cache.epochStatsMap[epochStats.Epoch] = cache.epochStatsMap[epochStats.Epoch][0 : len-1] + } + } + } +} + +func (epochStats *EpochStats) GetDependentStateRef() string { + epochStats.dutiesMutex.RLock() + defer epochStats.dutiesMutex.RUnlock() + return epochStats.dependentStateRef +} + +func (epochStats *EpochStats) GetProposerAssignments() map[uint64]uint64 { + epochStats.dutiesMutex.RLock() + defer epochStats.dutiesMutex.RUnlock() + return epochStats.proposerAssignments +} + +func (epochStats *EpochStats) GetAttestorAssignments() map[string][]uint64 { + epochStats.dutiesMutex.RLock() + defer epochStats.dutiesMutex.RUnlock() + return epochStats.attestorAssignments +} + +func (epochStats *EpochStats) GetSyncAssignments() []uint64 { + epochStats.dutiesMutex.RLock() + defer epochStats.dutiesMutex.RUnlock() + return epochStats.syncAssignments +} + +func (client *IndexerClient) ensureEpochStats(epoch uint64, head []byte) error { + var dependendRoot []byte + var proposerRsp *rpctypes.StandardV1ProposerDutiesResponse + if epoch > 0 { + firstBlock := client.indexerCache.getFirstCanonicalBlock(epoch, head) + if firstBlock != nil { + logger.WithField("client", client.clientName).Debugf("canonical first block for epoch %v: %v/0x%x (head: 0x%x)", epoch, firstBlock.Slot, firstBlock.Root, 
head) + firstBlock.mutex.RLock() + if firstBlock.header != nil { + dependendRoot = firstBlock.header.Message.ParentRoot + } + firstBlock.mutex.RUnlock() + } + if dependendRoot == nil { + lastBlock := client.indexerCache.getLastCanonicalBlock(epoch-1, head) + if lastBlock != nil { + logger.WithField("client", client.clientName).Debugf("canonical last block for epoch %v: %v/0x%x (head: 0x%x)", epoch-1, lastBlock.Slot, lastBlock.Root, head) + dependendRoot = lastBlock.Root + } + } + } + if dependendRoot == nil { + var err error + proposerRsp, err = client.rpcClient.GetProposerDuties(epoch) + if err != nil { + logger.WithField("client", client.clientName).Warnf("could not load proposer duties for epoch %v: %v", epoch, err) + } + if proposerRsp == nil { + return fmt.Errorf("could not find proposer duties for epoch %v", epoch) + } + dependendRoot = proposerRsp.DependentRoot + } + + epochStats, isNewStats := client.indexerCache.createOrGetEpochStats(epoch, dependendRoot) + if isNewStats { + logger.WithField("client", client.clientName).Infof("load epoch stats for epoch %v (dependend: 0x%x)", epoch, dependendRoot) + } else { + logger.WithField("client", client.clientName).Debugf("ensure epoch stats for epoch %v (dependend: 0x%x)", epoch, dependendRoot) + } + go epochStats.ensureEpochStatsLazy(client, proposerRsp) + if int64(epoch) > client.lastEpochStats { + client.lastEpochStats = int64(epoch) + } + return nil +} + +func (epochStats *EpochStats) ensureEpochStatsLazy(client *IndexerClient, proposerRsp *rpctypes.StandardV1ProposerDutiesResponse) { + defer func() { + if err := recover(); err != nil { + logger.WithField("client", client.clientName).Errorf("uncaught panic in ensureEpochStats subroutine: %v", err) + } + }() + epochStats.dutiesMutex.Lock() + defer epochStats.dutiesMutex.Unlock() + + if epochStats.dutiesInDb { + return + } + + // proposer duties + if epochStats.proposerAssignments == nil { + if proposerRsp == nil { + var err error + proposerRsp, err = client.rpcClient.GetProposerDuties(epochStats.Epoch) + if err != nil { + logger.WithField("client", client.clientName).Warnf("could not lazy load proposer duties for epoch %v: %v", epochStats.Epoch, err) + } + if proposerRsp == nil { + logger.WithField("client", client.clientName).Errorf("could not find proposer duties for epoch %v", epochStats.Epoch) + return + } + if !bytes.Equal(proposerRsp.DependentRoot, epochStats.DependentRoot) { + logger.WithField("client", client.clientName).Errorf("got unexpected dependend root for proposer duties %v (got: %v, expected: 0x%x)", epochStats.Epoch, proposerRsp.DependentRoot.String(), epochStats.DependentRoot) + return + } + } + epochStats.proposerAssignments = map[uint64]uint64{} + for _, duty := range proposerRsp.Data { + epochStats.proposerAssignments[uint64(duty.Slot)] = uint64(duty.ValidatorIndex) + } + } + + // get state root for dependend root + if epochStats.dependentStateRef == "" { + if epochStats.Epoch == 0 { + epochStats.dependentStateRef = "genesis" + } else if dependendBlock := client.indexerCache.getCachedBlock(epochStats.DependentRoot); dependendBlock != nil { + if dependendBlock.Slot == 0 { + epochStats.dependentStateRef = "genesis" + } else { + dependendBlock.mutex.RLock() + epochStats.dependentStateRef = dependendBlock.header.Message.StateRoot.String() + dependendBlock.mutex.RUnlock() + } + } else { + parsedHeader, err := client.rpcClient.GetBlockHeaderByBlockroot(epochStats.DependentRoot) + if err != nil { + logger.WithField("client", client.clientName).Errorf("could not get 
dependent block header for epoch %v (0x%x)", epochStats.Epoch, epochStats.DependentRoot) + } + if parsedHeader.Data.Header.Message.Slot == 0 { + epochStats.dependentStateRef = "genesis" + } else { + epochStats.dependentStateRef = parsedHeader.Data.Header.Message.StateRoot.String() + } + } + } + + // load validators + if epochStats.validatorStats == nil { + go epochStats.ensureValidatorStatsLazy(client, epochStats.dependentStateRef) + } + + // get committee duties + if epochStats.attestorAssignments == nil { + if epochStats.dependentStateRef == "" { + return + } + parsedCommittees, err := client.rpcClient.GetCommitteeDuties(epochStats.dependentStateRef, epochStats.Epoch) + if err != nil { + logger.WithField("client", client.clientName).Errorf("error retrieving committees data: %v", err) + return + } + epochStats.attestorAssignments = map[string][]uint64{} + for _, committee := range parsedCommittees.Data { + for i, valIndex := range committee.Validators { + valIndexU64, err := strconv.ParseUint(valIndex, 10, 64) + if err != nil { + logger.WithField("client", client.clientName).Warnf("epoch %d committee %d index %d has bad validator index %q", epochStats.Epoch, committee.Index, i, valIndex) + continue + } + k := fmt.Sprintf("%v-%v", uint64(committee.Slot), uint64(committee.Index)) + if epochStats.attestorAssignments[k] == nil { + epochStats.attestorAssignments[k] = make([]uint64, 0) + } + epochStats.attestorAssignments[k] = append(epochStats.attestorAssignments[k], valIndexU64) + } + } + } + + // get sync committee duties + if epochStats.syncAssignments == nil && epochStats.Epoch >= utils.Config.Chain.Config.AltairForkEpoch { + syncCommitteeState := epochStats.dependentStateRef + if epochStats.Epoch > 0 && epochStats.Epoch == utils.Config.Chain.Config.AltairForkEpoch { + syncCommitteeState = fmt.Sprintf("%d", utils.Config.Chain.Config.AltairForkEpoch*utils.Config.Chain.Config.SlotsPerEpoch) + } + if syncCommitteeState == "" { + return + } + parsedSyncCommittees, err := client.rpcClient.GetSyncCommitteeDuties(syncCommitteeState, epochStats.Epoch) + if err != nil { + logger.WithField("client", client.clientName).Errorf("error retrieving sync_committees for epoch %v (state: %v): %v", epochStats.Epoch, syncCommitteeState, err) + } + epochStats.syncAssignments = make([]uint64, len(parsedSyncCommittees.Data.Validators)) + for i, valIndexStr := range parsedSyncCommittees.Data.Validators { + valIndexU64, err := strconv.ParseUint(valIndexStr, 10, 64) + if err != nil { + logger.WithField("client", client.clientName).Errorf("in sync_committee for epoch %d validator %d has bad validator index: %q", epochStats.Epoch, i, valIndexStr) + continue + } + epochStats.syncAssignments[i] = valIndexU64 + } + } +} + +func (epochStats *EpochStats) ensureValidatorStatsLazy(client *IndexerClient, stateRef string) { + defer func() { + if err := recover(); err != nil { + logger.WithField("client", client.clientName).Errorf("uncaught panic in ensureValidatorStats subroutine: %v", err) + } + }() + epochStats.loadValidatorStats(client, stateRef) +} + +func (epochStats *EpochStats) loadValidatorStats(client *IndexerClient, stateRef string) { + epochStats.validatorsMutex.Lock() + defer epochStats.validatorsMutex.Unlock() + if epochStats.validatorStats != nil { + return + } + + // `lock` concurrency limit (limit concurrent get validators calls) + client.indexerCache.validatorLoadingLimiter <- 1 + + var epochValidators *rpctypes.StandardV1StateValidatorsResponse + var err error + if epochStats.Epoch == 0 { + epochValidators, 
err = client.rpcClient.GetGenesisValidators() + } else { + epochValidators, err = client.rpcClient.GetStateValidators(stateRef) + } + + // `unlock` concurrency limit + <-client.indexerCache.validatorLoadingLimiter + + if err != nil { + logger.Errorf("error fetching epoch %v validators: %v", epochStats.Epoch, err) + return + } + client.indexerCache.setLastValidators(epochStats.Epoch, epochValidators) + validatorStats := &EpochValidatorStats{ + ValidatorBalances: make(map[uint64]uint64), + } + for idx := 0; idx < len(epochValidators.Data); idx++ { + validator := epochValidators.Data[idx] + validatorStats.ValidatorBalances[uint64(validator.Index)] = uint64(validator.Validator.EffectiveBalance) + if !strings.HasPrefix(validator.Status, "active") { + continue + } + validatorStats.ValidatorCount++ + validatorStats.ValidatorBalance += uint64(validator.Balance) + validatorStats.EligibleAmount += uint64(validator.Validator.EffectiveBalance) + } + epochStats.validatorStats = validatorStats +} diff --git a/indexer/indexer.go b/indexer/indexer.go index 99da4597..9c86d0d1 100644 --- a/indexer/indexer.go +++ b/indexer/indexer.go @@ -2,14 +2,11 @@ package indexer import ( "bytes" - "errors" - "strings" - "sync" - "time" + "math/rand" + "sort" "github.com/sirupsen/logrus" - "github.com/pk910/light-beaconchain-explorer/db" "github.com/pk910/light-beaconchain-explorer/dbtypes" "github.com/pk910/light-beaconchain-explorer/rpc" "github.com/pk910/light-beaconchain-explorer/rpctypes" @@ -19,782 +16,351 @@ import ( var logger = logrus.StandardLogger().WithField("module", "indexer") type Indexer struct { - rpcClient *rpc.BeaconClient - controlMutex sync.Mutex - runMutex sync.Mutex - running bool - writeDb bool - prepopulateEpochs uint16 - inMemoryEpochs uint16 - epochProcessingDelay uint16 - state indexerState - synchronizer *synchronizerState -} - -type indexerState struct { - lastHeadBlock uint64 - lastHeadRoot []byte - lastFinalizedBlock uint64 - cacheMutex sync.RWMutex - cachedBlocks map[uint64][]*BlockInfo - epochStats map[uint64]*EpochStats - headValidators *rpctypes.StandardV1StateValidatorsResponse - headValidatorsEpoch int64 - lowestCachedSlot int64 - highestCachedSlot int64 - lastProcessedEpoch int64 -} - -type EpochStats struct { - dependendRoot []byte - AssignmentsMutex sync.Mutex - assignmentsFailed bool - Validators *EpochValidators - Assignments *rpctypes.EpochAssignments -} + indexerCache *indexerCache + indexerClients []*IndexerClient -type EpochValidators struct { - ValidatorsReadyMutex sync.Mutex - ValidatorsStatsMutex sync.RWMutex - ValidatorCount uint64 - ValidatorBalance uint64 - EligibleAmount uint64 - ValidatorBalances map[uint64]uint64 + writeDb bool + inMemoryEpochs uint16 } -func NewIndexer(rpcClient *rpc.BeaconClient) (*Indexer, error) { +func NewIndexer() (*Indexer, error) { inMemoryEpochs := utils.Config.Indexer.InMemoryEpochs if inMemoryEpochs < 2 { inMemoryEpochs = 2 } - epochProcessingDelay := utils.Config.Indexer.EpochProcessingDelay - if epochProcessingDelay < 2 { - epochProcessingDelay = 2 - } else if epochProcessingDelay > inMemoryEpochs { - inMemoryEpochs = epochProcessingDelay - } - prepopulateEpochs := utils.Config.Indexer.PrepopulateEpochs - if prepopulateEpochs > inMemoryEpochs { - prepopulateEpochs = inMemoryEpochs - } - return &Indexer{ - rpcClient: rpcClient, - writeDb: !utils.Config.Indexer.DisableIndexWriter, - prepopulateEpochs: prepopulateEpochs, - inMemoryEpochs: inMemoryEpochs, - epochProcessingDelay: epochProcessingDelay, - state: indexerState{ - cachedBlocks: 
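loadValidatorStats above bounds concurrent validator-set fetches with a buffered channel (validatorLoadingLimiter): sending acquires a slot, receiving releases it. A small stand-alone sketch of that semaphore pattern, assuming an illustrative capacity of 2 (the real channel size is configured elsewhere and not shown in this patch):

```go
package main

import (
	"fmt"
	"sync"
	"time"
)

func main() {
	limiter := make(chan int, 2) // at most 2 goroutines inside the guarded section
	var wg sync.WaitGroup
	for i := 0; i < 5; i++ {
		wg.Add(1)
		go func(id int) {
			defer wg.Done()
			limiter <- 1                 // acquire a slot (blocks while the channel is full)
			defer func() { <-limiter }() // release the slot when done
			fmt.Printf("loading validators for request %d\n", id)
			time.Sleep(100 * time.Millisecond) // stands in for the RPC call
		}(i)
	}
	wg.Wait()
}
```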
make(map[uint64][]*BlockInfo), - epochStats: make(map[uint64]*EpochStats), - headValidatorsEpoch: -1, - lowestCachedSlot: -1, - highestCachedSlot: -1, - lastProcessedEpoch: -1, - }, - }, nil -} -func (indexer *Indexer) Start() error { - indexer.controlMutex.Lock() - defer indexer.controlMutex.Unlock() + indexer := &Indexer{ + indexerClients: make([]*IndexerClient, 0), - if indexer.running { - return errors.New("indexer already running") + writeDb: !utils.Config.Indexer.DisableIndexWriter, + inMemoryEpochs: inMemoryEpochs, } - indexer.running = true + indexer.indexerCache = newIndexerCache(indexer) - go indexer.runIndexer() - - return nil -} - -func (indexer *Indexer) GetLowestCachedSlot() int64 { - indexer.state.cacheMutex.RLock() - defer indexer.state.cacheMutex.RUnlock() - return indexer.state.lowestCachedSlot + return indexer, nil } -func (indexer *Indexer) GetHeadSlot() uint64 { - indexer.state.cacheMutex.RLock() - defer indexer.state.cacheMutex.RUnlock() - return indexer.state.lastHeadBlock -} - -func (indexer *Indexer) GetCachedBlocks(slot uint64) []*BlockInfo { - indexer.state.cacheMutex.RLock() - defer indexer.state.cacheMutex.RUnlock() - - if slot < uint64(indexer.state.lowestCachedSlot) { - return nil - } - blocks := indexer.state.cachedBlocks[slot] - if blocks == nil { +func (indexer *Indexer) AddClient(index uint8, name string, endpoint string, archive bool, priority int) *IndexerClient { + rpcClient, err := rpc.NewBeaconClient(endpoint) + if err != nil { + logger.Errorf("error while adding client %v to indexer: %v", name, err) return nil } - resBlocks := make([]*BlockInfo, len(blocks)) - copy(resBlocks, blocks) - return resBlocks + client := newIndexerClient(index, name, rpcClient, indexer.indexerCache, archive, priority) + indexer.indexerClients = append(indexer.indexerClients, client) + return client } -func (indexer *Indexer) GetCachedBlock(root []byte) *BlockInfo { - indexer.state.cacheMutex.RLock() - defer indexer.state.cacheMutex.RUnlock() - - if indexer.state.lowestCachedSlot < 0 { - return nil +func (indexer *Indexer) getReadyClient(archive bool, head []byte) *IndexerClient { + headCandidates := indexer.GetHeadForks() + if len(headCandidates) == 0 { + return indexer.indexerClients[0] } - for slotIdx := int64(indexer.state.lastHeadBlock); slotIdx >= indexer.state.lowestCachedSlot; slotIdx-- { - slot := uint64(slotIdx) - if indexer.state.cachedBlocks[slot] != nil { - blocks := indexer.state.cachedBlocks[slot] - for bidx := 0; bidx < len(blocks); bidx++ { - if bytes.Equal(blocks[bidx].Header.Data.Root, root) { - return blocks[bidx] - } - } - } - } - return nil -} -func (indexer *Indexer) GetCachedBlockByStateroot(stateroot []byte) *BlockInfo { - indexer.state.cacheMutex.RLock() - defer indexer.state.cacheMutex.RUnlock() - - if indexer.state.lowestCachedSlot < 0 { - return nil - } - for slotIdx := int64(indexer.state.lastHeadBlock); slotIdx >= indexer.state.lowestCachedSlot; slotIdx-- { - slot := uint64(slotIdx) - if indexer.state.cachedBlocks[slot] != nil { - blocks := indexer.state.cachedBlocks[slot] - for bidx := 0; bidx < len(blocks); bidx++ { - if bytes.Equal(blocks[bidx].Header.Data.Header.Message.StateRoot, stateroot) { - return blocks[bidx] + var headFork *HeadFork + if head != nil { + cachedBlock := indexer.indexerCache.getCachedBlock(head) + if cachedBlock != nil { + for _, fork := range headCandidates { + if indexer.indexerCache.isCanonicalBlock(head, fork.Root) { + headFork = fork + break } } } } - return nil -} - -func (indexer *Indexer) 
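Hypothetical wiring for the multi-client API added above, using the NewIndexer and AddClient signatures from this patch; the import path, endpoints, client names and priorities are placeholders, not taken from the repository:

```go
package main

import (
	"log"

	"github.com/pk910/light-beaconchain-explorer/indexer" // assumed import path
)

func main() {
	idx, err := indexer.NewIndexer()
	if err != nil {
		log.Fatalf("could not create indexer: %v", err)
	}
	// AddClient(index, name, endpoint, archive, priority) as declared in the patch;
	// endpoints and names below are invented for illustration.
	idx.AddClient(0, "node-1", "http://127.0.0.1:5052", true, 10)
	idx.AddClient(1, "node-2", "http://127.0.0.1:5053", false, 5)
}
```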
GetCachedEpochStats(epoch uint64) *EpochStats { - indexer.state.cacheMutex.RLock() - defer indexer.state.cacheMutex.RUnlock() - return indexer.state.epochStats[epoch] -} - -func (indexer *Indexer) GetCachedValidatorSet() *rpctypes.StandardV1StateValidatorsResponse { - indexer.state.cacheMutex.RLock() - defer indexer.state.cacheMutex.RUnlock() - return indexer.state.headValidators -} - -func (indexer *Indexer) GetEpochVotes(epoch uint64) *EpochVotes { - indexer.state.cacheMutex.RLock() - defer indexer.state.cacheMutex.RUnlock() - - epochStats := indexer.state.epochStats[epoch] - if epochStats == nil { - return nil + if headFork == nil { + headFork = headCandidates[0] } - return indexer.getEpochVotes(epoch, epochStats) -} - -func (indexer *Indexer) getEpochVotes(epoch uint64, epochStats *EpochStats) *EpochVotes { - var firstBlock *BlockInfo - firstSlot := epoch * utils.Config.Chain.Config.SlotsPerEpoch - lastSlot := firstSlot + (utils.Config.Chain.Config.SlotsPerEpoch) - 1 -slotLoop: - for slot := firstSlot; slot <= lastSlot; slot++ { - if indexer.state.cachedBlocks[slot] != nil { - blocks := indexer.state.cachedBlocks[slot] - for bidx := 0; bidx < len(blocks); bidx++ { - if !blocks[bidx].Orphaned { - firstBlock = blocks[bidx] - break slotLoop - } - } - } + clientCandidates := indexer.getReadyClientCandidates(headFork, archive) + if len(clientCandidates) == 0 && archive { + clientCandidates = indexer.getReadyClientCandidates(headFork, false) } - if firstBlock == nil { - return nil - } - - var targetRoot []byte - if uint64(firstBlock.Header.Data.Header.Message.Slot) == firstSlot { - targetRoot = firstBlock.Header.Data.Root - } else { - targetRoot = firstBlock.Header.Data.Header.Message.ParentRoot + candidateCount := len(clientCandidates) + if candidateCount == 0 { + return indexer.indexerClients[0] } - return aggregateEpochVotes(indexer.state.cachedBlocks, epoch, epochStats, targetRoot, false) + selectedIndex := rand.Intn(candidateCount) + return clientCandidates[selectedIndex] } -func (indexer *Indexer) BuildLiveEpoch(epoch uint64) *dbtypes.Epoch { - indexer.state.cacheMutex.RLock() - defer indexer.state.cacheMutex.RUnlock() - - epochStats := indexer.state.epochStats[epoch] - if epochStats == nil { - return nil - } - epochVotes := indexer.getEpochVotes(epoch, epochStats) - return buildDbEpoch(epoch, indexer.state.cachedBlocks, epochStats, epochVotes, nil) -} - -func (indexer *Indexer) BuildLiveBlock(block *BlockInfo) *dbtypes.Block { - epoch := utils.EpochOfSlot(uint64(block.Header.Data.Header.Message.Slot)) - epochStats := indexer.state.epochStats[epoch] - return buildDbBlock(block, epochStats) -} - -func (indexer *Indexer) runIndexer() { - indexer.runMutex.Lock() - defer indexer.runMutex.Unlock() - - for { - runIndexerLoop := true - - genesis, err := indexer.rpcClient.GetGenesis() - if err != nil { - logger.Errorf("Indexer Error while fetching genesis: %v", err) - runIndexerLoop = false - } else if genesis != nil { - genesisTime := uint64(genesis.Data.GenesisTime) - if genesisTime != utils.Config.Chain.GenesisTimestamp { - logger.Warnf("Genesis time from RPC does not match the genesis time from explorer configuration.") - runIndexerLoop = false - } - if genesis.Data.GenesisForkVersion.String() != utils.Config.Chain.Config.GenesisForkVersion { - logger.Warnf("Genesis fork version from RPC does not match the genesis fork version explorer configuration.") - runIndexerLoop = false - } - } - - if runIndexerLoop { - syncStatus, err := indexer.rpcClient.GetNodeSyncing() - if err != nil { - 
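getReadyClient above ends up picking a random client among the ready candidates that share the highest priority. A sketch of that selection rule with invented types (assumes the client list is already sorted by priority, highest first, and non-empty):

```go
package main

import (
	"fmt"
	"math/rand"
)

type client struct {
	name     string
	priority int
}

// pickClient keeps only the leading clients that share the top priority,
// then returns one of them at random.
func pickClient(clients []client) client {
	candidates := []client{}
	for _, c := range clients {
		if len(candidates) > 0 && candidates[0].priority != c.priority {
			break // input is assumed sorted by priority (descending)
		}
		candidates = append(candidates, c)
	}
	return candidates[rand.Intn(len(candidates))]
}

func main() {
	clients := []client{{"a", 10}, {"b", 10}, {"c", 5}} // already sorted
	fmt.Println(pickClient(clients).name)               // "a" or "b"
}
```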
logger.Errorf("Indexer Error while fetching syncing status: %v", err) - runIndexerLoop = false - } else if syncStatus != nil { - if syncStatus.Data.IsSyncing { - logger.Errorf("Cannot run indexer, beacon node is synchronizing.") - runIndexerLoop = false - } - } +func (indexer *Indexer) getReadyClientCandidates(headFork *HeadFork, archive bool) []*IndexerClient { + var clientCandidates []*IndexerClient = nil + for _, client := range headFork.ReadyClients { + if archive && !client.archive { + continue } - - if runIndexerLoop { - err := indexer.runIndexerLoop() - if err == nil { - break - } + if clientCandidates != nil && clientCandidates[0].priority != client.priority { + break } - - logger.Warnf("Indexer couldn't do stuff it is supposed to do. Retrying in 10 sec...") - select { - case <-time.After(10 * time.Second): + if clientCandidates != nil { + clientCandidates = append(clientCandidates, client) + } else { + clientCandidates = []*IndexerClient{client} } } - - logger.Debugf("Indexer process shutdown") + return clientCandidates } -func (indexer *Indexer) runIndexerLoop() error { - chainConfig := utils.Config.Chain.Config - genesisTime := time.Unix(int64(utils.Config.Chain.GenesisTimestamp), 0) +func (indexer *Indexer) GetRpcClient(archive bool, head []byte) *rpc.BeaconClient { + return indexer.getReadyClient(archive, head).rpcClient +} - if now := time.Now(); now.Compare(genesisTime) > 0 { - currentEpoch := utils.TimeToEpoch(time.Now()) - if currentEpoch > int64(indexer.prepopulateEpochs) { - indexer.state.lastHeadBlock = uint64((currentEpoch-int64(indexer.prepopulateEpochs)+1)*int64(chainConfig.SlotsPerEpoch)) - 1 - } - if currentEpoch > int64(indexer.epochProcessingDelay) { - indexer.state.lastProcessedEpoch = currentEpoch - int64(indexer.epochProcessingDelay) - } - } +func (indexer *Indexer) GetFinalizedEpoch() (int64, []byte) { + return indexer.indexerCache.getFinalizedHead() +} - // fill indexer cache - err := indexer.pollHeadBlock() - if err != nil { - logger.Errorf("Indexer Error while polling latest head: %v", err) - return err +func (indexer *Indexer) GetHighestSlot() uint64 { + indexer.indexerCache.cacheMutex.RLock() + defer indexer.indexerCache.cacheMutex.RUnlock() + if indexer.indexerCache.highestSlot < 0 { + return 0 } + return uint64(indexer.indexerCache.highestSlot) +} - // start block stream - blockStream := indexer.rpcClient.NewBlockStream() - defer blockStream.Close() - - // check if we need to start a sync job (last synced epoch < lastProcessedEpoch) - if indexer.writeDb { - syncState := dbtypes.IndexerSyncState{} - db.GetExplorerState("indexer.syncstate", &syncState) - if int64(syncState.Epoch) < indexer.state.lastProcessedEpoch { - indexer.startSynchronization(syncState.Epoch) +func (indexer *Indexer) GetHeadForks() []*HeadFork { + headForks := []*HeadFork{} + for _, client := range indexer.indexerClients { + if !client.isConnected || client.isSynchronizing { + continue } - } - - // run indexer loop - for { - indexer.controlMutex.Lock() - isRunning := indexer.running - indexer.controlMutex.Unlock() - if !isRunning { - break + cHeadSlot, cHeadRoot := client.getLastHead() + var matchingFork *HeadFork + for _, fork := range headForks { + if bytes.Equal(fork.Root, cHeadRoot) || indexer.indexerCache.isCanonicalBlock(cHeadRoot, fork.Root) { + matchingFork = fork + break + } + if indexer.indexerCache.isCanonicalBlock(fork.Root, cHeadRoot) { + fork.Root = cHeadRoot + fork.Slot = uint64(cHeadSlot) + matchingFork = fork + break + } } - - select { - case headEvt := 
<-blockStream.HeadChan: - //logger.Infof("RPC Event: Head %v (root: %v, dep: %v)", headEvt.Slot, headEvt.Block, headEvt.CurrentDutyDependentRoot) - indexer.processHeadEpoch(utils.EpochOfSlot(uint64(headEvt.Slot)), headEvt.CurrentDutyDependentRoot) - case blockEvt := <-blockStream.BlockChan: - //logger.Infof("RPC Event: Block %v (root: %v)", blockEvt.Slot, blockEvt.Block) - indexer.pollStreamedBlock(blockEvt.Block) - case <-blockStream.CloseChan: - logger.Warnf("Indexer lost connection to beacon event stream. Reconnection in 5 sec") - time.Sleep(5 * time.Second) - blockStream.Start() - err := indexer.pollHeadBlock() - if err != nil { - logger.Errorf("Indexer Error while polling latest head: %v", err) + if matchingFork == nil { + matchingFork = &HeadFork{ + Root: cHeadRoot, + Slot: uint64(cHeadSlot), + AllClients: []*IndexerClient{client}, + } + headForks = append(headForks, matchingFork) + } else { + matchingFork.AllClients = append(matchingFork.AllClients, client) + } + } + for _, fork := range headForks { + fork.ReadyClients = make([]*IndexerClient, 0) + sort.Slice(fork.AllClients, func(a, b int) bool { + prioA := fork.AllClients[a].priority + prioB := fork.AllClients[b].priority + return prioA > prioB + }) + for _, client := range fork.AllClients { + var headDistance uint64 = 0 + _, cHeadRoot := client.getLastHead() + if !bytes.Equal(fork.Root, cHeadRoot) { + _, headDistance = indexer.indexerCache.getCanonicalDistance(cHeadRoot, fork.Root) } - case <-time.After(30 * time.Second): - logger.Info("No head event since 30 secs, polling chain head") - err := indexer.pollHeadBlock() - if err != nil { - logger.Errorf("Indexer Error while polling latest head: %v", err) + if headDistance < 2 { + fork.ReadyClients = append(fork.ReadyClients, client) } } - - now := time.Now() - indexer.processIndexing() - indexer.processCacheCleanup() - logger.Debugf("indexer loop processing time: %v ms", time.Now().Sub(now).Milliseconds()) - } - - return nil -} - -func (indexer *Indexer) startSynchronization(startEpoch uint64) error { - if !indexer.writeDb { - return nil - } - - indexer.controlMutex.Lock() - defer indexer.controlMutex.Unlock() - - if indexer.synchronizer == nil { - indexer.synchronizer = newSynchronizer(indexer) } - if !indexer.synchronizer.isEpochAhead(startEpoch) { - indexer.synchronizer.startSync(startEpoch) - } - return nil -} -func (indexer *Indexer) pollHeadBlock() error { - header, err := indexer.rpcClient.GetLatestBlockHead() - if err != nil { - return err - } - if bytes.Equal(header.Data.Root, indexer.state.lastHeadRoot) { - return nil // chain head didn't proceed, block missied? 
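GetHeadForks above sorts each fork's clients by priority and treats a client as ready when its own head is within 2 slots of the fork head. A sketch of those two steps with invented types:

```go
package main

import (
	"fmt"
	"sort"
)

type forkClient struct {
	name         string
	priority     int
	headDistance uint64 // canonical distance of the client's head from the fork head
}

func main() {
	clients := []forkClient{
		{"a", 5, 0},
		{"b", 10, 1},
		{"c", 8, 7},
	}
	// sort by priority, highest first
	sort.Slice(clients, func(i, j int) bool {
		return clients[i].priority > clients[j].priority
	})
	// keep only clients close enough to the fork head
	ready := []forkClient{}
	for _, c := range clients {
		if c.headDistance < 2 {
			ready = append(ready, c)
		}
	}
	fmt.Println(ready) // [{b 10 1} {a 5 0}]
}
```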
- } - block, err := indexer.rpcClient.GetBlockBodyByBlockroot(header.Data.Root) - if err != nil { - return err - } - - headSlot := uint64(header.Data.Header.Message.Slot) - if indexer.state.lastHeadBlock < headSlot-1 { - backfillSlot := indexer.state.lastHeadBlock + 1 - for backfillSlot < headSlot { - indexer.pollBackfillBlock(backfillSlot) - backfillSlot++ + // sort by relevance (client count & head slot) + sort.Slice(headForks, func(a, b int) bool { + slotA := headForks[a].Slot + slotB := headForks[b].Slot + if slotA > slotB && slotA-slotB >= 16 { + return true + } else if slotB > slotA && slotB-slotA >= 16 { + return false + } else { + countA := len(headForks[a].ReadyClients) + countB := len(headForks[b].ReadyClients) + return countA > countB } - } - - epoch := utils.EpochOfSlot(headSlot) - logger.Infof("Process latest slot %v/%v: %v", epoch, headSlot, header.Data.Root) - indexer.processHeadEpoch(epoch, nil) - indexer.processHeadBlock(headSlot, header, block) + }) - return nil + return headForks } -func (indexer *Indexer) pollBackfillBlock(slot uint64) (*BlockInfo, error) { - header, err := indexer.rpcClient.GetBlockHeaderBySlot(slot) - if err != nil { - return nil, err - } - if header == nil { - logger.Infof("Process missed slot %v/%v", utils.EpochOfSlot(slot), slot) - return nil, nil - } - block, err := indexer.rpcClient.GetBlockBodyByBlockroot(header.Data.Root) - if err != nil { - return nil, err +func (indexer *Indexer) GetCanonicalHead() (uint64, []byte) { + headCandidates := indexer.GetHeadForks() + if len(headCandidates) == 0 { + return 0, nil } - epoch := utils.EpochOfSlot(uint64(header.Data.Header.Message.Slot)) - logger.Infof("Process polled slot %v/%v: %v", epoch, header.Data.Header.Message.Slot, header.Data.Root) - indexer.processHeadEpoch(epoch, nil) - blockInfo := indexer.processHeadBlock(slot, header, block) - - return blockInfo, nil + return headCandidates[0].Slot, headCandidates[0].Root } -func (indexer *Indexer) pollStreamedBlock(root []byte) (*BlockInfo, error) { - header, err := indexer.rpcClient.GetBlockHeaderByBlockroot(root) - if err != nil { - return nil, err - } - block, err := indexer.rpcClient.GetBlockBodyByBlockroot(header.Data.Root) - if err != nil { - return nil, err +func (indexer *Indexer) GetCachedBlocks(slot uint64) []*CacheBlock { + if int64(utils.EpochOfSlot(slot)) <= indexer.indexerCache.finalizedEpoch { + return nil } - - slot := uint64(header.Data.Header.Message.Slot) - if indexer.state.lastHeadBlock < slot-1 { - backfillSlot := indexer.state.lastHeadBlock + 1 - for backfillSlot < slot { - indexer.pollBackfillBlock(backfillSlot) - backfillSlot++ - } + indexer.indexerCache.cacheMutex.RLock() + defer indexer.indexerCache.cacheMutex.RUnlock() + blocks := indexer.indexerCache.slotMap[slot] + if blocks == nil { + return nil } + return blocks +} - logger.Infof("Process stream slot %v/%v: %v", utils.EpochOfSlot(slot), header.Data.Header.Message.Slot, header.Data.Root) - blockInfo := indexer.processHeadBlock(slot, header, block) +func (indexer *Indexer) GetCachedBlock(root []byte) *CacheBlock { + indexer.indexerCache.cacheMutex.RLock() + defer indexer.indexerCache.cacheMutex.RUnlock() - return blockInfo, nil + return indexer.indexerCache.rootMap[string(root)] } -func (indexer *Indexer) processHeadBlock(slot uint64, header *rpctypes.StandardV1BeaconHeaderResponse, block *rpctypes.StandardV2BeaconBlockResponse) *BlockInfo { - indexer.state.cacheMutex.Lock() - defer indexer.state.cacheMutex.Unlock() +func (indexer *Indexer) GetCachedBlockByStateroot(stateroot 
[]byte) *CacheBlock { + indexer.indexerCache.cacheMutex.RLock() + defer indexer.indexerCache.cacheMutex.RUnlock() - blockInfo := &BlockInfo{ - Header: header, - Block: block, - } - if indexer.state.cachedBlocks[slot] == nil { - indexer.state.cachedBlocks[slot] = make([]*BlockInfo, 1) - indexer.state.cachedBlocks[slot][0] = blockInfo + var lowestSlotIdx int64 + if indexer.indexerCache.finalizedEpoch >= 0 { + lowestSlotIdx = (indexer.indexerCache.finalizedEpoch + 1) * int64(utils.Config.Chain.Config.SlotsPerEpoch) } else { - blocks := indexer.state.cachedBlocks[slot] - duplicate := false - for bidx := 0; bidx < len(blocks); bidx++ { - if bytes.Equal(blocks[bidx].Header.Data.Root, header.Data.Root) { - logger.Infof("Received duplicate (reorg) block %v.%v (%v)", slot, bidx, header.Data.Root) - duplicate = true - blockInfo = blocks[bidx] - break - } - } - if !duplicate { - indexer.state.cachedBlocks[slot] = append(blocks, blockInfo) - } - } - if indexer.state.lowestCachedSlot < 0 || int64(slot) < indexer.state.lowestCachedSlot { - indexer.state.lowestCachedSlot = int64(slot) - } - if indexer.state.highestCachedSlot < 0 || int64(slot) > indexer.state.highestCachedSlot { - indexer.state.highestCachedSlot = int64(slot) + lowestSlotIdx = 0 } - - if (indexer.state.lastHeadRoot != nil && !bytes.Equal(indexer.state.lastHeadRoot, header.Data.Header.Message.ParentRoot)) || blockInfo.Orphaned { - // chain did not proceed as usual, check for reorg - logger.Debugf("Unusual chain progress, check for reorg %v (%v)", slot, header.Data.Root) - var canonicalBlock *BlockInfo = blockInfo - - // walk backwards, mark all blocks that are not the parent of canonicalBlock as orphaned - // when we find the parent of canonicalBlock, check if it's orphaned - // if orphaned: set block as new canonicalBlock and continue walking backwards - // if not orphaned, finish index loop and exit (reached end of reorged blocks) - reachedEnd := false - for sidx := indexer.state.highestCachedSlot; sidx >= int64(indexer.state.lowestCachedSlot) && !reachedEnd; sidx-- { - blocks := indexer.state.cachedBlocks[uint64(sidx)] - if blocks == nil { - continue - } - for bidx := 0; bidx < len(blocks); bidx++ { - block := blocks[bidx] - if bytes.Equal(block.Header.Data.Root, canonicalBlock.Header.Data.Root) { - if block.Orphaned { - logger.Infof("Chain reorg: mark %v.%v as canonical (%v)", sidx, bidx, block.Header.Data.Root) - block.Orphaned = false - } - } else if bytes.Equal(block.Header.Data.Root, canonicalBlock.Header.Data.Header.Message.ParentRoot) { - if block.Orphaned { - logger.Infof("Chain reorg: mark %v.%v as canonical (%v)", sidx, bidx, block.Header.Data.Root) - block.Orphaned = false - canonicalBlock = block - } else { - reachedEnd = true - } - } else { - if !block.Orphaned { - logger.Infof("Chain reorg: mark %v.%v as orphaned (%v)", sidx, bidx, block.Header.Data.Root) - block.Orphaned = true - } - } - } - } - if !reachedEnd { - logger.Errorf("Large chain reorg detected, resync needed") - // TODO: Start synchronization - } else { - reorgMinEpoch := int64(utils.EpochOfSlot(uint64(canonicalBlock.Header.Data.Header.Message.Slot))) - if reorgMinEpoch <= indexer.state.lastProcessedEpoch { - logger.Infof("Chain reorg touched processed epochs, reset epoch processing to %v", reorgMinEpoch-1) - indexer.state.lastProcessedEpoch = reorgMinEpoch - 1 + for slotIdx := int64(indexer.indexerCache.highestSlot); slotIdx >= lowestSlotIdx; slotIdx-- { + slot := uint64(slotIdx) + blocks := indexer.indexerCache.slotMap[slot] + for _, block := range blocks { 
+ if bytes.Equal(block.header.Message.StateRoot, stateroot) { + return block } } } - indexer.state.lastHeadBlock = slot - indexer.state.lastHeadRoot = header.Data.Root - - return blockInfo -} - -func (indexer *Indexer) processHeadEpoch(epoch uint64, dependentRoot []byte) { - var epochAssignments *rpctypes.EpochAssignments - if dependentRoot == nil { - if indexer.state.epochStats[epoch] != nil { - return - } - var err error - epochAssignments, err = indexer.rpcClient.GetEpochAssignments(epoch) - if err != nil { - logger.Errorf("Error fetching epoch %v duties: %v", epoch, err) - return - } - dependentRoot = epochAssignments.DependendRoot - } - - epochStats, loadAssignments, loadValidators := indexer.newEpochStats(epoch, dependentRoot) - - if loadAssignments || loadValidators { - go indexer.loadEpochStats(epoch, dependentRoot, epochStats, loadValidators) - } + return nil } -func (indexer *Indexer) newEpochStats(epoch uint64, dependentRoot []byte) (*EpochStats, bool, bool) { - indexer.state.cacheMutex.Lock() - defer indexer.state.cacheMutex.Unlock() +func (indexer *Indexer) GetCachedBlocksByProposer(proposer uint64) []*CacheBlock { + indexer.indexerCache.cacheMutex.RLock() + defer indexer.indexerCache.cacheMutex.RUnlock() - if int64(epoch) < indexer.state.lastProcessedEpoch { - return nil, false, false + resBlocks := make([]*CacheBlock, 0) + var lowestSlotIdx int64 + if indexer.indexerCache.finalizedEpoch >= 0 { + lowestSlotIdx = (indexer.indexerCache.finalizedEpoch + 1) * int64(utils.Config.Chain.Config.SlotsPerEpoch) + } else { + lowestSlotIdx = 0 } - oldEpochStats := indexer.state.epochStats[epoch] - if oldEpochStats != nil && bytes.Equal(oldEpochStats.dependendRoot, dependentRoot) { - loadAssignments := oldEpochStats.assignmentsFailed - if loadAssignments { - oldEpochStats.assignmentsFailed = false - oldEpochStats.AssignmentsMutex = sync.Mutex{} - oldEpochStats.AssignmentsMutex.Lock() + for slotIdx := int64(indexer.indexerCache.highestSlot); slotIdx >= lowestSlotIdx; slotIdx-- { + slot := uint64(slotIdx) + blocks := indexer.indexerCache.slotMap[slot] + for _, block := range blocks { + if uint64(block.header.Message.ProposerIndex) == proposer { + resBlocks = append(resBlocks, block) + } } - - return oldEpochStats, loadAssignments, false } + return resBlocks +} - epochStats := &EpochStats{} - epochStats.dependendRoot = dependentRoot - epochStats.AssignmentsMutex.Lock() - indexer.state.epochStats[epoch] = epochStats - - if oldEpochStats != nil { - epochStats.Validators = oldEpochStats.Validators - } else { - epochStats.Validators = &EpochValidators{ - ValidatorCount: 0, - EligibleAmount: 0, - ValidatorBalances: make(map[uint64]uint64), +func (indexer *Indexer) GetCachedEpochStats(epoch uint64) *EpochStats { + _, headRoot := indexer.GetCanonicalHead() + return indexer.getCachedEpochStats(epoch, headRoot) +} + +func (indexer *Indexer) getCachedEpochStats(epoch uint64, headRoot []byte) *EpochStats { + indexer.indexerCache.epochStatsMutex.RLock() + defer indexer.indexerCache.epochStatsMutex.RUnlock() + var epochStats *EpochStats + epochStatsList := indexer.indexerCache.epochStatsMap[epoch] + for _, stats := range epochStatsList { + if indexer.indexerCache.isCanonicalBlock(stats.DependentRoot, headRoot) { + epochStats = stats + break } - epochStats.Validators.ValidatorsReadyMutex.Lock() - } - - return epochStats, oldEpochStats == nil, oldEpochStats == nil + return epochStats } -func (indexer *Indexer) loadEpochStats(epoch uint64, dependentRoot []byte, epochStats *EpochStats, loadValidators bool) { 
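The cache lookups above only scan slots above the finalized epoch, computed as (finalizedEpoch + 1) * SlotsPerEpoch. A worked example of that slot/epoch arithmetic, assuming 32 slots per epoch and that utils.EpochOfSlot is the usual slot / SlotsPerEpoch:

```go
package main

import "fmt"

const slotsPerEpoch = 32 // mainnet value, assumed for illustration

func epochOfSlot(slot uint64) uint64 {
	return slot / slotsPerEpoch
}

func main() {
	finalizedEpoch := int64(100)
	// first slot that is not yet finalized
	lowestUnfinalizedSlot := (finalizedEpoch + 1) * slotsPerEpoch
	fmt.Println(lowestUnfinalizedSlot)                // 3232
	fmt.Println(epochOfSlot(3231), epochOfSlot(3232)) // 100 101
}
```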
- if !indexer.loadEpochAssignments(epoch, dependentRoot, epochStats) { - return - } - if loadValidators { - indexer.loadEpochValidators(epoch, epochStats) - } +func (indexer *Indexer) GetCachedValidatorSet() *rpctypes.StandardV1StateValidatorsResponse { + return indexer.indexerCache.lastValidatorsResp } -func (indexer *Indexer) loadEpochAssignments(epoch uint64, dependentRoot []byte, epochStats *EpochStats) bool { - defer epochStats.AssignmentsMutex.Unlock() - logger.Infof("Epoch %v head, fetching assignments (dependend root: 0x%x)", epoch, dependentRoot) - - epochAssignments, err := indexer.rpcClient.GetEpochAssignments(epoch) - if err != nil { - logger.Errorf("Error fetching epoch %v duties: %v", epoch, err) - return false +func (indexer *Indexer) GetEpochVotes(epoch uint64) (*EpochStats, *EpochVotes) { + epochStats := indexer.GetCachedEpochStats(epoch) + if epochStats == nil { + return nil, nil } - epochStats.Assignments = epochAssignments - return true + return epochStats, indexer.getEpochVotes(epoch, epochStats) } -func (indexer *Indexer) loadEpochValidators(epoch uint64, epochStats *EpochStats) { - defer epochStats.Validators.ValidatorsReadyMutex.Unlock() - logger.Infof("Epoch %v head, loading validator set (state: %v)", epoch, epochStats.Assignments.DependendState) +func (indexer *Indexer) getEpochVotes(epoch uint64, epochStats *EpochStats) *EpochVotes { + _, headRoot := indexer.GetCanonicalHead() - // load epoch stats - var epochValidators *rpctypes.StandardV1StateValidatorsResponse - var err error - if epochStats.Assignments.DependendIsGenesis { - epochValidators, err = indexer.rpcClient.GetGenesisValidators() - } else { - epochValidators, err = indexer.rpcClient.GetStateValidators(epochStats.Assignments.DependendState) - } - if err != nil { - logger.Errorf("Error fetching epoch %v validators: %v", epoch, err) + // get epoch target + firstSlot := epoch * utils.Config.Chain.Config.SlotsPerEpoch + firstBlock := indexer.indexerCache.getFirstCanonicalBlock(epoch, headRoot) + var epochTarget []byte + if firstBlock == nil { + logger.Warnf("Counld not find epoch %v target (no block found)", epoch) } else { - indexer.state.cacheMutex.Lock() - if int64(epoch) > indexer.state.headValidatorsEpoch { - indexer.state.headValidatorsEpoch = int64(epoch) - indexer.state.headValidators = epochValidators - } - indexer.state.cacheMutex.Unlock() - epochStats.Validators.ValidatorsStatsMutex.Lock() - for idx := 0; idx < len(epochValidators.Data); idx++ { - validator := epochValidators.Data[idx] - epochStats.Validators.ValidatorBalances[uint64(validator.Index)] = uint64(validator.Validator.EffectiveBalance) - if !strings.HasPrefix(validator.Status, "active") { - continue - } - epochStats.Validators.ValidatorCount++ - epochStats.Validators.ValidatorBalance += uint64(validator.Balance) - epochStats.Validators.EligibleAmount += uint64(validator.Validator.EffectiveBalance) + if firstBlock.Slot == firstSlot { + epochTarget = firstBlock.Root + } else { + epochTarget = firstBlock.header.Message.ParentRoot } - epochStats.Validators.ValidatorsStatsMutex.Unlock() } -} -func (indexer *Indexer) processIndexing() { - // process old epochs - currentEpoch := utils.EpochOfSlot(indexer.state.lastHeadBlock) - maxProcessEpoch := int64(currentEpoch) - int64(indexer.epochProcessingDelay) - for indexer.state.lastProcessedEpoch < maxProcessEpoch { - processEpoch := indexer.state.lastProcessedEpoch + 1 - indexer.processEpoch(uint64(processEpoch)) - indexer.state.lastProcessedEpoch = processEpoch + // get canonical blocks + 
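getEpochVotes above derives the epoch's target root from the first canonical block: the block's own root if it sits exactly on the epoch's first slot, otherwise its parent root (because the first slot of the epoch was missed). A sketch of that rule with invented types:

```go
package main

import "fmt"

const slotsPerEpoch = 32 // assumed for illustration

type block struct {
	slot       uint64
	root       string
	parentRoot string
}

// epochTarget returns the target root for an epoch given its first canonical block.
func epochTarget(epoch uint64, firstCanonicalBlock *block) string {
	if firstCanonicalBlock == nil {
		return "" // no block found for this epoch
	}
	if firstCanonicalBlock.slot == epoch*slotsPerEpoch {
		return firstCanonicalBlock.root
	}
	return firstCanonicalBlock.parentRoot
}

func main() {
	// first slot of epoch 3 is 96; a first block at slot 97 means slot 96 was missed
	fmt.Println(epochTarget(3, &block{slot: 96, root: "0xaa", parentRoot: "0x99"})) // 0xaa
	fmt.Println(epochTarget(3, &block{slot: 97, root: "0xbb", parentRoot: "0xaa"})) // 0xaa
}
```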
canonicalMap := indexer.indexerCache.getCanonicalBlockMap(epoch, headRoot) + // append next epoch blocks (needed for vote aggregation) + for slot, block := range indexer.indexerCache.getCanonicalBlockMap(epoch+1, headRoot) { + canonicalMap[slot] = block } -} -func (indexer *Indexer) processCacheCleanup() { - currentEpoch := utils.EpochOfSlot(indexer.state.lastHeadBlock) - lowestCachedSlot := indexer.state.lowestCachedSlot - - // cleanup cache - cleanEpoch := currentEpoch - uint64(indexer.inMemoryEpochs) - if lowestCachedSlot >= 0 && lowestCachedSlot < int64((cleanEpoch+1)*utils.Config.Chain.Config.SlotsPerEpoch) { - indexer.state.cacheMutex.Lock() - defer indexer.state.cacheMutex.Unlock() - for indexer.state.lowestCachedSlot < int64((cleanEpoch+1)*utils.Config.Chain.Config.SlotsPerEpoch) { - cacheSlot := uint64(indexer.state.lowestCachedSlot) - if indexer.state.cachedBlocks[cacheSlot] != nil { - logger.Debugf("Dropped cached block (epoch %v, slot %v)", utils.EpochOfSlot(cacheSlot), indexer.state.lowestCachedSlot) - delete(indexer.state.cachedBlocks, cacheSlot) - } - indexer.state.lowestCachedSlot++ - } - if indexer.state.epochStats[cleanEpoch] != nil { - epochStats := indexer.state.epochStats[cleanEpoch] - indexer.rpcClient.AddCachedEpochAssignments(cleanEpoch, epochStats.Assignments) - delete(indexer.state.epochStats, cleanEpoch) - } - } + // calculate votes + return aggregateEpochVotes(canonicalMap, epoch, epochStats, epochTarget, false) } -func (indexer *Indexer) processEpoch(epoch uint64) { - indexer.state.cacheMutex.RLock() - defer indexer.state.cacheMutex.RUnlock() - - logger.Infof("Process epoch %v", epoch) - firstSlot := epoch * utils.Config.Chain.Config.SlotsPerEpoch - lastSlot := firstSlot + utils.Config.Chain.Config.SlotsPerEpoch - 1 - epochStats := indexer.state.epochStats[epoch] - - // await full epochStats (might not be ready in some edge cases) - epochStats.Validators.ValidatorsReadyMutex.Lock() - epochStats.Validators.ValidatorsReadyMutex.Unlock() +func (indexer *Indexer) BuildLiveEpoch(epoch uint64) *dbtypes.Epoch { + headSlot, headRoot := indexer.GetCanonicalHead() + headEpoch := utils.EpochOfSlot(headSlot) - var epochTarget []byte -slotLoop: - for slot := firstSlot; slot <= lastSlot; slot++ { - blocks := indexer.state.cachedBlocks[slot] - if blocks == nil { - continue - } - for bidx := 0; bidx < len(blocks); bidx++ { - block := blocks[bidx] - if !block.Orphaned { - if slot == firstSlot { - epochTarget = block.Header.Data.Root - } else { - epochTarget = block.Header.Data.Header.Message.ParentRoot - } - break slotLoop - } - } - } - if epochTarget == nil { - logger.Warnf("Counld not find epoch %v target (no block found)", epoch) + epochStats := indexer.getCachedEpochStats(epoch, headRoot) + if epochStats == nil { + return nil } - epochVotes := aggregateEpochVotes(indexer.state.cachedBlocks, epoch, epochStats, epochTarget, false) - - // save to db - if indexer.writeDb { - tx, err := db.WriterDb.Beginx() - if err != nil { - logger.Errorf("error starting db transactions: %v", err) - return - } - defer tx.Rollback() + epochStats.dbEpochMutex.Lock() + defer epochStats.dbEpochMutex.Unlock() - err = persistEpochData(epoch, indexer.state.cachedBlocks, epochStats, epochVotes, tx) - if err != nil { - logger.Errorf("error persisting epoch data to db: %v", err) - } - - if indexer.synchronizer == nil || !indexer.synchronizer.running { - err = db.SetExplorerState("indexer.syncstate", &dbtypes.IndexerSyncState{ - Epoch: epoch, - }, tx) - if err != nil { - logger.Errorf("error while 
updating sync state: %v", err) - } - } + if epochStats.dbEpochCache != nil { + return epochStats.dbEpochCache + } - if err := tx.Commit(); err != nil { - logger.Errorf("error committing db transaction: %v", err) - return - } + logger.Debugf("Build live epoch data %v", epoch) + canonicalMap := indexer.indexerCache.getCanonicalBlockMap(epoch, headRoot) + epochVotes := indexer.getEpochVotes(epoch, epochStats) + dbEpoch := buildDbEpoch(epoch, canonicalMap, epochStats, epochVotes, nil) + if headEpoch > epoch && headEpoch-epoch > 2 { + epochStats.dbEpochCache = dbEpoch } + return dbEpoch +} - logger.Infof("Epoch %v stats: %v validators (%v)", epoch, epochStats.Validators.ValidatorCount, epochStats.Validators.EligibleAmount) - logger.Infof("Epoch %v votes: target %v + %v = %v", epoch, epochVotes.currentEpoch.targetVoteAmount, epochVotes.nextEpoch.targetVoteAmount, epochVotes.currentEpoch.targetVoteAmount+epochVotes.nextEpoch.targetVoteAmount) - logger.Infof("Epoch %v votes: head %v + %v = %v", epoch, epochVotes.currentEpoch.headVoteAmount, epochVotes.nextEpoch.headVoteAmount, epochVotes.currentEpoch.headVoteAmount+epochVotes.nextEpoch.headVoteAmount) - logger.Infof("Epoch %v votes: total %v + %v = %v", epoch, epochVotes.currentEpoch.totalVoteAmount, epochVotes.nextEpoch.totalVoteAmount, epochVotes.currentEpoch.totalVoteAmount+epochVotes.nextEpoch.totalVoteAmount) +func (indexer *Indexer) BuildLiveBlock(block *CacheBlock) *dbtypes.Block { + block.dbBlockMutex.Lock() + defer block.dbBlockMutex.Unlock() - for slot := firstSlot; slot <= lastSlot; slot++ { - blocks := indexer.state.cachedBlocks[slot] - if blocks == nil { - continue - } - for bidx := 0; bidx < len(blocks); bidx++ { - block := blocks[bidx] - if block.Orphaned { - logger.Infof("Epoch %v orphaned block %v.%v: %v", epoch, slot, bidx, block.Header.Data.Root) - } + if block.dbBlockCache == nil { + logger.Debugf("Build live block data 0x%x", block.Root) + header := block.GetHeader() + epoch := utils.EpochOfSlot(uint64(header.Message.Slot)) + epochStats := indexer.GetCachedEpochStats(epoch) + if epochStats == nil { + return nil } + block.dbBlockCache = buildDbBlock(block, epochStats) } + block.dbBlockCache.Orphaned = !block.IsCanonical(indexer, nil) + return block.dbBlockCache } diff --git a/indexer/synchronizer.go b/indexer/synchronizer.go index 095ebaf9..4a91e6cd 100644 --- a/indexer/synchronizer.go +++ b/indexer/synchronizer.go @@ -1,16 +1,13 @@ package indexer import ( - "strings" "sync" "time" - "github.com/sirupsen/logrus" - "github.com/pk910/light-beaconchain-explorer/db" "github.com/pk910/light-beaconchain-explorer/dbtypes" - "github.com/pk910/light-beaconchain-explorer/rpctypes" "github.com/pk910/light-beaconchain-explorer/utils" + "github.com/sirupsen/logrus" ) var synclogger = logrus.StandardLogger().WithField("module", "synchronizer") @@ -23,7 +20,7 @@ type synchronizerState struct { killChan chan bool currentEpoch uint64 cachedSlot uint64 - cachedBlocks map[uint64][]*BlockInfo + cachedBlocks map[uint64]*CacheBlock } func newSynchronizer(indexer *Indexer) *synchronizerState { @@ -52,13 +49,13 @@ func (sync *synchronizerState) startSync(startEpoch uint64) { sync.stateMutex.Unlock() // wait for synchronizer to stop sync.runMutex.Lock() - sync.runMutex.Unlock() + defer sync.runMutex.Unlock() // start synchronizer sync.stateMutex.Lock() defer sync.stateMutex.Unlock() if sync.running { - synclogger.Errorf("Cannot start synchronizer: already running") + synclogger.Errorf("cannot start synchronizer: already running") return } 
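BuildLiveBlock and BuildLiveEpoch above build their database representation once and then reuse it through dbBlockCache/dbEpochCache, guarded by a per-object mutex. A minimal sketch of that memoize-under-lock pattern with illustrative names:

```go
package main

import (
	"fmt"
	"sync"
)

type liveBlock struct {
	mutex   sync.Mutex
	cached  *string       // nil until the value has been built once
	buildFn func() string // stands in for the expensive buildDbBlock call
}

// get builds the value on first use and returns the cached copy afterwards.
func (b *liveBlock) get() string {
	b.mutex.Lock()
	defer b.mutex.Unlock()
	if b.cached == nil {
		value := b.buildFn()
		b.cached = &value
	}
	return *b.cached
}

func main() {
	builds := 0
	b := &liveBlock{buildFn: func() string { builds++; return "db row" }}
	fmt.Println(b.get(), b.get(), builds) // db row db row 1
}
```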
sync.currentEpoch = startEpoch @@ -68,35 +65,37 @@ func (sync *synchronizerState) startSync(startEpoch uint64) { } func (sync *synchronizerState) runSync() { + defer func() { + if err := recover(); err != nil { + synclogger.Errorf("uncaught panic in runSync subroutine: %v", err) + } + }() + sync.runMutex.Lock() defer sync.runMutex.Unlock() - sync.cachedBlocks = make(map[uint64][]*BlockInfo) + sync.cachedBlocks = make(map[uint64]*CacheBlock) sync.cachedSlot = 0 isComplete := false - synclogger.Infof("Synchronization started. Head epoch: %v", sync.currentEpoch) + synclogger.Infof("synchronization started. Head epoch: %v", sync.currentEpoch) for { // synchronize next epoch - sync.stateMutex.Lock() syncEpoch := sync.currentEpoch - sync.stateMutex.Unlock() - synclogger.Infof("Synchronising epoch %v", syncEpoch) + synclogger.Infof("synchronizing epoch %v", syncEpoch) if sync.syncEpoch(syncEpoch) { - sync.indexer.state.cacheMutex.Lock() - indexerEpoch := sync.indexer.state.lastProcessedEpoch - sync.indexer.state.cacheMutex.Unlock() + finalizedEpoch, _ := sync.indexer.indexerCache.getFinalizedHead() sync.stateMutex.Lock() syncEpoch++ sync.currentEpoch = syncEpoch sync.stateMutex.Unlock() - if int64(syncEpoch) > indexerEpoch { + if int64(syncEpoch) > finalizedEpoch { isComplete = true break } } else { - synclogger.Warnf("Synchronisation of epoch %v failed", syncEpoch) + synclogger.Warnf("synchronization of epoch %v failed", syncEpoch) } if sync.checkKillChan(time.Duration(utils.Config.Indexer.SyncEpochCooldown) * time.Second) { @@ -105,9 +104,9 @@ func (sync *synchronizerState) runSync() { } if isComplete { - synclogger.Infof("Synchronization complete. Head epoch: %v", sync.currentEpoch) + synclogger.Infof("synchronization complete. Head epoch: %v", sync.currentEpoch) } else { - synclogger.Infof("Synchronization aborted. Head epoch: %v", sync.currentEpoch) + synclogger.Infof("synchronization aborted. 
Head epoch: %v", sync.currentEpoch) } sync.running = false @@ -136,10 +135,12 @@ func (sync *synchronizerState) syncEpoch(syncEpoch uint64) bool { return true } + client := sync.indexer.getReadyClient(true, nil) + // load epoch assignments - epochAssignments, err := sync.indexer.rpcClient.GetEpochAssignments(syncEpoch) + epochAssignments, err := client.rpcClient.GetEpochAssignments(syncEpoch) if err != nil { - synclogger.Errorf("Error fetching epoch %v duties: %v", syncEpoch, err) + synclogger.Errorf("error fetching epoch %v duties: %v", syncEpoch, err) } if sync.checkKillChan(0) { @@ -153,26 +154,28 @@ func (sync *synchronizerState) syncEpoch(syncEpoch uint64) bool { if sync.cachedSlot >= slot { continue } - header, err := sync.indexer.rpcClient.GetBlockHeaderBySlot(slot) + headerRsp, err := client.rpcClient.GetBlockHeaderBySlot(slot) if err != nil { - synclogger.Errorf("Error fetching slot %v header: %v", slot, err) + synclogger.Errorf("error fetching slot %v header: %v", slot, err) return false } - if header == nil { + if headerRsp == nil { continue } if sync.checkKillChan(0) { return false } - block, err := sync.indexer.rpcClient.GetBlockBodyByBlockroot(header.Data.Root) + blockRsp, err := client.rpcClient.GetBlockBodyByBlockroot(headerRsp.Data.Root) if err != nil { - synclogger.Errorf("Error fetching slot %v block: %v", slot, err) + synclogger.Errorf("error fetching slot %v block: %v", slot, err) return false } - sync.cachedBlocks[slot] = []*BlockInfo{{ - Header: header, - Block: block, - }} + sync.cachedBlocks[slot] = &CacheBlock{ + Root: headerRsp.Data.Root, + Slot: slot, + header: &headerRsp.Data.Header, + block: &blockRsp.Data, + } } sync.cachedSlot = lastSlot @@ -181,60 +184,38 @@ func (sync *synchronizerState) syncEpoch(syncEpoch uint64) bool { } // load epoch stats - epochStats := EpochStats{ - Assignments: epochAssignments, - Validators: &EpochValidators{ - ValidatorCount: 0, - EligibleAmount: 0, - ValidatorBalances: make(map[uint64]uint64), - }, - } - if epochAssignments != nil { - var epochValidators *rpctypes.StandardV1StateValidatorsResponse - var err error - if epochAssignments.DependendIsGenesis { - epochValidators, err = sync.indexer.rpcClient.GetGenesisValidators() - } else { - epochValidators, err = sync.indexer.rpcClient.GetStateValidators(epochAssignments.DependendState) - } - if err != nil { - logger.Errorf("Error fetching epoch %v validators (state: %v): %v", syncEpoch, epochAssignments.DependendState, err) - } else { - for idx := 0; idx < len(epochValidators.Data); idx++ { - validator := epochValidators.Data[idx] - epochStats.Validators.ValidatorBalances[uint64(validator.Index)] = uint64(validator.Validator.EffectiveBalance) - if !strings.HasPrefix(validator.Status, "active") { - continue - } - epochStats.Validators.ValidatorCount++ - epochStats.Validators.ValidatorBalance += uint64(validator.Balance) - epochStats.Validators.EligibleAmount += uint64(validator.Validator.EffectiveBalance) - } - } + epochStats := &EpochStats{ + Epoch: syncEpoch, + DependentRoot: epochAssignments.DependendRoot, + proposerAssignments: epochAssignments.ProposerAssignments, + attestorAssignments: epochAssignments.AttestorAssignments, + syncAssignments: epochAssignments.SyncAssignments, } + epochStats.loadValidatorStats(client, epochAssignments.DependendStateRef) + if sync.checkKillChan(0) { return false } // process epoch vote aggregations - var firstBlock *BlockInfo + var firstBlock *CacheBlock lastSlot = firstSlot + (utils.Config.Chain.Config.SlotsPerEpoch) - 1 for slot := firstSlot; 
slot <= lastSlot; slot++ { if sync.cachedBlocks[slot] != nil { - firstBlock = sync.cachedBlocks[slot][0] + firstBlock = sync.cachedBlocks[slot] break } } var targetRoot []byte if firstBlock != nil { - if uint64(firstBlock.Header.Data.Header.Message.Slot) == firstSlot { - targetRoot = firstBlock.Header.Data.Root + if uint64(firstBlock.header.Message.Slot) == firstSlot { + targetRoot = firstBlock.Root } else { - targetRoot = firstBlock.Header.Data.Header.Message.ParentRoot + targetRoot = firstBlock.header.Message.ParentRoot } } - epochVotes := aggregateEpochVotes(sync.cachedBlocks, syncEpoch, &epochStats, targetRoot, false) + epochVotes := aggregateEpochVotes(sync.cachedBlocks, syncEpoch, epochStats, targetRoot, false) // save blocks tx, err := db.WriterDb.Beginx() @@ -244,7 +225,7 @@ func (sync *synchronizerState) syncEpoch(syncEpoch uint64) bool { } defer tx.Rollback() - err = persistEpochData(syncEpoch, sync.cachedBlocks, &epochStats, epochVotes, tx) + err = persistEpochData(syncEpoch, sync.cachedBlocks, epochStats, epochVotes, tx) if err != nil { logger.Errorf("error persisting epoch data to db: %v", err) return false diff --git a/indexer/types.go b/indexer/types.go new file mode 100644 index 00000000..4497b158 --- /dev/null +++ b/indexer/types.go @@ -0,0 +1,8 @@ +package indexer + +type HeadFork struct { + Slot uint64 + Root []byte + ReadyClients []*IndexerClient + AllClients []*IndexerClient +} diff --git a/indexer/votes.go b/indexer/votes.go index e750c1a1..fde48931 100644 --- a/indexer/votes.go +++ b/indexer/votes.go @@ -18,10 +18,11 @@ type EpochVotes struct { headVoteAmount uint64 totalVoteAmount uint64 } + VoteCounts bool ActivityMap map[uint64]bool } -func aggregateEpochVotes(blockMap map[uint64][]*BlockInfo, epoch uint64, epochStats *EpochStats, targetRoot []byte, currentOnly bool) *EpochVotes { +func aggregateEpochVotes(blockMap map[uint64]*CacheBlock, epoch uint64, epochStats *EpochStats, targetRoot []byte, currentOnly bool) *EpochVotes { firstSlot := epoch * utils.Config.Chain.Config.SlotsPerEpoch lastSlot := firstSlot + utils.Config.Chain.Config.SlotsPerEpoch - 1 if !currentOnly { @@ -29,81 +30,89 @@ func aggregateEpochVotes(blockMap map[uint64][]*BlockInfo, epoch uint64, epochSt lastSlot += utils.Config.Chain.Config.SlotsPerEpoch } - epochStats.Validators.ValidatorsStatsMutex.RLock() - defer epochStats.Validators.ValidatorsStatsMutex.RUnlock() + // wait until all lazy loaded data is available + epochStats.dutiesMutex.RLock() + defer epochStats.dutiesMutex.RUnlock() + epochStats.validatorsMutex.RLock() + defer epochStats.validatorsMutex.RUnlock() votes := EpochVotes{ ActivityMap: map[uint64]bool{}, + VoteCounts: epochStats.validatorStats == nil, } votedBitsets := make(map[string][]byte) for slot := firstSlot; slot <= lastSlot; slot++ { - blocks := blockMap[slot] - if blocks == nil { + block := blockMap[slot] + if block == nil { continue } - for bidx := 0; bidx < len(blocks); bidx++ { - block := blocks[bidx] - if !block.Orphaned { - isNextEpoch := utils.EpochOfSlot(slot) > epoch - for _, att := range block.Block.Data.Message.Body.Attestations { - if utils.EpochOfSlot(uint64(att.Data.Slot)) != epoch { - continue - } - - attKey := fmt.Sprintf("%v-%v", uint64(att.Data.Slot), uint64(att.Data.Index)) - voteAmount := uint64(0) - voteBitset := att.AggregationBits - votedBitset := votedBitsets[attKey] - if epochStats.Assignments != nil { - voteValidators := epochStats.Assignments.AttestorAssignments[attKey] - for bitIdx, validatorIdx := range voteValidators { - if votedBitset != nil &&
utils.BitAtVector(votedBitset, bitIdx) { - // don't "double count" votes, if a attestation aggregation has been extended and re-included - continue - } - if utils.BitAtVector(voteBitset, bitIdx) { - voteAmount += uint64(epochStats.Validators.ValidatorBalances[validatorIdx]) - votes.ActivityMap[validatorIdx] = true - } - } - } + blockBody := block.GetBlockBody() + if blockBody == nil { + continue + } - if votedBitset != nil { - // merge bitsets - for i := 0; i < len(votedBitset); i++ { - votedBitset[i] |= voteBitset[i] - } - } else { - votedBitset = make([]byte, len(voteBitset)) - copy(votedBitset, voteBitset) - votedBitsets[attKey] = voteBitset - } + isNextEpoch := utils.EpochOfSlot(slot) > epoch + for _, att := range blockBody.Message.Body.Attestations { + if utils.EpochOfSlot(uint64(att.Data.Slot)) != epoch { + continue + } - if bytes.Equal(att.Data.Target.Root, targetRoot) { - if isNextEpoch { - votes.nextEpoch.targetVoteAmount += voteAmount - } else { - votes.currentEpoch.targetVoteAmount += voteAmount - } - } else { - //logger.Infof("vote target missmatch %v != 0x%x", att.Data.Target.Root, targetRoot) + attKey := fmt.Sprintf("%v-%v", uint64(att.Data.Slot), uint64(att.Data.Index)) + voteAmount := uint64(0) + voteBitset := att.AggregationBits + votedBitset := votedBitsets[attKey] + if epochStats.attestorAssignments != nil { + voteValidators := epochStats.attestorAssignments[attKey] + for bitIdx, validatorIdx := range voteValidators { + if votedBitset != nil && utils.BitAtVector(votedBitset, bitIdx) { + // don't "double count" votes, if an attestation aggregation has been extended and re-included + continue } - if bytes.Equal(att.Data.BeaconBlockRoot, block.Header.Data.Header.Message.ParentRoot) { - if isNextEpoch { - votes.nextEpoch.headVoteAmount += voteAmount + if utils.BitAtVector(voteBitset, bitIdx) { + if epochStats.validatorStats != nil { + voteAmount += uint64(epochStats.validatorStats.ValidatorBalances[validatorIdx]) } else { - votes.currentEpoch.headVoteAmount += voteAmount + voteAmount += 1 } + votes.ActivityMap[validatorIdx] = true } - if isNextEpoch { - votes.nextEpoch.totalVoteAmount += voteAmount - } else { - votes.currentEpoch.totalVoteAmount += voteAmount - } } } + + if votedBitset != nil { + // merge bitsets + for i := 0; i < len(votedBitset); i++ { + votedBitset[i] |= voteBitset[i] + } + } else { + votedBitset = make([]byte, len(voteBitset)) + copy(votedBitset, voteBitset) + votedBitsets[attKey] = voteBitset + } + + if bytes.Equal(att.Data.Target.Root, targetRoot) { + if isNextEpoch { + votes.nextEpoch.targetVoteAmount += voteAmount + } else { + votes.currentEpoch.targetVoteAmount += voteAmount + } + } /*else { + logger.Infof("vote target mismatch %v != 0x%x", att.Data.Target.Root, targetRoot) + }*/ + if bytes.Equal(att.Data.BeaconBlockRoot, block.GetParentRoot()) { + if isNextEpoch { + votes.nextEpoch.headVoteAmount += voteAmount + } else { + votes.currentEpoch.headVoteAmount += voteAmount + } + } + if isNextEpoch { + votes.nextEpoch.totalVoteAmount += voteAmount + } else { + votes.currentEpoch.totalVoteAmount += voteAmount + } } } diff --git a/indexer/writeDb.go b/indexer/writeDb.go index 24c701f1..156e1ee5 100644 --- a/indexer/writeDb.go +++ b/indexer/writeDb.go @@ -9,7 +9,7 @@ import ( "github.com/pk910/light-beaconchain-explorer/utils" ) -func persistEpochData(epoch uint64, blockMap map[uint64][]*BlockInfo, epochStats *EpochStats, epochVotes *EpochVotes, tx *sqlx.Tx) error { +func persistEpochData(epoch uint64, blockMap map[uint64]*CacheBlock, epochStats
*EpochStats, epochVotes *EpochVotes, tx *sqlx.Tx) error { commitTx := false if tx == nil { var err error @@ -22,26 +22,21 @@ func persistEpochData(epoch uint64, blockMap map[uint64][]*BlockInfo, epochStats commitTx = true } - dbEpoch := buildDbEpoch(epoch, blockMap, epochStats, epochVotes, func(block *BlockInfo) { + dbEpoch := buildDbEpoch(epoch, blockMap, epochStats, epochVotes, func(block *CacheBlock) { // insert block dbBlock := buildDbBlock(block, epochStats) db.InsertBlock(dbBlock, tx) - - // insert orphaned block - if dbBlock.Orphaned { - db.InsertOrphanedBlock(BuildOrphanedBlock(block), tx) - } }) // insert slot assignments firstSlot := epoch * utils.Config.Chain.Config.SlotsPerEpoch - if epochStats.Assignments != nil { + if epochStats.proposerAssignments != nil { slotAssignments := make([]*dbtypes.SlotAssignment, utils.Config.Chain.Config.SlotsPerEpoch) for slotIdx := uint64(0); slotIdx < utils.Config.Chain.Config.SlotsPerEpoch; slotIdx++ { slot := firstSlot + slotIdx slotAssignments[slotIdx] = &dbtypes.SlotAssignment{ Slot: slot, - Proposer: epochStats.Assignments.ProposerAssignments[slot], + Proposer: epochStats.proposerAssignments[slot], } } db.InsertSlotAssignments(slotAssignments, tx) @@ -60,28 +55,39 @@ func persistEpochData(epoch uint64, blockMap map[uint64][]*BlockInfo, epochStats return nil } -func buildDbBlock(block *BlockInfo, epochStats *EpochStats) *dbtypes.Block { +func buildDbBlock(block *CacheBlock, epochStats *EpochStats) *dbtypes.Block { + blockBody := block.GetBlockBody() + if blockBody == nil { + logger.Errorf("Error while aggregating epoch blocks: canonical block body not found: %v", block.Slot) + return nil + } + dbBlock := dbtypes.Block{ - Root: block.Header.Data.Root, - Slot: uint64(block.Header.Data.Header.Message.Slot), - ParentRoot: block.Header.Data.Header.Message.ParentRoot, - StateRoot: block.Header.Data.Header.Message.StateRoot, - Orphaned: block.Orphaned, - Proposer: uint64(block.Block.Data.Message.ProposerIndex), - Graffiti: block.Block.Data.Message.Body.Graffiti, - GraffitiText: utils.GraffitiToString(block.Block.Data.Message.Body.Graffiti), - AttestationCount: uint64(len(block.Block.Data.Message.Body.Attestations)), - DepositCount: uint64(len(block.Block.Data.Message.Body.Deposits)), - ExitCount: uint64(len(block.Block.Data.Message.Body.VoluntaryExits)), - AttesterSlashingCount: uint64(len(block.Block.Data.Message.Body.AttesterSlashings)), - ProposerSlashingCount: uint64(len(block.Block.Data.Message.Body.ProposerSlashings)), - BLSChangeCount: uint64(len(block.Block.Data.Message.Body.SignedBLSToExecutionChange)), + Root: block.Root, + Slot: uint64(block.header.Message.Slot), + ParentRoot: block.header.Message.ParentRoot, + StateRoot: block.header.Message.StateRoot, + Proposer: uint64(blockBody.Message.ProposerIndex), + Graffiti: blockBody.Message.Body.Graffiti, + GraffitiText: utils.GraffitiToString(blockBody.Message.Body.Graffiti), + AttestationCount: uint64(len(blockBody.Message.Body.Attestations)), + DepositCount: uint64(len(blockBody.Message.Body.Deposits)), + ExitCount: uint64(len(blockBody.Message.Body.VoluntaryExits)), + AttesterSlashingCount: uint64(len(blockBody.Message.Body.AttesterSlashings)), + ProposerSlashingCount: uint64(len(blockBody.Message.Body.ProposerSlashings)), + BLSChangeCount: uint64(len(blockBody.Message.Body.SignedBLSToExecutionChange)), } - syncAggregate := block.Block.Data.Message.Body.SyncAggregate - if syncAggregate != nil && epochStats != nil && epochStats.Assignments != nil && epochStats.Assignments.SyncAssignments 
!= nil { + syncAggregate := blockBody.Message.Body.SyncAggregate + if syncAggregate != nil { + var assignedCount int + if epochStats != nil && epochStats.syncAssignments != nil { + assignedCount = len(epochStats.syncAssignments) + } else { + // this is not accurate, but best we can get without epoch assignments + assignedCount = len(syncAggregate.SyncCommitteeBits) * 8 + } votedCount := 0 - assignedCount := len(epochStats.Assignments.SyncAssignments) for i := 0; i < assignedCount; i++ { if utils.BitAtVector(syncAggregate.SyncCommitteeBits, i) { votedCount++ @@ -90,7 +96,7 @@ func buildDbBlock(block *BlockInfo, epochStats *EpochStats) *dbtypes.Block { dbBlock.SyncParticipation = float32(votedCount) / float32(assignedCount) } - if executionPayload := block.Block.Data.Message.Body.ExecutionPayload; executionPayload != nil { + if executionPayload := blockBody.Message.Body.ExecutionPayload; executionPayload != nil { dbBlock.EthTransactionCount = uint64(len(executionPayload.Transactions)) dbBlock.EthBlockNumber = uint64(executionPayload.BlockNumber) dbBlock.EthBlockHash = executionPayload.BlockHash @@ -106,65 +112,65 @@ func buildDbBlock(block *BlockInfo, epochStats *EpochStats) *dbtypes.Block { return &dbBlock } -func buildDbEpoch(epoch uint64, blockMap map[uint64][]*BlockInfo, epochStats *EpochStats, epochVotes *EpochVotes, blockFn func(block *BlockInfo)) *dbtypes.Epoch { +func buildDbEpoch(epoch uint64, blockMap map[uint64]*CacheBlock, epochStats *EpochStats, epochVotes *EpochVotes, blockFn func(block *CacheBlock)) *dbtypes.Epoch { firstSlot := epoch * utils.Config.Chain.Config.SlotsPerEpoch lastSlot := firstSlot + (utils.Config.Chain.Config.SlotsPerEpoch) - 1 totalSyncAssigned := 0 totalSyncVoted := 0 dbEpoch := dbtypes.Epoch{ - Epoch: epoch, - ValidatorCount: epochStats.Validators.ValidatorCount, - ValidatorBalance: epochStats.Validators.ValidatorBalance, - Eligible: epochStats.Validators.EligibleAmount, - VotedTarget: epochVotes.currentEpoch.targetVoteAmount + epochVotes.nextEpoch.targetVoteAmount, - VotedHead: epochVotes.currentEpoch.headVoteAmount + epochVotes.nextEpoch.headVoteAmount, - VotedTotal: epochVotes.currentEpoch.totalVoteAmount + epochVotes.nextEpoch.totalVoteAmount, + Epoch: epoch, + VotedTarget: epochVotes.currentEpoch.targetVoteAmount + epochVotes.nextEpoch.targetVoteAmount, + VotedHead: epochVotes.currentEpoch.headVoteAmount + epochVotes.nextEpoch.headVoteAmount, + VotedTotal: epochVotes.currentEpoch.totalVoteAmount + epochVotes.nextEpoch.totalVoteAmount, + } + if epochStats.validatorStats != nil { + dbEpoch.ValidatorCount = epochStats.validatorStats.ValidatorCount + dbEpoch.ValidatorBalance = epochStats.validatorStats.ValidatorBalance + dbEpoch.Eligible = epochStats.validatorStats.EligibleAmount } // aggregate blocks for slot := firstSlot; slot <= lastSlot; slot++ { - blocks := blockMap[slot] - if blocks != nil { - for bidx := 0; bidx < len(blocks); bidx++ { - block := blocks[bidx] - if blockFn != nil { - blockFn(block) - } - - if block.Orphaned { - dbEpoch.OrphanedCount++ - continue - } + block := blockMap[slot] + + if block != nil { + dbEpoch.BlockCount++ + blockBody := block.GetBlockBody() + if blockBody == nil { + logger.Errorf("Error while aggregating epoch blocks: canonical block body not found: %v", block.Slot) + continue + } + if blockFn != nil { + blockFn(block) + } - dbEpoch.BlockCount++ - dbEpoch.AttestationCount += uint64(len(block.Block.Data.Message.Body.Attestations)) - dbEpoch.DepositCount += uint64(len(block.Block.Data.Message.Body.Deposits)) - 
dbEpoch.ExitCount += uint64(len(block.Block.Data.Message.Body.VoluntaryExits)) - dbEpoch.AttesterSlashingCount += uint64(len(block.Block.Data.Message.Body.AttesterSlashings)) - dbEpoch.ProposerSlashingCount += uint64(len(block.Block.Data.Message.Body.ProposerSlashings)) - dbEpoch.BLSChangeCount += uint64(len(block.Block.Data.Message.Body.SignedBLSToExecutionChange)) - - syncAggregate := block.Block.Data.Message.Body.SyncAggregate - if syncAggregate != nil && epochStats.Assignments != nil && epochStats.Assignments.SyncAssignments != nil { - votedCount := 0 - assignedCount := len(epochStats.Assignments.SyncAssignments) - for i := 0; i < assignedCount; i++ { - if utils.BitAtVector(syncAggregate.SyncCommitteeBits, i) { - votedCount++ - } + dbEpoch.AttestationCount += uint64(len(blockBody.Message.Body.Attestations)) + dbEpoch.DepositCount += uint64(len(blockBody.Message.Body.Deposits)) + dbEpoch.ExitCount += uint64(len(blockBody.Message.Body.VoluntaryExits)) + dbEpoch.AttesterSlashingCount += uint64(len(blockBody.Message.Body.AttesterSlashings)) + dbEpoch.ProposerSlashingCount += uint64(len(blockBody.Message.Body.ProposerSlashings)) + dbEpoch.BLSChangeCount += uint64(len(blockBody.Message.Body.SignedBLSToExecutionChange)) + + syncAggregate := blockBody.Message.Body.SyncAggregate + if syncAggregate != nil && epochStats.syncAssignments != nil { + votedCount := 0 + assignedCount := len(epochStats.syncAssignments) + for i := 0; i < assignedCount; i++ { + if utils.BitAtVector(syncAggregate.SyncCommitteeBits, i) { + votedCount++ } - totalSyncAssigned += assignedCount - totalSyncVoted += votedCount } + totalSyncAssigned += assignedCount + totalSyncVoted += votedCount + } - if executionPayload := block.Block.Data.Message.Body.ExecutionPayload; executionPayload != nil { - dbEpoch.EthTransactionCount += uint64(len(executionPayload.Transactions)) - if executionPayload.Withdrawals != nil { - dbEpoch.WithdrawCount += uint64(len(executionPayload.Withdrawals)) - for _, withdrawal := range executionPayload.Withdrawals { - dbEpoch.WithdrawAmount += uint64(withdrawal.Amount) - } + if executionPayload := blockBody.Message.Body.ExecutionPayload; executionPayload != nil { + dbEpoch.EthTransactionCount += uint64(len(executionPayload.Transactions)) + if executionPayload.Withdrawals != nil { + dbEpoch.WithdrawCount += uint64(len(executionPayload.Withdrawals)) + for _, withdrawal := range executionPayload.Withdrawals { + dbEpoch.WithdrawAmount += uint64(withdrawal.Amount) } } } diff --git a/rpc/beaconapi.go b/rpc/beaconapi.go index 8716123f..75beb3ab 100644 --- a/rpc/beaconapi.go +++ b/rpc/beaconapi.go @@ -4,13 +4,11 @@ import ( "encoding/json" "errors" "fmt" - "io/ioutil" + "io" "net/http" "strconv" - "sync" "time" - "github.com/ethereum/go-ethereum/common/lru" "github.com/sirupsen/logrus" "github.com/pk910/light-beaconchain-explorer/rpctypes" @@ -20,19 +18,13 @@ import ( var logger = logrus.StandardLogger().WithField("module", "rpc") type BeaconClient struct { - endpoint string - assignmentsCache *lru.Cache[uint64, *rpctypes.EpochAssignments] - assignmentsCacheMux sync.Mutex + endpoint string } // NewBeaconClient is used to create a new beacon client -func NewBeaconClient(endpoint string, assignmentsCacheSize int) (*BeaconClient, error) { - if assignmentsCacheSize < 10 { - assignmentsCacheSize = 10 - } +func NewBeaconClient(endpoint string) (*BeaconClient, error) { client := &BeaconClient{ - endpoint: endpoint, - assignmentsCache: lru.NewCache[uint64, *rpctypes.EpochAssignments](assignmentsCacheSize), + endpoint: 
endpoint, } return client, nil @@ -51,7 +43,7 @@ func (bc *BeaconClient) get(url string) ([]byte, error) { defer resp.Body.Close() - data, err := ioutil.ReadAll(resp.Body) + data, err := io.ReadAll(resp.Body) if resp.StatusCode != http.StatusOK { if resp.StatusCode == http.StatusNotFound { @@ -78,7 +70,7 @@ func (bc *BeaconClient) getJson(url string, returnValue interface{}) error { if resp.StatusCode == http.StatusNotFound { return errNotFound } - data, _ := ioutil.ReadAll(resp.Body) + data, _ := io.ReadAll(resp.Body) return fmt.Errorf("url: %v, error-response: %s", url, data) } @@ -127,6 +119,19 @@ func (bc *BeaconClient) GetNodeSyncing() (*rpctypes.StandardV1NodeSyncingRespons return &parsedSyncingStatus, nil } +func (bc *BeaconClient) GetNodeVersion() (*rpctypes.StandardV1NodeVersionResponse, error) { + var parsedRsp rpctypes.StandardV1NodeVersionResponse + err := bc.getJson(fmt.Sprintf("%s/eth/v1/node/version", bc.endpoint), &parsedRsp) + if err != nil { + if err == errNotFound { + // node version endpoint not found + return nil, nil + } + return nil, fmt.Errorf("error retrieving node version: %v", err) + } + return &parsedRsp, nil +} + func (bc *BeaconClient) GetLatestBlockHead() (*rpctypes.StandardV1BeaconHeaderResponse, error) { resHeaders, err := bc.get(fmt.Sprintf("%s/eth/v1/beacon/headers/head", bc.endpoint)) if err != nil { @@ -231,37 +236,29 @@ func (bc *BeaconClient) GetProposerDuties(epoch uint64) (*rpctypes.StandardV1Pro return &parsedProposerResponse, nil } -func (bc *BeaconClient) GetEpochAssignments(epoch uint64) (*rpctypes.EpochAssignments, error) { - currentEpoch := utils.TimeToEpoch(time.Now()) - // don't cache current & last epoch as these might change due to reorgs - // the most recent epoch assignments are cached in the indexer anyway - cachable := epoch < uint64(currentEpoch)-1 - if cachable { - bc.assignmentsCacheMux.Lock() - cachedValue, found := bc.assignmentsCache.Get(epoch) - bc.assignmentsCacheMux.Unlock() - if found { - return cachedValue, nil - } - } - - epochAssignments, err := bc.getEpochAssignments(epoch) - if cachable && epochAssignments != nil && err == nil { - bc.assignmentsCacheMux.Lock() - bc.assignmentsCache.Add(epoch, epochAssignments) - bc.assignmentsCacheMux.Unlock() +func (bc *BeaconClient) GetCommitteeDuties(stateRef string, epoch uint64) (*rpctypes.StandardV1CommitteesResponse, error) { + var parsedCommittees rpctypes.StandardV1CommitteesResponse + err := bc.getJson(fmt.Sprintf("%s/eth/v1/beacon/states/%s/committees?epoch=%d", bc.endpoint, stateRef, epoch), &parsedCommittees) + if err != nil { + return nil, fmt.Errorf("error loading committee duties: %v", err) } - return epochAssignments, err + return &parsedCommittees, nil } -func (bc *BeaconClient) AddCachedEpochAssignments(epoch uint64, epochAssignments *rpctypes.EpochAssignments) { - bc.assignmentsCacheMux.Lock() - bc.assignmentsCache.Add(epoch, epochAssignments) - bc.assignmentsCacheMux.Unlock() +func (bc *BeaconClient) GetSyncCommitteeDuties(stateRef string, epoch uint64) (*rpctypes.StandardV1SyncCommitteesResponse, error) { + if epoch < utils.Config.Chain.Config.AltairForkEpoch { + return nil, fmt.Errorf("cannot get sync committee duties for epoch before altair: %v", epoch) + } + var parsedSyncCommittees rpctypes.StandardV1SyncCommitteesResponse + err := bc.getJson(fmt.Sprintf("%s/eth/v1/beacon/states/%s/sync_committees?epoch=%d", bc.endpoint, stateRef, epoch), &parsedSyncCommittees) + if err != nil { + return nil, fmt.Errorf("error loading sync committee duties: %v", err) + } + return
&parsedSyncCommittees, nil } // GetEpochAssignments will get the epoch assignments from Lighthouse RPC api -func (bc *BeaconClient) getEpochAssignments(epoch uint64) (*rpctypes.EpochAssignments, error) { +func (bc *BeaconClient) GetEpochAssignments(epoch uint64) (*rpctypes.EpochAssignments, error) { parsedProposerResponse, err := bc.GetProposerDuties(epoch) if err != nil { return nil, err @@ -279,8 +276,7 @@ func (bc *BeaconClient) getEpochAssignments(epoch uint64) (*rpctypes.EpochAssign assignments := &rpctypes.EpochAssignments{ DependendRoot: parsedProposerResponse.DependentRoot, - DependendState: parsedHeader.Data.Header.Message.StateRoot, - DependendIsGenesis: epoch == 0, + DependendStateRef: depStateRoot, ProposerAssignments: make(map[uint64]uint64), AttestorAssignments: make(map[string][]uint64), } @@ -291,8 +287,7 @@ func (bc *BeaconClient) getEpochAssignments(epoch uint64) (*rpctypes.EpochAssign } // Now use the state root to make a consistent committee query - var parsedCommittees rpctypes.StandardV1CommitteesResponse - err = bc.getJson(fmt.Sprintf("%s/eth/v1/beacon/states/%s/committees?epoch=%d", bc.endpoint, depStateRoot, epoch), &parsedCommittees) + parsedCommittees, err := bc.GetCommitteeDuties(depStateRoot, epoch) if err != nil { logger.Errorf("error retrieving committees data: %v", err) } else { @@ -313,13 +308,11 @@ func (bc *BeaconClient) getEpochAssignments(epoch uint64) (*rpctypes.EpochAssign } if epoch >= utils.Config.Chain.Config.AltairForkEpoch { - syncCommitteeState := fmt.Sprintf("%s", depStateRoot) + syncCommitteeState := depStateRoot if epoch > 0 && epoch == utils.Config.Chain.Config.AltairForkEpoch { syncCommitteeState = fmt.Sprintf("%d", utils.Config.Chain.Config.AltairForkEpoch*utils.Config.Chain.Config.SlotsPerEpoch) } - - var parsedSyncCommittees rpctypes.StandardV1SyncCommitteesResponse - err := bc.getJson(fmt.Sprintf("%s/eth/v1/beacon/states/%s/sync_committees?epoch=%d", bc.endpoint, syncCommitteeState, epoch), &parsedSyncCommittees) + parsedSyncCommittees, err := bc.GetSyncCommitteeDuties(syncCommitteeState, epoch) if err != nil { logger.Errorf("error retrieving sync_committees for epoch %v (state: %v): %v", epoch, syncCommitteeState, err) } else { @@ -339,9 +332,9 @@ func (bc *BeaconClient) getEpochAssignments(epoch uint64) (*rpctypes.EpochAssign return assignments, nil } -func (bc *BeaconClient) GetStateValidators(stateroot []byte) (*rpctypes.StandardV1StateValidatorsResponse, error) { +func (bc *BeaconClient) GetStateValidators(stateRef string) (*rpctypes.StandardV1StateValidatorsResponse, error) { var parsedResponse rpctypes.StandardV1StateValidatorsResponse - err := bc.getJson(fmt.Sprintf("%s/eth/v1/beacon/states/0x%x/validators", bc.endpoint, stateroot), &parsedResponse) + err := bc.getJson(fmt.Sprintf("%s/eth/v1/beacon/states/%v/validators", bc.endpoint, stateRef), &parsedResponse) if err != nil { return nil, fmt.Errorf("error retrieving state validators: %v", err) } diff --git a/rpc/beaconstream.go b/rpc/beaconstream.go index b13aad42..6a046caf 100644 --- a/rpc/beaconstream.go +++ b/rpc/beaconstream.go @@ -3,35 +3,49 @@ package rpc import ( "encoding/json" "fmt" + "strings" "sync" "time" "github.com/donovanhide/eventsource" + "github.com/pk910/light-beaconchain-explorer/rpc/eventstream" "github.com/pk910/light-beaconchain-explorer/rpctypes" ) +const ( + StreamBlockEvent uint16 = 0x01 + StreamHeadEvent uint16 = 0x02 + StreamFinalizedEvent uint16 = 0x04 +) + +type BeaconStreamEvent struct { + Event uint16 + Data interface{} +} + type BeaconStream 
struct { runMutex sync.Mutex running bool + ready bool + events uint16 endpoint string killChan chan bool - CloseChan chan bool - BlockChan chan *rpctypes.StandardV1StreamedBlockEvent - HeadChan chan *rpctypes.StandardV1StreamedHeadEvent + ReadyChan chan bool + EventChan chan *BeaconStreamEvent lastHeadSeen time.Time } -func (bc *BeaconClient) NewBlockStream() *BeaconStream { +func (bc *BeaconClient) NewBlockStream(events uint16) *BeaconStream { blockStream := BeaconStream{ running: true, + events: events, endpoint: bc.endpoint, killChan: make(chan bool), - CloseChan: make(chan bool), - BlockChan: make(chan *rpctypes.StandardV1StreamedBlockEvent, 10), - HeadChan: make(chan *rpctypes.StandardV1StreamedHeadEvent, 10), + ReadyChan: make(chan bool), + EventChan: make(chan *BeaconStreamEvent, 10), } - go blockStream.startStream(bc.endpoint) + go blockStream.startStream() return &blockStream } @@ -41,7 +55,7 @@ func (bs *BeaconStream) Start() { return } bs.running = true - go bs.startStream(bs.endpoint) + go bs.startStream() } func (bs *BeaconStream) Close() { @@ -53,12 +67,13 @@ func (bs *BeaconStream) Close() { defer bs.runMutex.Unlock() } -func (bs *BeaconStream) startStream(endpoint string) { +func (bs *BeaconStream) startStream() { bs.runMutex.Lock() defer bs.runMutex.Unlock() - stream := bs.subscribeStream(endpoint) + stream := bs.subscribeStream(bs.endpoint, bs.events) if stream != nil { + bs.ready = true running := true for running { select { @@ -68,17 +83,16 @@ func (bs *BeaconStream) startStream(endpoint string) { bs.processBlockEvent(evt) } else if evt.Event() == "head" { bs.processHeadEvent(evt) + } else if evt.Event() == "finalized_checkpoint" { + bs.processFinalizedEvent(evt) } case <-bs.killChan: running = false - case <-time.After(300 * time.Second): - // timeout - no block since 5 mins - logger.Errorf("beacon block stream error, no new head retrieved since %v (%v ago)", bs.lastHeadSeen, time.Since(bs.lastHeadSeen)) - stream.Close() - stream = bs.subscribeStream(endpoint) - if stream == nil { - running = false - } + case <-stream.Ready: + bs.ReadyChan <- true + case err := <-stream.Errors: + logger.Errorf("beacon block stream error: %v", err) + bs.ReadyChan <- false } } } @@ -86,14 +100,38 @@ func (bs *BeaconStream) startStream(endpoint string) { stream.Close() } bs.running = false - bs.CloseChan <- true } -func (bs *BeaconStream) subscribeStream(endpoint string) *eventsource.Stream { +func (bs *BeaconStream) subscribeStream(endpoint string, events uint16) *eventstream.Stream { + var topics strings.Builder + topicsCount := 0 + if events&StreamBlockEvent > 0 { + if topicsCount > 0 { + fmt.Fprintf(&topics, ",") + } + fmt.Fprintf(&topics, "block") + topicsCount++ + } + if events&StreamHeadEvent > 0 { + if topicsCount > 0 { + fmt.Fprintf(&topics, ",") + } + fmt.Fprintf(&topics, "head") + topicsCount++ + } + if events&StreamFinalizedEvent > 0 { + if topicsCount > 0 { + fmt.Fprintf(&topics, ",") + } + fmt.Fprintf(&topics, "finalized_checkpoint") + topicsCount++ + } + for { - stream, err := eventsource.Subscribe(fmt.Sprintf("%s/eth/v1/events?topics=block,head", endpoint), "") + url := fmt.Sprintf("%s/eth/v1/events?topics=%v", endpoint, topics.String()) + stream, err := eventstream.Subscribe(url, "") if err != nil { - logger.Errorf("Error while subscribing beacon event stream: %v", err) + logger.Errorf("Error while subscribing beacon event stream %v: %v", url, err) select { case <-bs.killChan: return nil @@ -113,7 +151,10 @@ func (bs *BeaconStream) processBlockEvent(evt 
eventsource.Event) { return } logger.Debugf("RPC block event! slot: %v, block: %v", parsed.Slot, parsed.Block) - bs.BlockChan <- &parsed + bs.EventChan <- &BeaconStreamEvent{ + Event: StreamBlockEvent, + Data: &parsed, + } } func (bs *BeaconStream) processHeadEvent(evt eventsource.Event) { @@ -125,5 +166,22 @@ func (bs *BeaconStream) processHeadEvent(evt eventsource.Event) { } logger.Debugf("RPC head event! slot: %v, block: %v, state: %v", parsed.Slot, parsed.Block, parsed.State) bs.lastHeadSeen = time.Now() - bs.HeadChan <- &parsed + bs.EventChan <- &BeaconStreamEvent{ + Event: StreamHeadEvent, + Data: &parsed, + } +} + +func (bs *BeaconStream) processFinalizedEvent(evt eventsource.Event) { + var parsed rpctypes.StandardV1StreamedFinalizedCheckpointEvent + err := json.Unmarshal([]byte(evt.Data()), &parsed) + if err != nil { + logger.Warnf("beacon block stream failed to decode finalized_checkpoint event: %v", err) + return + } + logger.Debugf("RPC finalized_checkpoint event! epoch: %v, block: %v, state: %v", parsed.Epoch, parsed.Block, parsed.State) + bs.EventChan <- &BeaconStreamEvent{ + Event: StreamFinalizedEvent, + Data: &parsed, + } } diff --git a/rpc/eventstream/eventstream.go b/rpc/eventstream/eventstream.go new file mode 100644 index 00000000..e7eac41c --- /dev/null +++ b/rpc/eventstream/eventstream.go @@ -0,0 +1,210 @@ +package eventstream + +import ( + "errors" + "fmt" + "io" + "log" + "net/http" + "sync" + "time" + + "github.com/donovanhide/eventsource" +) + +// Stream handles a connection for receiving Server Sent Events. +// It will try and reconnect if the connection is lost, respecting both +// received retry delays and event id's. +type Stream struct { + c *http.Client + req *http.Request + lastEventId string + retry time.Duration + // Events emits the events received by the stream + Events chan StreamEvent + Ready chan bool + // Errors emits any errors encountered while reading events from the stream. + // It's mainly for informative purposes - the client isn't required to take any + // action when an error is encountered. The stream will always attempt to continue, + // even if that involves reconnecting to the server. + Errors chan error + // Logger is a logger that, when set, will be used for logging debug messages + Logger *log.Logger + // isClosed is a marker that the stream is/should be closed + isClosed bool + // isClosedMutex is a mutex protecting concurrent read/write access of isClosed + closeMutex sync.Mutex +} + +type StreamEvent interface { + // Id is an identifier that can be used to allow a client to replay + // missed Events by returning the Last-Event-Id header. + // Return empty string if not required. + Id() string + // The name of the event. Return empty string if not required. + Event() string + // The payload of the event. + Data() string + Retry() int64 +} + +type SubscriptionError struct { + Code int + Message string +} + +func (e SubscriptionError) Error() string { + return fmt.Sprintf("%d: %s", e.Code, e.Message) +} + +// Subscribe to the Events emitted from the specified url. +// If lastEventId is non-empty it will be sent to the server in case it can replay missed events. +func Subscribe(url, lastEventId string) (*Stream, error) { + req, err := http.NewRequest("GET", url, nil) + if err != nil { + return nil, err + } + return SubscribeWithRequest(lastEventId, req) +} + +// SubscribeWithRequest will take an http.Request to setup the stream, allowing custom headers +// to be specified, authentication to be configured, etc. 
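+// A minimal usage sketch for this package (illustrative only; the endpoint URL
+// below is a placeholder and not part of this change). Ready signals
+// (re)connections, Events delivers decoded SSE events, Errors reports read failures:
+//
+//	stream, err := eventstream.Subscribe("http://127.0.0.1:5052/eth/v1/events?topics=head", "")
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	defer stream.Close()
+//	for {
+//		select {
+//		case <-stream.Ready:
+//			log.Println("stream (re)connected")
+//		case ev := <-stream.Events:
+//			log.Printf("event %v: %v", ev.Event(), ev.Data())
+//		case err := <-stream.Errors:
+//			log.Printf("stream error: %v", err)
+//		}
+//	}
+//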
+func SubscribeWithRequest(lastEventId string, request *http.Request) (*Stream, error) { + return SubscribeWith(lastEventId, http.DefaultClient, request) +} + +// SubscribeWith takes a http client and request providing customization over both headers and +// control over the http client settings (timeouts, tls, etc) +func SubscribeWith(lastEventId string, client *http.Client, request *http.Request) (*Stream, error) { + stream := &Stream{ + c: client, + req: request, + lastEventId: lastEventId, + retry: time.Millisecond * 3000, + Events: make(chan StreamEvent), + Errors: make(chan error, 10), + Ready: make(chan bool), + } + stream.c.CheckRedirect = checkRedirect + + r, err := stream.connect() + if err != nil { + return nil, err + } + go stream.stream(r) + return stream, nil +} + +// Close will close the stream. It is safe for concurrent access and can be called multiple times. +func (stream *Stream) Close() { + go func() { + stream.closeMutex.Lock() + defer stream.closeMutex.Unlock() + if stream.isClosed { + return + } + fmt.Printf("closed event stream\n") + + stream.isClosed = true + close(stream.Errors) + close(stream.Events) + }() +} + +// Go's http package doesn't copy headers across when it encounters +// redirects so we need to do that manually. +func checkRedirect(req *http.Request, via []*http.Request) error { + if len(via) >= 10 { + return errors.New("stopped after 10 redirects") + } + for k, vv := range via[0].Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + return nil +} + +func (stream *Stream) connect() (r io.ReadCloser, err error) { + var resp *http.Response + stream.req.Header.Set("Cache-Control", "no-cache") + stream.req.Header.Set("Accept", "text/event-stream") + if len(stream.lastEventId) > 0 { + stream.req.Header.Set("Last-Event-ID", stream.lastEventId) + } + if resp, err = stream.c.Do(stream.req); err != nil { + return + } + if resp.StatusCode != 200 { + message, _ := io.ReadAll(resp.Body) + err = SubscriptionError{ + Code: resp.StatusCode, + Message: string(message), + } + } + r = resp.Body + return +} + +func (stream *Stream) stream(r io.ReadCloser) { + defer r.Close() + + stream.Ready <- true + + // receives events until an error is encountered + stream.receiveEvents(r) + + // tries to reconnect and start the stream again + stream.retryRestartStream() +} + +func (stream *Stream) receiveEvents(r io.ReadCloser) { + dec := eventsource.NewDecoder(r) + + for { + ev, err := dec.Decode() + stream.closeMutex.Lock() + if stream.isClosed { + stream.closeMutex.Unlock() + return + } + if err != nil { + stream.Errors <- err + stream.closeMutex.Unlock() + return + } + stream.closeMutex.Unlock() + + pub := ev.(StreamEvent) + if pub.Retry() > 0 { + stream.retry = time.Duration(pub.Retry()) * time.Millisecond + } + if len(pub.Id()) > 0 { + stream.lastEventId = pub.Id() + } + stream.Events <- pub + } +} + +func (stream *Stream) retryRestartStream() { + backoff := stream.retry + for { + if stream.Logger != nil { + stream.Logger.Printf("Reconnecting in %0.4f secs\n", backoff.Seconds()) + } + time.Sleep(backoff) + if stream.isClosed { + return + } + // NOTE: because of the defer we're opening the new connection + // before closing the old one. Shouldn't be a problem in practice, + // but something to be aware of. 
+ r, err := stream.connect() + if err == nil { + go stream.stream(r) + return + } + stream.Errors <- err + backoff = 10 * time.Second + } +} diff --git a/rpctypes/beaconapi.go b/rpctypes/beaconapi.go index c5bf13d1..74a4ce5c 100644 --- a/rpctypes/beaconapi.go +++ b/rpctypes/beaconapi.go @@ -16,6 +16,13 @@ type StandardV1StreamedHeadEvent struct { ExecutionOptimistic bool `json:"execution_optimistic"` } +type StandardV1StreamedFinalizedCheckpointEvent struct { + Epoch Uint64Str `json:"epoch"` + Block BytesHexStr `json:"block"` + State BytesHexStr `json:"state"` + ExecutionOptimistic bool `json:"execution_optimistic"` +} + type StandardV1BeaconHeaderResponse struct { Finalized bool `json:"finalized"` Data struct { @@ -40,8 +47,9 @@ type StandardV2BeaconBlockResponse struct { } type CombinedBlockResponse struct { - Header *StandardV1BeaconHeaderResponse - Block *StandardV2BeaconBlockResponse + Root []byte + Header *SignedBeaconBlockHeader + Block *SignedBeaconBlock Blobs *StandardV1BlobSidecarsResponse Orphaned bool } @@ -72,8 +80,7 @@ type StandardV1SyncCommitteesResponse struct { type EpochAssignments struct { DependendRoot BytesHexStr `json:"dep_root"` - DependendState BytesHexStr `json:"dep_state"` - DependendIsGenesis bool `json:"dep_genesis"` + DependendStateRef string `json:"dep_state"` ProposerAssignments map[uint64]uint64 `json:"prop"` AttestorAssignments map[string][]uint64 `json:"att"` SyncAssignments []uint64 `json:"sync"` @@ -104,3 +111,9 @@ type StandardV1NodeSyncingResponse struct { ElOffline bool `json:"el_offline"` } `json:"data"` } + +type StandardV1NodeVersionResponse struct { + Data struct { + Version string `json:"version"` + } `json:"data"` +} diff --git a/services/beaconservice.go b/services/beaconservice.go index 669c4cf3..78690803 100644 --- a/services/beaconservice.go +++ b/services/beaconservice.go @@ -1,20 +1,22 @@ package services import ( + "encoding/json" + "sort" "strings" "sync" "time" + "github.com/ethereum/go-ethereum/common/lru" "github.com/pk910/light-beaconchain-explorer/db" "github.com/pk910/light-beaconchain-explorer/dbtypes" "github.com/pk910/light-beaconchain-explorer/indexer" - "github.com/pk910/light-beaconchain-explorer/rpc" "github.com/pk910/light-beaconchain-explorer/rpctypes" "github.com/pk910/light-beaconchain-explorer/utils" + "github.com/sirupsen/logrus" ) type BeaconService struct { - rpcClient *rpc.BeaconClient indexer *indexer.Indexer validatorNames *ValidatorNames @@ -24,6 +26,9 @@ type BeaconService struct { epochLimit uint64 activity map[uint64]uint8 } + + assignmentsCacheMux sync.Mutex + assignmentsCache *lru.Cache[uint64, *rpctypes.EpochAssignments] } var GlobalBeaconService *BeaconService @@ -34,18 +39,13 @@ func StartBeaconService() error { return nil } - rpcClient, err := rpc.NewBeaconClient(utils.Config.BeaconApi.Endpoint, utils.Config.BeaconApi.AssignmentsCacheSize) + indexer, err := indexer.NewIndexer() if err != nil { return err } - indexer, err := indexer.NewIndexer(rpcClient) - if err != nil { - return err - } - err = indexer.Start() - if err != nil { - return err + for idx, endpoint := range utils.Config.BeaconApi.Endpoints { + indexer.AddClient(uint8(idx), endpoint.Name, endpoint.Url, endpoint.Archive, endpoint.Priority) } validatorNames := &ValidatorNames{} @@ -57,9 +57,9 @@ func StartBeaconService() error { } GlobalBeaconService = &BeaconService{ - rpcClient: rpcClient, - indexer: indexer, - validatorNames: validatorNames, + indexer: indexer, + validatorNames: validatorNames, + assignmentsCache: lru.NewCache[uint64, 
*rpctypes.EpochAssignments](10), } return nil } @@ -72,12 +72,8 @@ func (bs *BeaconService) GetCachedValidatorSet() *rpctypes.StandardV1StateValida return bs.indexer.GetCachedValidatorSet() } -func (bs *BeaconService) GetFinalizedBlockHead() (*rpctypes.StandardV1BeaconHeaderResponse, error) { - return bs.rpcClient.GetFinalizedBlockHead() -} - -func (bs *BeaconService) GetLowestCachedSlot() int64 { - return bs.indexer.GetLowestCachedSlot() +func (bs *BeaconService) GetFinalizedEpoch() (int64, []byte) { + return bs.indexer.GetFinalizedEpoch() } func (bs *BeaconService) GetCachedEpochStats(epoch uint64) *indexer.EpochStats { @@ -85,38 +81,40 @@ func (bs *BeaconService) GetCachedEpochStats(epoch uint64) *indexer.EpochStats { } func (bs *BeaconService) GetGenesis() (*rpctypes.StandardV1GenesisResponse, error) { - return bs.rpcClient.GetGenesis() + return bs.indexer.GetRpcClient(false, nil).GetGenesis() } func (bs *BeaconService) GetSlotDetailsByBlockroot(blockroot []byte, withBlobs bool) (*rpctypes.CombinedBlockResponse, error) { var result *rpctypes.CombinedBlockResponse if blockInfo := bs.indexer.GetCachedBlock(blockroot); blockInfo != nil { result = &rpctypes.CombinedBlockResponse{ - Header: blockInfo.Header, - Block: blockInfo.Block, - Orphaned: blockInfo.Orphaned, + Root: blockInfo.Root, + Header: blockInfo.GetHeader(), + Block: blockInfo.GetBlockBody(), + Orphaned: !blockInfo.IsCanonical(bs.indexer, nil), } } else { - header, err := bs.rpcClient.GetBlockHeaderByBlockroot(blockroot) + header, err := bs.indexer.GetRpcClient(false, blockroot).GetBlockHeaderByBlockroot(blockroot) if err != nil { return nil, err } if header == nil { return nil, nil } - block, err := bs.rpcClient.GetBlockBodyByBlockroot(header.Data.Root) + block, err := bs.indexer.GetRpcClient(false, blockroot).GetBlockBodyByBlockroot(header.Data.Root) if err != nil { return nil, err } result = &rpctypes.CombinedBlockResponse{ - Header: header, - Block: block, + Root: header.Data.Root, + Header: &header.Data.Header, + Block: &block.Data, Orphaned: !header.Data.Canonical, } } - if result.Block.Data.Message.Body.BlobKzgCommitments != nil && withBlobs && utils.EpochOfSlot(uint64(result.Header.Data.Header.Message.Slot)) >= utils.Config.Chain.Config.DenebForkEpoch { - blobs, _ := bs.rpcClient.GetBlobSidecarsByBlockroot(result.Header.Data.Root) + if result.Block.Message.Body.BlobKzgCommitments != nil && withBlobs && utils.EpochOfSlot(uint64(result.Header.Message.Slot)) >= utils.Config.Chain.Config.DenebForkEpoch { + blobs, _ := bs.indexer.GetRpcClient(true, blockroot).GetBlobSidecarsByBlockroot(result.Root) if blobs != nil { result.Blobs = blobs } @@ -127,9 +125,9 @@ func (bs *BeaconService) GetSlotDetailsByBlockroot(blockroot []byte, withBlobs b func (bs *BeaconService) GetSlotDetailsBySlot(slot uint64, withBlobs bool) (*rpctypes.CombinedBlockResponse, error) { var result *rpctypes.CombinedBlockResponse if cachedBlocks := bs.indexer.GetCachedBlocks(slot); cachedBlocks != nil { - var cachedBlock *indexer.BlockInfo + var cachedBlock *indexer.CacheBlock for _, block := range cachedBlocks { - if !block.Orphaned { + if block.IsCanonical(bs.indexer, nil) { cachedBlock = block break } @@ -138,31 +136,33 @@ func (bs *BeaconService) GetSlotDetailsBySlot(slot uint64, withBlobs bool) (*rpc cachedBlock = cachedBlocks[0] } result = &rpctypes.CombinedBlockResponse{ - Header: cachedBlock.Header, - Block: cachedBlock.Block, - Orphaned: cachedBlock.Orphaned, + Root: cachedBlock.Root, + Header: cachedBlock.GetHeader(), + Block: 
cachedBlock.GetBlockBody(), + Orphaned: !cachedBlock.IsCanonical(bs.indexer, nil), } } else { - header, err := bs.rpcClient.GetBlockHeaderBySlot(slot) + header, err := bs.indexer.GetRpcClient(false, nil).GetBlockHeaderBySlot(slot) if err != nil { return nil, err } if header == nil { return nil, nil } - block, err := bs.rpcClient.GetBlockBodyByBlockroot(header.Data.Root) + block, err := bs.indexer.GetRpcClient(false, header.Data.Root).GetBlockBodyByBlockroot(header.Data.Root) if err != nil { return nil, err } result = &rpctypes.CombinedBlockResponse{ - Header: header, - Block: block, + Root: header.Data.Root, + Header: &header.Data.Header, + Block: &block.Data, Orphaned: !header.Data.Canonical, } } - if result.Block.Data.Message.Body.BlobKzgCommitments != nil && withBlobs && utils.EpochOfSlot(uint64(result.Header.Data.Header.Message.Slot)) >= utils.Config.Chain.Config.DenebForkEpoch { - blobs, _ := bs.rpcClient.GetBlobSidecarsByBlockroot(result.Header.Data.Root) + if result.Block.Message.Body.BlobKzgCommitments != nil && withBlobs && utils.EpochOfSlot(uint64(result.Header.Message.Slot)) >= utils.Config.Chain.Config.DenebForkEpoch { + blobs, _ := bs.indexer.GetRpcClient(true, result.Root).GetBlobSidecarsByBlockroot(result.Root) if blobs != nil { result.Blobs = blobs } @@ -171,7 +171,7 @@ func (bs *BeaconService) GetSlotDetailsBySlot(slot uint64, withBlobs bool) (*rpc } func (bs *BeaconService) GetBlobSidecarsByBlockRoot(blockroot []byte) (*rpctypes.StandardV1BlobSidecarsResponse, error) { - return bs.rpcClient.GetBlobSidecarsByBlockroot(blockroot) + return bs.indexer.GetRpcClient(true, blockroot).GetBlobSidecarsByBlockroot(blockroot) } func (bs *BeaconService) GetOrphanedBlock(blockroot []byte) *rpctypes.CombinedBlockResponse { @@ -179,87 +179,121 @@ func (bs *BeaconService) GetOrphanedBlock(blockroot []byte) *rpctypes.CombinedBl if orphanedBlock == nil { return nil } - blockInfo := indexer.ParseOrphanedBlock(orphanedBlock) - if blockInfo == nil { + + var header rpctypes.SignedBeaconBlockHeader + err := json.Unmarshal([]byte(orphanedBlock.Header), &header) + if err != nil { + logrus.Warnf("Error parsing orphaned block header from db: %v", err) + return nil + } + var block rpctypes.SignedBeaconBlock + err = json.Unmarshal([]byte(orphanedBlock.Block), &block) + if err != nil { + logrus.Warnf("Error parsing orphaned block body from db: %v", err) return nil } return &rpctypes.CombinedBlockResponse{ - Header: blockInfo.Header, - Block: blockInfo.Block, + Root: orphanedBlock.Root, + Header: &header, + Block: &block, Blobs: nil, Orphaned: true, } } func (bs *BeaconService) GetCachedBlockByBlockroot(blockroot []byte) *rpctypes.CombinedBlockResponse { - blockInfo := bs.indexer.GetCachedBlock(blockroot) - if blockInfo == nil { + cachedBlock := bs.indexer.GetCachedBlock(blockroot) + if cachedBlock == nil { return nil } return &rpctypes.CombinedBlockResponse{ - Header: blockInfo.Header, - Block: blockInfo.Block, - Orphaned: blockInfo.Orphaned, + Root: cachedBlock.Root, + Header: cachedBlock.GetHeader(), + Block: cachedBlock.GetBlockBody(), + Orphaned: !cachedBlock.IsCanonical(bs.indexer, nil), } } func (bs *BeaconService) GetCachedBlockByStateroot(stateroot []byte) *rpctypes.CombinedBlockResponse { - blockInfo := bs.indexer.GetCachedBlockByStateroot(stateroot) - if blockInfo == nil { + cachedBlock := bs.indexer.GetCachedBlockByStateroot(stateroot) + if cachedBlock == nil { return nil } return &rpctypes.CombinedBlockResponse{ - Header: blockInfo.Header, - Block: blockInfo.Block, - Orphaned: 
blockInfo.Orphaned, + Root: cachedBlock.Root, + Header: cachedBlock.GetHeader(), + Block: cachedBlock.GetBlockBody(), + Orphaned: !cachedBlock.IsCanonical(bs.indexer, nil), } } func (bs *BeaconService) GetEpochAssignments(epoch uint64) (*rpctypes.EpochAssignments, error) { - idxMinSlot := bs.indexer.GetLowestCachedSlot() - if idxMinSlot >= 0 && epoch >= utils.EpochOfSlot(uint64(idxMinSlot)) { + finalizedEpoch, _ := bs.indexer.GetFinalizedEpoch() + + if int64(epoch) > finalizedEpoch { epochStats := bs.indexer.GetCachedEpochStats(epoch) if epochStats != nil { - return epochStats.Assignments, nil + epochAssignments := &rpctypes.EpochAssignments{ + DependendRoot: epochStats.DependentRoot, + DependendStateRef: epochStats.GetDependentStateRef(), + ProposerAssignments: epochStats.GetProposerAssignments(), + AttestorAssignments: epochStats.GetAttestorAssignments(), + SyncAssignments: epochStats.GetSyncAssignments(), + } + return epochAssignments, nil } else { return nil, nil } } - return bs.rpcClient.GetEpochAssignments(epoch) + bs.assignmentsCacheMux.Lock() + epochAssignments, found := bs.assignmentsCache.Get(epoch) + bs.assignmentsCacheMux.Unlock() + if found { + return epochAssignments, nil + } + + var err error + epochAssignments, err = bs.indexer.GetRpcClient(true, nil).GetEpochAssignments(epoch) + if err != nil { + return nil, err + } + + bs.assignmentsCacheMux.Lock() + bs.assignmentsCache.Add(epoch, epochAssignments) + bs.assignmentsCacheMux.Unlock() + + return epochAssignments, nil } func (bs *BeaconService) GetProposerAssignments(firstEpoch uint64, lastEpoch uint64) (proposerAssignments map[uint64]uint64, synchronizedEpochs map[uint64]bool) { proposerAssignments = make(map[uint64]uint64) synchronizedEpochs = make(map[uint64]bool) - idxMinSlot := bs.indexer.GetLowestCachedSlot() - if idxMinSlot >= 0 { - idxMinEpoch := utils.EpochOfSlot(uint64(idxMinSlot)) - idxHeadEpoch := bs.indexer.GetHeadSlot() - if firstEpoch > idxHeadEpoch { - firstEpoch = idxHeadEpoch - } - - if firstEpoch >= uint64(idxMinEpoch) { - var epoch uint64 - for epochIdx := int64(firstEpoch); epochIdx >= int64(idxMinEpoch) && epochIdx >= int64(lastEpoch); epochIdx-- { - epoch = uint64(epochIdx) + finalizedEpoch, _ := bs.indexer.GetFinalizedEpoch() + idxMinEpoch := finalizedEpoch + 1 + idxHeadEpoch := utils.EpochOfSlot(bs.indexer.GetHighestSlot()) + if firstEpoch > idxHeadEpoch { + firstEpoch = idxHeadEpoch + } - epochStats := bs.indexer.GetCachedEpochStats(epoch) - if epochStats != nil && epochStats.Assignments != nil { - synchronizedEpochs[epoch] = true - for slot, vidx := range epochStats.Assignments.ProposerAssignments { - proposerAssignments[slot] = vidx - } + if firstEpoch >= uint64(idxMinEpoch) { + var epoch uint64 + for epochIdx := int64(firstEpoch); epochIdx >= int64(idxMinEpoch) && epochIdx >= int64(lastEpoch); epochIdx-- { + epoch = uint64(epochIdx) + epochStats := bs.indexer.GetCachedEpochStats(epoch) + if epochStats != nil { + synchronizedEpochs[epoch] = true + for slot, vidx := range epochStats.GetProposerAssignments() { + proposerAssignments[slot] = vidx } } - if epoch <= lastEpoch { - return - } - firstEpoch = epoch } + if epoch <= lastEpoch { + return + } + firstEpoch = epoch } // load from db @@ -281,12 +315,10 @@ func (bs *BeaconService) GetDbEpochs(firstEpoch uint64, limit uint32) []*dbtypes dbIdx := 0 dbCnt := len(dbEpochs) - idxMinSlot := bs.indexer.GetLowestCachedSlot() + finalizedEpoch, _ := bs.indexer.GetFinalizedEpoch() var idxMinEpoch, idxHeadEpoch uint64 - if idxMinSlot >= 0 { - idxMinEpoch = 
utils.EpochOfSlot(uint64(idxMinSlot)) - idxHeadEpoch = utils.EpochOfSlot(bs.indexer.GetHeadSlot()) - } + idxMinEpoch = uint64(finalizedEpoch + 1) + idxHeadEpoch = utils.EpochOfSlot(bs.indexer.GetHighestSlot()) lastEpoch := int64(firstEpoch) - int64(limit) if lastEpoch < 0 { @@ -299,7 +331,7 @@ func (bs *BeaconService) GetDbEpochs(firstEpoch uint64, limit uint32) []*dbtypes resEpoch = dbEpochs[dbIdx] dbIdx++ } - if idxMinSlot >= 0 && epoch >= idxMinEpoch && epoch <= idxHeadEpoch { + if epoch >= idxMinEpoch && epoch <= idxHeadEpoch { resEpoch = bs.indexer.BuildLiveEpoch(epoch) } if resEpoch != nil { @@ -315,8 +347,9 @@ func (bs *BeaconService) GetDbBlocks(firstSlot uint64, limit int32, withOrphaned resBlocks := make([]*dbtypes.Block, limit) resIdx := 0 - idxMinSlot := bs.indexer.GetLowestCachedSlot() - idxHeadSlot := bs.indexer.GetHeadSlot() + finalizedEpoch, _ := bs.indexer.GetFinalizedEpoch() + idxMinSlot := (finalizedEpoch + 1) * int64(utils.Config.Chain.Config.SlotsPerEpoch) + idxHeadSlot := bs.indexer.GetHighestSlot() if firstSlot > idxHeadSlot { firstSlot = idxHeadSlot } @@ -329,7 +362,7 @@ func (bs *BeaconService) GetDbBlocks(firstSlot uint64, limit int32, withOrphaned if blocks != nil { for bidx := 0; bidx < len(blocks) && resIdx < int(limit); bidx++ { block := blocks[bidx] - if block.Orphaned && !withOrphaned { + if !withOrphaned && !block.IsCanonical(bs.indexer, nil) { continue } dbBlock := bs.indexer.BuildLiveBlock(block) @@ -345,12 +378,13 @@ func (bs *BeaconService) GetDbBlocks(firstSlot uint64, limit int32, withOrphaned } } - if resIdx < int(limit) && slot >= 0 { + if resIdx < int(limit) { dbBlocks := db.GetBlocks(slot, uint32(limit-int32(resIdx)), withOrphaned) - if dbBlocks != nil { - for idx := 0; idx < len(dbBlocks) && resIdx < int(limit); idx++ { - resBlocks[resIdx] = dbBlocks[idx] - resIdx++ + for _, dbBlock := range dbBlocks { + resBlocks[resIdx] = dbBlock + resIdx++ + if resIdx >= int(limit) { + break } } } @@ -361,8 +395,9 @@ func (bs *BeaconService) GetDbBlocks(firstSlot uint64, limit int32, withOrphaned func (bs *BeaconService) GetDbBlocksForSlots(firstSlot uint64, slotLimit uint32, withOrphaned bool) []*dbtypes.Block { resBlocks := make([]*dbtypes.Block, 0) - idxMinSlot := bs.indexer.GetLowestCachedSlot() - idxHeadSlot := bs.indexer.GetHeadSlot() + finalizedEpoch, _ := bs.indexer.GetFinalizedEpoch() + idxMinSlot := (finalizedEpoch + 1) * int64(utils.Config.Chain.Config.SlotsPerEpoch) + idxHeadSlot := bs.indexer.GetHighestSlot() if firstSlot > idxHeadSlot { firstSlot = idxHeadSlot } @@ -381,7 +416,7 @@ func (bs *BeaconService) GetDbBlocksForSlots(firstSlot uint64, slotLimit uint32, if blocks != nil { for bidx := 0; bidx < len(blocks); bidx++ { block := blocks[bidx] - if block.Orphaned && !withOrphaned { + if !withOrphaned && !block.IsCanonical(bs.indexer, nil) { continue } dbBlock := bs.indexer.BuildLiveBlock(block) @@ -396,7 +431,7 @@ func (bs *BeaconService) GetDbBlocksForSlots(firstSlot uint64, slotLimit uint32, } } - if slot > lastSlot && slot >= 0 { + if slot > lastSlot { dbBlocks := db.GetBlocksForSlots(slot, lastSlot, withOrphaned) if dbBlocks != nil { for idx := 0; idx < len(dbBlocks); idx++ { @@ -409,9 +444,10 @@ func (bs *BeaconService) GetDbBlocksForSlots(firstSlot uint64, slotLimit uint32, } func (bs *BeaconService) GetDbBlocksByGraffiti(graffiti string, pageIdx uint64, pageSize uint32, withOrphaned bool) []*dbtypes.Block { - cachedMatches := make([]*indexer.BlockInfo, 0) - idxMinSlot := bs.indexer.GetLowestCachedSlot() - idxHeadSlot := 
bs.indexer.GetHeadSlot() + cachedMatches := make([]*indexer.CacheBlock, 0) + finalizedEpoch, _ := bs.indexer.GetFinalizedEpoch() + idxMinSlot := (finalizedEpoch + 1) * int64(utils.Config.Chain.Config.SlotsPerEpoch) + idxHeadSlot := bs.indexer.GetHighestSlot() if idxMinSlot >= 0 { for slotIdx := int64(idxHeadSlot); slotIdx >= int64(idxMinSlot); slotIdx-- { slot := uint64(slotIdx) @@ -419,10 +455,10 @@ func (bs *BeaconService) GetDbBlocksByGraffiti(graffiti string, pageIdx uint64, if blocks != nil { for bidx := 0; bidx < len(blocks); bidx++ { block := blocks[bidx] - if block.Orphaned && !withOrphaned { + if !withOrphaned && !block.IsCanonical(bs.indexer, nil) { continue } - blockGraffiti := string(block.Block.Data.Message.Body.Graffiti) + blockGraffiti := string(block.GetBlockBody().Message.Body.Graffiti) if !strings.Contains(blockGraffiti, graffiti) { continue } @@ -475,11 +511,7 @@ func (bs *BeaconService) GetDbBlocksByGraffiti(graffiti string, pageIdx uint64, } else { dbBlocks = db.GetBlocksWithGraffiti(graffiti, dbMinSlot, (dbPage-1)*uint64(pageSize)+dbCacheOffset, pageSize+1, withOrphaned) } - if dbBlocks != nil { - for _, dbBlock := range dbBlocks { - resBlocks = append(resBlocks, dbBlock) - } - } + resBlocks = append(resBlocks, dbBlocks...) return resBlocks } @@ -487,56 +519,58 @@ func (bs *BeaconService) GetDbBlocksByGraffiti(graffiti string, pageIdx uint64, func (bs *BeaconService) GetDbBlocksByProposer(proposer uint64, pageIdx uint64, pageSize uint32, withMissing bool, withOrphaned bool) []*dbtypes.AssignedBlock { cachedMatches := make([]struct { slot uint64 - block *indexer.BlockInfo + block *indexer.CacheBlock }, 0) - idxMinSlot := bs.indexer.GetLowestCachedSlot() - idxHeadSlot := bs.indexer.GetHeadSlot() - if idxMinSlot >= 0 { + finalizedEpoch, _ := bs.indexer.GetFinalizedEpoch() + idxMinSlot := (finalizedEpoch + 1) * int64(utils.Config.Chain.Config.SlotsPerEpoch) + + // get proposed blocks + proposedMap := map[uint64]bool{} + for _, block := range bs.indexer.GetCachedBlocksByProposer(proposer) { + if !withOrphaned && !block.IsCanonical(bs.indexer, nil) { + continue + } + proposedMap[block.Slot] = true + cachedMatches = append(cachedMatches, struct { + slot uint64 + block *indexer.CacheBlock + }{ + slot: block.Slot, + block: block, + }) + } + + if withMissing { + // add missed blocks + idxHeadSlot := bs.indexer.GetHighestSlot() idxHeadEpoch := utils.EpochOfSlot(idxHeadSlot) idxMinEpoch := utils.EpochOfSlot(uint64(idxMinSlot)) for epochIdx := int64(idxHeadEpoch); epochIdx >= int64(idxMinEpoch); epochIdx-- { epoch := uint64(epochIdx) epochStats := bs.indexer.GetCachedEpochStats(epoch) - if epochStats == nil || epochStats.Assignments == nil { + if epochStats == nil { continue } - - for slot, assigned := range epochStats.Assignments.ProposerAssignments { + for slot, assigned := range epochStats.GetProposerAssignments() { if assigned != proposer { continue } - blocks := bs.indexer.GetCachedBlocks(slot) - haveBlock := false - if blocks != nil { - for bidx := 0; bidx < len(blocks); bidx++ { - block := blocks[bidx] - if block.Orphaned && !withOrphaned { - continue - } - if uint64(block.Block.Data.Message.ProposerIndex) != proposer { - continue - } - cachedMatches = append(cachedMatches, struct { - slot uint64 - block *indexer.BlockInfo - }{ - slot: slot, - block: block, - }) - haveBlock = true - } - } - if !haveBlock && withMissing { - cachedMatches = append(cachedMatches, struct { - slot uint64 - block *indexer.BlockInfo - }{ - slot: slot, - block: nil, - }) + if proposedMap[slot] { + 
continue } + cachedMatches = append(cachedMatches, struct { + slot uint64 + block *indexer.CacheBlock + }{ + slot: slot, + block: nil, + }) } - + sort.Slice(cachedMatches, func(a, b int) bool { + slotA := cachedMatches[a].slot + slotB := cachedMatches[b].slot + return slotA > slotB + }) } } @@ -598,11 +632,7 @@ func (bs *BeaconService) GetDbBlocksByProposer(proposer uint64, pageIdx uint64, } else { dbBlocks = db.GetAssignedBlocks(proposer, dbMinSlot, (dbPage-1)*uint64(pageSize)+dbCacheOffset, pageSize+1, withOrphaned) } - if dbBlocks != nil { - for _, dbBlock := range dbBlocks { - resBlocks = append(resBlocks, dbBlock) - } - } + resBlocks = append(resBlocks, dbBlocks...) return resBlocks } @@ -611,17 +641,19 @@ func (bs *BeaconService) GetValidatorActivity() (map[uint64]uint8, uint64) { activityMap := map[uint64]uint8{} epochLimit := uint64(3) - idxHeadSlot := bs.indexer.GetHeadSlot() + idxHeadSlot := bs.indexer.GetHighestSlot() idxHeadEpoch := utils.EpochOfSlot(idxHeadSlot) if idxHeadEpoch < 1 { return activityMap, 0 } idxHeadEpoch-- - idxMinSlot := bs.indexer.GetLowestCachedSlot() - if idxMinSlot < 0 { - return activityMap, 0 + finalizedEpoch, _ := bs.indexer.GetFinalizedEpoch() + var idxMinEpoch uint64 + if finalizedEpoch < 0 { + idxMinEpoch = 0 + } else { + idxMinEpoch = uint64(finalizedEpoch + 1) } - idxMinEpoch := utils.EpochOfSlot(uint64(idxMinSlot)) activityEpoch := utils.EpochOfSlot(idxHeadSlot - 1) bs.validatorActivityMutex.Lock() @@ -639,7 +671,7 @@ func (bs *BeaconService) GetValidatorActivity() (map[uint64]uint8, uint64) { for epochIdx := int64(idxHeadEpoch); epochIdx >= int64(idxMinEpoch); epochIdx-- { epoch := uint64(epochIdx) - epochVotes := bs.indexer.GetEpochVotes(epoch) + _, epochVotes := bs.indexer.GetEpochVotes(epoch) for valIdx := range epochVotes.ActivityMap { activityMap[valIdx]++ } diff --git a/services/validatornames.go b/services/validatornames.go index bdb66a78..6e31070f 100644 --- a/services/validatornames.go +++ b/services/validatornames.go @@ -3,7 +3,7 @@ package services import ( "encoding/json" "fmt" - "io/ioutil" + "io" "net/http" "os" "strconv" @@ -95,7 +95,7 @@ func (vn *ValidatorNames) LoadFromRangesApi(apiUrl string) error { logrus.Errorf("Could not fetch validator names from inventory (%v): not found", apiUrl) return nil } - data, _ := ioutil.ReadAll(resp.Body) + data, _ := io.ReadAll(resp.Body) return fmt.Errorf("url: %v, error-response: %s", apiUrl, data) } rangesResponse := &validatorNamesRangesResponse{} diff --git a/test-config.yaml b/test-config.yaml index 3b02ec77..9d4ffdfe 100644 --- a/test-config.yaml +++ b/test-config.yaml @@ -1,5 +1,5 @@ logging: - outputLevel: "info" + #outputLevel: "debug" #outputStderr: false #filePath: "explorer.log" @@ -32,18 +32,26 @@ frontend: #validatorNamesInventory: "https://config.4844-devnet-7.ethpandaops.io/api/v1/nodes/validator-ranges" beaconapi: - endpoint: "http://10.16.71.108:5052" + endpoints: + - name: "pk01" + url: "http://10.16.71.108:5052" + priority: 4 + archive: true + - name: "lh" + url: "http://10.16.97.2:5052" + priority: 5 + - name: "teku" + url: "http://10.16.97.3:5051" + priority: 5 #endpoint: "http://10.16.71.102:5052" localCacheSize: 100 # 100MB redisCacheAddr: "" redisCachePrefix: "" indexer: - prepopulateEpochs: 2 - inMemoryEpochs: 10 - epochProcessingDelay: 2 + inMemoryEpochs: 2 disableIndexWriter: false - syncEpochCooldown: 2 + syncEpochCooldown: 1 database: engine: "sqlite" diff --git a/types/config.go b/types/config.go index e2e135a6..7bb0362b 100644 --- a/types/config.go +++ 
b/types/config.go @@ -44,7 +44,8 @@ type Config struct { } `yaml:"frontend"` BeaconApi struct { - Endpoint string `yaml:"endpoint" envconfig:"BEACONAPI_ENDPOINT"` + Endpoint string `yaml:"endpoint" envconfig:"BEACONAPI_ENDPOINT"` + Endpoints []EndpointConfig `yaml:"endpoints"` LocalCacheSize int `yaml:"localCacheSize" envconfig:"BEACONAPI_LOCAL_CACHE_SIZE"` AssignmentsCacheSize int `yaml:"assignmentsCacheSize" envconfig:"BEACONAPI_ASSIGNMENTS_CACHE_SIZE"` @@ -53,11 +54,9 @@ type Config struct { } `yaml:"beaconapi"` Indexer struct { - PrepopulateEpochs uint16 `yaml:"prepopulateEpochs" envconfig:"INDEXER_PREPOPULATE_EPOCHS"` - InMemoryEpochs uint16 `yaml:"inMemoryEpochs" envconfig:"INDEXER_IN_MEMORY_EPOCHS"` - EpochProcessingDelay uint16 `yaml:"epochProcessingDelay" envconfig:"INDEXER_EPOCH_PROCESSING_DELAY"` - DisableIndexWriter bool `yaml:"disableIndexWriter" envconfig:"INDEXER_DISABLE_INDEX_WRITER"` - SyncEpochCooldown uint `yaml:"syncEpochCooldown" envconfig:"INDEXER_SYNC_EPOCH_COOLDOWN"` + InMemoryEpochs uint16 `yaml:"inMemoryEpochs" envconfig:"INDEXER_IN_MEMORY_EPOCHS"` + DisableIndexWriter bool `yaml:"disableIndexWriter" envconfig:"INDEXER_DISABLE_INDEX_WRITER"` + SyncEpochCooldown uint `yaml:"syncEpochCooldown" envconfig:"INDEXER_SYNC_EPOCH_COOLDOWN"` } `yaml:"indexer"` Database struct { @@ -88,6 +87,13 @@ type Config struct { } `yaml:"database"` } +type EndpointConfig struct { + Url string `yaml:"url"` + Name string `yaml:"name"` + Archive bool `yaml:"archive"` + Priority int `yaml:"priority"` +} + type SqliteDatabaseConfig struct { File string MaxOpenConns int diff --git a/types/models/indexPage.go b/types/models/indexPage.go index e47d2755..757cc298 100644 --- a/types/models/indexPage.go +++ b/types/models/indexPage.go @@ -10,7 +10,7 @@ type IndexPageData struct { DepositContract string `json:"depositContract"` ShowSyncingMessage bool `json:"show_sync_message"` CurrentEpoch uint64 `json:"current_epoch"` - CurrentFinalizedEpoch uint64 `json:"current_finalized_epoch"` + CurrentFinalizedEpoch int64 `json:"current_finalized_epoch"` CurrentSlot uint64 `json:"current_slot"` CurrentSlotIndex uint64 `json:"current_slot_index"` CurrentScheduledCount uint64 `json:"current_scheduled_count"` diff --git a/utils/config.go b/utils/config.go index 87bcbae4..4503a445 100644 --- a/utils/config.go +++ b/utils/config.go @@ -91,6 +91,19 @@ func ReadConfig(cfg *types.Config, path string) error { } } + // endpoints + if cfg.BeaconApi.Endpoints == nil && cfg.BeaconApi.Endpoint != "" { + cfg.BeaconApi.Endpoints = []types.EndpointConfig{ + { + Url: cfg.BeaconApi.Endpoint, + Name: "default", + }, + } + } + if cfg.BeaconApi.Endpoints == nil || len(cfg.BeaconApi.Endpoints) == 0 { + return fmt.Errorf("missing beacon node endpoints (need at least 1 endpoint to run the explorer)") + } + log.WithFields(log.Fields{ "genesisTimestamp": cfg.Chain.GenesisTimestamp, "configName": cfg.Chain.Config.ConfigName, diff --git a/utils/format.go b/utils/format.go index 4b69f8f7..2cfdeff1 100644 --- a/utils/format.go +++ b/utils/format.go @@ -331,7 +331,7 @@ func FormatValidatorWithIndex(index uint64, name string) template.HTML { } func FormatRecentTimeShort(ts time.Time) template.HTML { - duration := ts.Sub(time.Now()) + duration := time.Until(ts) var timeStr string absDuraction := duration.Abs() if absDuraction < 1*time.Second { diff --git a/utils/logging.go b/utils/logging.go index d602dd7b..0409f7fa 100644 --- a/utils/logging.go +++ b/utils/logging.go @@ -4,7 +4,6 @@ import ( "errors" "fmt" "io" - "io/ioutil" "os" 
"path/filepath" "runtime" @@ -18,7 +17,7 @@ type LogWriter struct { } func InitLogger() *LogWriter { - logger.SetOutput(ioutil.Discard) // Send all logs to nowhere by default + logger.SetOutput(io.Discard) // Send all logs to nowhere by default logger.SetLevel(logger.TraceLevel) logWriter := &LogWriter{} diff --git a/utils/templateFucs.go b/utils/templateFucs.go index 75501c00..2c6b86e9 100644 --- a/utils/templateFucs.go +++ b/utils/templateFucs.go @@ -3,9 +3,9 @@ package utils import ( "bytes" "html/template" - "io/ioutil" "math" "math/big" + "os" "strings" "unicode/utf8" @@ -58,7 +58,7 @@ func GetTemplateFuncs() template.FuncMap { // IncludeHTML adds html to the page func IncludeHTML(path string) template.HTML { - b, err := ioutil.ReadFile(path) + b, err := os.ReadFile(path) if err != nil { logger.Printf("includeHTML - error reading file: %v", err) return ""