Skip to content

Commit

Permalink
Genesis block: no journal data and no pruning
Browse files Browse the repository at this point in the history
  • Loading branch information
qdm12 committed Jan 12, 2023
1 parent 1ec8af4 commit ad3101e
Show file tree
Hide file tree
Showing 6 changed files with 21 additions and 12 deletions.
2 changes: 1 addition & 1 deletion dot/node.go
Original file line number Diff line number Diff line change
Expand Up @@ -166,7 +166,7 @@ func (*nodeBuilder) initNode(cfg *Config) error {
LogLevel: cfg.Global.LogLvl,
PrunerCfg: state.PrunerConfig{
Enabled: cfg.Global.Pruning,
RetainBlocks: uint32(cfg.Global.RetainBlocks),
RetainBlocks: cfg.Global.RetainBlocks,
},
Telemetry: telemetryMailer,
Metrics: metrics.NewIntervalConfig(cfg.Global.PublishMetrics),
Expand Down
2 changes: 1 addition & 1 deletion dot/services.go
Original file line number Diff line number Diff line change
Expand Up @@ -66,7 +66,7 @@ func (nodeBuilder) createStateService(cfg *Config) (*state.Service, error) {
LogLevel: cfg.Log.StateLvl,
PrunerCfg: state.PrunerConfig{
Enabled: cfg.Global.Pruning,
RetainBlocks: uint32(cfg.Global.RetainBlocks),
RetainBlocks: cfg.Global.RetainBlocks,
},
Metrics: metrics.NewIntervalConfig(cfg.Global.PublishMetrics),
}
Expand Down
6 changes: 6 additions & 0 deletions internal/pruner/full/pruner.go
Original file line number Diff line number Diff line change
Expand Up @@ -101,6 +101,12 @@ func (p *Pruner) RecordAndPrune(deletedNodeHashes, insertedNodeHashes map[common
blockNumber, p.nextBlockNumberToPrune))
}

if blockNumber == 0 {
// The genesis block has no node hash deletion, and no re-inserted node hashes.
// There are no node hashes to be pruned either.
return nil
}

// Delist re-inserted keys from being pruned.
// WARNING: this must be before the pruning to avoid
// pruning still needed database keys.
Expand Down
12 changes: 2 additions & 10 deletions internal/pruner/full/pruner_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -36,8 +36,6 @@ func Test_Pruner(t *testing.T) {

// Block 0 hash 100
setNodeHashesInStorageDB(t, storageDB, []common.Hash{{1}, {2}})
logger.EXPECT().Debugf("journal data stored for block number %d and block hash %s",
uint32(0), "0x64000000...00000000")
err = pruner.RecordAndPrune(
map[common.Hash]struct{}{}, // first block has no deleted node hashes
map[common.Hash]struct{}{{1}: {}, {2}: {}}, // inserted node hashes
Expand All @@ -46,10 +44,8 @@ func Test_Pruner(t *testing.T) {
)
require.NoError(t, err)
keyValuePairs = keyValueMap{
"storage_" + string(common.Hash{1}.ToBytes()): []byte{0x99},
"storage_" + string(common.Hash{2}.ToBytes()): []byte{0x99},
"journal_block_number_to_hash_0": common.Hash{100}.ToBytes(),
"journal_" + string(scaleEncodeJournalKey(0, common.Hash{100})): scale.MustMarshal([]common.Hash(nil)),
"storage_" + string(common.Hash{1}.ToBytes()): []byte{0x99},
"storage_" + string(common.Hash{2}.ToBytes()): []byte{0x99},
}
assertDatabaseContent(t, database, keyValuePairs)

Expand All @@ -70,9 +66,7 @@ func Test_Pruner(t *testing.T) {
"storage_" + string(common.Hash{3}.ToBytes()): []byte{0x99},
"storage_" + string(common.Hash{4}.ToBytes()): []byte{0x99},
"journal_highest_block_number": scale.MustMarshal(uint32(1)),
"journal_block_number_to_hash_0": common.Hash{100}.ToBytes(),
"journal_block_number_to_hash_1": common.Hash{101}.ToBytes(),
"journal_" + string(scaleEncodeJournalKey(0, common.Hash{100})): scale.MustMarshal([]common.Hash(nil)),
"journal_" + string(scaleEncodeJournalKey(1, common.Hash{101})): scale.MustMarshal([]common.Hash{{1}}),
"journal_deleted_" + string(common.Hash{1}.ToBytes()): scale.MustMarshal(
[]journalKey{{1, common.Hash{101}}}),
Expand All @@ -98,9 +92,7 @@ func Test_Pruner(t *testing.T) {
"storage_" + string(common.Hash{5}.ToBytes()): []byte{0x99},
"storage_" + string(common.Hash{6}.ToBytes()): []byte{0x99},
"journal_highest_block_number": scale.MustMarshal(uint32(1)),
"journal_block_number_to_hash_0": common.Hash{100}.ToBytes(),
"journal_block_number_to_hash_1": concatHashes([]common.Hash{{101}, {102}}),
"journal_" + string(scaleEncodeJournalKey(0, common.Hash{100})): scale.MustMarshal([]common.Hash(nil)),
"journal_" + string(scaleEncodeJournalKey(1, common.Hash{101})): scale.MustMarshal([]common.Hash{{1}}),
"journal_" + string(scaleEncodeJournalKey(1, common.Hash{102})): scale.MustMarshal([]common.Hash{{3}}),
"journal_deleted_" + string(common.Hash{1}.ToBytes()): scale.MustMarshal(
Expand Down
5 changes: 5 additions & 0 deletions internal/pruner/full/pruning.go
Original file line number Diff line number Diff line change
Expand Up @@ -50,6 +50,11 @@ func (p *Pruner) pruneAll(journalDBBatch PutDeleter) (err error) {

func prune(blockNumberToPrune uint32, journalDB Getter, journalDBBatch Deleter,
storageBatch Deleter) (err error) {
if blockNumberToPrune == 0 {
// There is no deletion in the first block, so nothing can be pruned.
return nil
}

blockHashes, err := loadBlockHashes(blockNumberToPrune, journalDB)
if err != nil {
return fmt.Errorf("loading block hashes for block number to prune: %w", err)
Expand Down
6 changes: 6 additions & 0 deletions internal/pruner/full/pruning_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -250,6 +250,12 @@ func Test_prune(t *testing.T) {
errWrapped error
errMessage string
}{
"nothing to do for block number 0": {
blockNumberToPrune: 0,
journalDBBuilder: func(ctrl *gomock.Controller) Getter { return nil },
journalBatchBuilder: func(_ *gomock.Controller) Deleter { return nil },
storageBatchBuilder: func(_ *gomock.Controller) Deleter { return nil },
},
"load block hashes error": {
blockNumberToPrune: 1,
journalDBBuilder: func(ctrl *gomock.Controller) Getter {
Expand Down

0 comments on commit ad3101e

Please sign in to comment.