Skip to content

Commit

Permalink
Use common.Hash for node hashes
Browse files Browse the repository at this point in the history
  • Loading branch information
qdm12 committed Dec 9, 2022
1 parent 0b9d1a4 commit adec1e3
Show file tree
Hide file tree
Showing 4 changed files with 66 additions and 51 deletions.
14 changes: 13 additions & 1 deletion dot/state/storage.go
Original file line number Diff line number Diff line change
Expand Up @@ -76,7 +76,19 @@ func (s *StorageState) StoreTrie(ts *rtstorage.TrieState, header *types.Header)
return fmt.Errorf("failed to get state trie inserted keys: block %s %w", header.Hash(), err)
}

err = s.pruner.StoreJournalRecord(deletedMerkleValues, insertedMerkleValues, header.Hash(), int64(header.Number))
// Temporary workaround until merkle values are dropped in favour of node hashes, in another PR (already opened).
insertedNodeHashes := make(map[common.Hash]struct{}, len(insertedMerkleValues))
for k := range insertedMerkleValues {
nodeHash := common.NewHash([]byte(k))
insertedNodeHashes[nodeHash] = struct{}{}
}
deletedNodeHashes := make(map[common.Hash]struct{}, len(deletedMerkleValues))
for k := range deletedMerkleValues {
nodeHash := common.NewHash([]byte(k))
deletedNodeHashes[nodeHash] = struct{}{}
}

err = s.pruner.StoreJournalRecord(deletedNodeHashes, insertedNodeHashes, header.Hash(), uint32(header.Number))
if err != nil {
return err
}
Expand Down
2 changes: 1 addition & 1 deletion internal/pruner/archive.go
Original file line number Diff line number Diff line change
Expand Up @@ -14,6 +14,6 @@ func NewArchiveNode() *ArchiveNode {
}

// StoreJournalRecord for archive node doesn't do anything.
func (*ArchiveNode) StoreJournalRecord(_, _ map[string]struct{}, _ common.Hash, _ uint32) (_ error) {
func (*ArchiveNode) StoreJournalRecord(_, _ map[common.Hash]struct{}, _ common.Hash, _ uint32) (_ error) {
return nil
}
28 changes: 18 additions & 10 deletions internal/pruner/full.go
Original file line number Diff line number Diff line change
Expand Up @@ -55,10 +55,10 @@ type journalKey struct {
// journalRecord holds the trie node deltas produced by one block,
// keyed by node hash, as stored in the pruner journal database.
type journalRecord struct {
	// InsertedNodeHashes is the set of node hashes of the trie nodes
	// inserted in the trie for the block.
	InsertedNodeHashes map[common.Hash]struct{}
	// DeletedNodeHashes is the set of node hashes of the trie nodes
	// removed from the trie for the block.
	DeletedNodeHashes map[common.Hash]struct{}
}

// NewFullNode creates a full node pruner.
Expand Down Expand Up @@ -106,7 +106,7 @@ func NewFullNode(journalDB JournalDatabase, storageDB ChainDBNewBatcher, retainB
// StoreJournalRecord stores the trie deltas impacting the storage database for a particular
// block hash. It prunes all block numbers falling off the window of block numbers to keep,
// before inserting the new record. It is thread safe to call.
func (p *FullNode) StoreJournalRecord(deletedNodeHashes, insertedNodeHashes map[string]struct{},
func (p *FullNode) StoreJournalRecord(deletedNodeHashes, insertedNodeHashes map[common.Hash]struct{},
blockHash common.Hash, blockNumber uint32) (err error) {
p.mutex.Lock()
defer p.mutex.Unlock()
Expand Down Expand Up @@ -185,24 +185,25 @@ func (p *FullNode) StoreJournalRecord(deletedNodeHashes, insertedNodeHashes map[
return nil
}

func (p *FullNode) handleInsertedKeys(insertedNodeHashes map[string]struct{},
func (p *FullNode) handleInsertedKeys(insertedNodeHashes map[common.Hash]struct{},
blockNumber uint32, blockHash common.Hash, journalDBBatch Putter) (err error) {
for insertedNodeHash := range insertedNodeHashes {
err = p.handleInsertedKey(insertedNodeHash, blockNumber, blockHash, journalDBBatch)
if err != nil {
return fmt.Errorf("handling inserted key 0x%x: %w",
[]byte(insertedNodeHash), err)
return fmt.Errorf("handling inserted key %s: %w",
insertedNodeHash, err)
}
}

return nil
}

func (p *FullNode) handleInsertedKey(insertedNodeHash string, blockNumber uint32,
func (p *FullNode) handleInsertedKey(insertedNodeHash common.Hash, blockNumber uint32,
blockHash common.Hash, journalDBBatch Putter) (err error) {
// Try to find if the node hash was deleted in another block before
// since we no longer want to prune it, as it was re-inserted.
journalKeyDeletedAt, err := p.journalDatabase.Get([]byte(deletedNodeHashKeyPrefix + insertedNodeHash))
deletedNodeHashKey := makeDeletedKey(insertedNodeHash)
journalKeyDeletedAt, err := p.journalDatabase.Get(deletedNodeHashKey)
nodeHashDeletedInAnotherBlock := errors.Is(err, chaindb.ErrKeyNotFound)
if !nodeHashDeletedInAnotherBlock {
return nil
Expand Down Expand Up @@ -323,7 +324,7 @@ func pruneStorage(blockNumber uint32, blockHashes []common.Hash,
}

for deletedNodeHash := range record.DeletedNodeHashes {
err = batch.Del([]byte(deletedNodeHash))
err = batch.Del(deletedNodeHash.ToBytes())
if err != nil {
return fmt.Errorf("deleting key from batch: %w", err)
}
Expand Down Expand Up @@ -372,7 +373,7 @@ func storeJournalRecord(batch Putter, blockNumber uint32, blockHash common.Hash,
// We store the block hash + block number for each deleted node hash
// so a node hash can quickly be checked for from the journal database
// when running `handleInsertedKey`.
databaseKey := []byte(deletedNodeHashKeyPrefix + deletedNodeHash)
databaseKey := makeDeletedKey(deletedNodeHash)
err = batch.Put(databaseKey, encodedKey)
if err != nil {
return fmt.Errorf("putting journal key in database batch: %w", err)
Expand Down Expand Up @@ -501,3 +502,10 @@ func pruneBlockHashes(blockNumber uint32, batch Deleter) (err error) {
}
return nil
}

// makeDeletedKey builds the journal database key flagging the given
// node hash as deleted: the deleted-node-hash key prefix followed by
// the raw bytes of the hash.
func makeDeletedKey(hash common.Hash) (key []byte) {
	prefix := []byte(deletedNodeHashKeyPrefix)
	key = make([]byte, 0, len(prefix)+common.HashLength)
	key = append(key, prefix...)
	return append(key, hash.ToBytes()...)
}
73 changes: 34 additions & 39 deletions internal/pruner/full_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -95,7 +95,7 @@ func Test_FullNode_pruneAll(t *testing.T) {
key := journalKey{BlockNumber: 1, BlockHash: common.Hash{2}}
encodedKey := scaleMarshal(t, key)
record := journalRecord{
DeletedNodeHashes: map[string]struct{}{"deletedhash": {}},
DeletedNodeHashes: map[common.Hash]struct{}{{3}: {}},
}
encodedRecord := scaleMarshal(t, record)
database.EXPECT().Get(encodedKey).Return(encodedRecord, nil)
Expand All @@ -106,7 +106,7 @@ func Test_FullNode_pruneAll(t *testing.T) {
storageDatabase := NewMockChainDBNewBatcher(ctrl)
batch := NewMockBatch(ctrl)
storageDatabase.EXPECT().NewBatch().Return(batch)
batch.EXPECT().Del([]byte("deletedhash")).Return(nil)
batch.EXPECT().Del(common.Hash{3}.ToBytes()).Return(nil)
batch.EXPECT().Reset()
return storageDatabase
},
Expand Down Expand Up @@ -147,7 +147,7 @@ func Test_FullNode_pruneAll(t *testing.T) {
key := journalKey{BlockNumber: 1, BlockHash: common.Hash{2}}
encodedKey := scaleMarshal(t, key)
record := journalRecord{
DeletedNodeHashes: map[string]struct{}{"deletedhash": {}},
DeletedNodeHashes: map[common.Hash]struct{}{{3}: {}},
}
encodedRecord := scaleMarshal(t, record)
database.EXPECT().Get(encodedKey).Return(encodedRecord, nil)
Expand All @@ -158,7 +158,7 @@ func Test_FullNode_pruneAll(t *testing.T) {
storageDatabase := NewMockChainDBNewBatcher(ctrl)
batch := NewMockBatch(ctrl)
storageDatabase.EXPECT().NewBatch().Return(batch)
batch.EXPECT().Del([]byte("deletedhash")).Return(nil)
batch.EXPECT().Del(common.Hash{3}.ToBytes()).Return(nil)
batch.EXPECT().Flush().Return(errTest)
return storageDatabase
},
Expand Down Expand Up @@ -198,7 +198,7 @@ func Test_FullNode_pruneAll(t *testing.T) {
key := journalKey{BlockNumber: 1, BlockHash: common.Hash{2}}
encodedKey := scaleMarshal(t, key)
record := journalRecord{
DeletedNodeHashes: map[string]struct{}{"deletedhash": {}},
DeletedNodeHashes: map[common.Hash]struct{}{{3}: {}},
}
encodedRecord := scaleMarshal(t, record)
database.EXPECT().Get(encodedKey).Return(encodedRecord, nil)
Expand All @@ -209,7 +209,7 @@ func Test_FullNode_pruneAll(t *testing.T) {
storageDatabase := NewMockChainDBNewBatcher(ctrl)
batch := NewMockBatch(ctrl)
storageDatabase.EXPECT().NewBatch().Return(batch)
batch.EXPECT().Del([]byte("deletedhash")).Return(nil)
batch.EXPECT().Del(common.Hash{3}.ToBytes()).Return(nil)
batch.EXPECT().Flush().Return(nil)
return storageDatabase
},
Expand Down Expand Up @@ -329,7 +329,7 @@ func Test_prune(t *testing.T) {
key := journalKey{BlockNumber: 1, BlockHash: common.Hash{2}}
encodedKey := scaleMarshal(t, key)
record := journalRecord{
DeletedNodeHashes: map[string]struct{}{"deletedhash": {}},
DeletedNodeHashes: map[common.Hash]struct{}{{3}: {}},
}
encodedRecord := scaleMarshal(t, record)
database.EXPECT().Get(encodedKey).Return(encodedRecord, nil)
Expand All @@ -342,7 +342,7 @@ func Test_prune(t *testing.T) {
},
storageBatchBuilder: func(ctrl *gomock.Controller) Deleter {
batch := NewMockDeleter(ctrl)
batch.EXPECT().Del([]byte("deletedhash")).Return(nil)
batch.EXPECT().Del(common.Hash{3}.ToBytes()).Return(nil)
return batch
},
errWrapped: errTest,
Expand All @@ -359,7 +359,7 @@ func Test_prune(t *testing.T) {
key := journalKey{BlockNumber: 1, BlockHash: common.Hash{2}}
encodedKey := scaleMarshal(t, key)
record := journalRecord{
DeletedNodeHashes: map[string]struct{}{"deletedhash": {}},
DeletedNodeHashes: map[common.Hash]struct{}{{3}: {}},
}
encodedRecord := scaleMarshal(t, record)
database.EXPECT().Get(encodedKey).Return(encodedRecord, nil)
Expand All @@ -375,7 +375,7 @@ func Test_prune(t *testing.T) {
},
storageBatchBuilder: func(ctrl *gomock.Controller) Deleter {
batch := NewMockDeleter(ctrl)
batch.EXPECT().Del([]byte("deletedhash")).Return(nil)
batch.EXPECT().Del(common.Hash{3}.ToBytes()).Return(nil)
return batch
},
},
Expand Down Expand Up @@ -434,16 +434,16 @@ func Test_pruneStorage(t *testing.T) {
database := NewMockGetter(ctrl)
key := journalKey{BlockNumber: 10, BlockHash: common.Hash{1}}
encodedKey := scaleMarshal(t, key)
record := journalRecord{DeletedNodeHashes: map[string]struct{}{
"deleted_hash": {},
record := journalRecord{DeletedNodeHashes: map[common.Hash]struct{}{
{3}: {},
}}
encodedRecord := scaleMarshal(t, record)
database.EXPECT().Get(encodedKey).Return(encodedRecord, nil)
return database
},
batchBuilder: func(ctrl *gomock.Controller) Deleter {
batch := NewMockDeleter(ctrl)
batch.EXPECT().Del([]byte("deleted_hash")).Return(errTest)
batch.EXPECT().Del(common.Hash{3}.ToBytes()).Return(errTest)
return batch
},
errWrapped: errTest,
Expand All @@ -457,28 +457,23 @@ func Test_pruneStorage(t *testing.T) {

key1 := journalKey{BlockNumber: 10, BlockHash: common.Hash{1}}
encodedKey1 := scaleMarshal(t, key1)
record1 := journalRecord{DeletedNodeHashes: map[string]struct{}{
"deleted_hash_1": {},
"deleted_hash_2": {},
}}
record1 := journalRecord{DeletedNodeHashes: map[common.Hash]struct{}{{11}: {}, {12}: {}}}
encodedRecord1 := scaleMarshal(t, record1)
database.EXPECT().Get(encodedKey1).Return(encodedRecord1, nil)

key2 := journalKey{BlockNumber: 10, BlockHash: common.Hash{2}}
encodedKey2 := scaleMarshal(t, key2)
record2 := journalRecord{DeletedNodeHashes: map[string]struct{}{
"deleted_hash_3": {},
}}
record2 := journalRecord{DeletedNodeHashes: map[common.Hash]struct{}{{13}: {}}}
encodedRecord2 := scaleMarshal(t, record2)
database.EXPECT().Get(encodedKey2).Return(encodedRecord2, nil)

return database
},
batchBuilder: func(ctrl *gomock.Controller) Deleter {
batch := NewMockDeleter(ctrl)
batch.EXPECT().Del([]byte("deleted_hash_1")).Return(nil)
batch.EXPECT().Del([]byte("deleted_hash_2")).Return(nil)
batch.EXPECT().Del([]byte("deleted_hash_3")).Return(nil)
batch.EXPECT().Del(common.Hash{11}.ToBytes()).Return(nil)
batch.EXPECT().Del(common.Hash{12}.ToBytes()).Return(nil)
batch.EXPECT().Del(common.Hash{13}.ToBytes()).Return(nil)
return batch
},
},
Expand Down Expand Up @@ -589,57 +584,57 @@ func Test_storeJournalRecord(t *testing.T) {
"deleted node hash put error": {
batchBuilder: func(ctrl *gomock.Controller) Putter {
database := NewMockPutter(ctrl)
databaseKey := []byte("deleted_" + "deletedhash")
databaseKey := makeDeletedKey(common.Hash{3})
encodedKey := scaleMarshal(t, journalKey{BlockNumber: 1, BlockHash: common.Hash{2}})
database.EXPECT().Put(databaseKey, encodedKey).Return(errTest)
return database
},
blockNumber: 1,
blockHash: common.Hash{2},
record: journalRecord{DeletedNodeHashes: map[string]struct{}{"deletedhash": {}}},
record: journalRecord{DeletedNodeHashes: map[common.Hash]struct{}{{3}: {}}},
errWrapped: errTest,
errMessage: "putting journal key in database batch: test error",
},
"encoded record put error": {
batchBuilder: func(ctrl *gomock.Controller) Putter {
database := NewMockPutter(ctrl)
databaseKey := []byte("deleted_" + "deletedhash")
databaseKey := makeDeletedKey(common.Hash{3})
encodedKey := scaleMarshal(t, journalKey{BlockNumber: 1, BlockHash: common.Hash{2}})
database.EXPECT().Put(databaseKey, encodedKey).Return(nil)
encodedRecord := scaleMarshal(t, journalRecord{
DeletedNodeHashes: map[string]struct{}{"deletedhash": {}},
InsertedNodeHashes: map[string]struct{}{"insertedhash": {}},
DeletedNodeHashes: map[common.Hash]struct{}{{3}: {}},
InsertedNodeHashes: map[common.Hash]struct{}{{4}: {}},
})
database.EXPECT().Put(encodedKey, encodedRecord).Return(errTest)
return database
},
blockNumber: 1,
blockHash: common.Hash{2},
record: journalRecord{
DeletedNodeHashes: map[string]struct{}{"deletedhash": {}},
InsertedNodeHashes: map[string]struct{}{"insertedhash": {}},
DeletedNodeHashes: map[common.Hash]struct{}{{3}: {}},
InsertedNodeHashes: map[common.Hash]struct{}{{4}: {}},
},
errWrapped: errTest,
errMessage: "putting journal record in database batch: test error",
},
"success": {
batchBuilder: func(ctrl *gomock.Controller) Putter {
database := NewMockPutter(ctrl)
databaseKey := []byte("deleted_" + "deletedhash")
databaseKey := makeDeletedKey(common.Hash{3})
encodedKey := scaleMarshal(t, journalKey{BlockNumber: 1, BlockHash: common.Hash{2}})
database.EXPECT().Put(databaseKey, encodedKey).Return(nil)
encodedRecord := scaleMarshal(t, journalRecord{
DeletedNodeHashes: map[string]struct{}{"deletedhash": {}},
InsertedNodeHashes: map[string]struct{}{"insertedhash": {}},
DeletedNodeHashes: map[common.Hash]struct{}{{3}: {}},
InsertedNodeHashes: map[common.Hash]struct{}{{4}: {}},
})
database.EXPECT().Put(encodedKey, encodedRecord).Return(nil)
return database
},
blockNumber: 1,
blockHash: common.Hash{2},
record: journalRecord{
DeletedNodeHashes: map[string]struct{}{"deletedhash": {}},
InsertedNodeHashes: map[string]struct{}{"insertedhash": {}},
DeletedNodeHashes: map[common.Hash]struct{}{{3}: {}},
InsertedNodeHashes: map[common.Hash]struct{}{{4}: {}},
},
},
}
Expand Down Expand Up @@ -704,17 +699,17 @@ func Test_getJournalRecord(t *testing.T) {
database := NewMockGetter(ctrl)
expectedKey := scaleMarshal(t, journalKey{BlockNumber: 1, BlockHash: common.Hash{2}})
returnedValue := scaleMarshal(t, journalRecord{
InsertedNodeHashes: map[string]struct{}{"a": {}, "b": {}},
DeletedNodeHashes: map[string]struct{}{"b": {}, "c": {}},
InsertedNodeHashes: map[common.Hash]struct{}{{1}: {}, {2}: {}},
DeletedNodeHashes: map[common.Hash]struct{}{{2}: {}, {3}: {}},
})
database.EXPECT().Get(expectedKey).Return(returnedValue, nil)
return database
},
blockNumber: 1,
blockHash: common.Hash{2},
record: journalRecord{
InsertedNodeHashes: map[string]struct{}{"a": {}, "b": {}},
DeletedNodeHashes: map[string]struct{}{"b": {}, "c": {}},
InsertedNodeHashes: map[common.Hash]struct{}{{1}: {}, {2}: {}},
DeletedNodeHashes: map[common.Hash]struct{}{{2}: {}, {3}: {}},
},
},
}
Expand Down

0 comments on commit adec1e3

Please sign in to comment.