diff --git a/dot/state/storage.go b/dot/state/storage.go
index c732b8a2369..ef492f83096 100644
--- a/dot/state/storage.go
+++ b/dot/state/storage.go
@@ -76,7 +76,19 @@ func (s *StorageState) StoreTrie(ts *rtstorage.TrieState, header *types.Header)
 		return fmt.Errorf("failed to get state trie inserted keys: block %s %w", header.Hash(), err)
 	}
 
-	err = s.pruner.StoreJournalRecord(deletedMerkleValues, insertedMerkleValues, header.Hash(), int64(header.Number))
+	// Temporary work-around until we drop merkle values for node hashes in another PR (already opened).
+	insertedNodeHashes := make(map[common.Hash]struct{}, len(insertedMerkleValues))
+	for k := range insertedMerkleValues {
+		nodeHash := common.NewHash([]byte(k))
+		insertedNodeHashes[nodeHash] = struct{}{}
+	}
+	deletedNodeHashes := make(map[common.Hash]struct{}, len(deletedMerkleValues))
+	for k := range deletedMerkleValues {
+		nodeHash := common.NewHash([]byte(k))
+		deletedNodeHashes[nodeHash] = struct{}{}
+	}
+
+	err = s.pruner.StoreJournalRecord(deletedNodeHashes, insertedNodeHashes, header.Hash(), uint32(header.Number))
 	if err != nil {
 		return err
 	}
diff --git a/internal/pruner/archive.go b/internal/pruner/archive.go
index 7fe01df2477..bb8cb33655a 100644
--- a/internal/pruner/archive.go
+++ b/internal/pruner/archive.go
@@ -14,6 +14,6 @@ func NewArchiveNode() *ArchiveNode {
 }
 
 // StoreJournalRecord for archive node doesn't do anything.
-func (*ArchiveNode) StoreJournalRecord(_, _ map[string]struct{}, _ common.Hash, _ uint32) (_ error) {
+func (*ArchiveNode) StoreJournalRecord(_, _ map[common.Hash]struct{}, _ common.Hash, _ uint32) (_ error) {
 	return nil
 }
diff --git a/internal/pruner/full.go b/internal/pruner/full.go
index 947f5ae0858..d322db86abc 100644
--- a/internal/pruner/full.go
+++ b/internal/pruner/full.go
@@ -55,10 +55,10 @@ type journalKey struct {
 type journalRecord struct {
 	// InsertedNodeHashes is the set of node hashes of the trie nodes
 	// inserted in the trie for the block.
-	InsertedNodeHashes map[string]struct{}
+	InsertedNodeHashes map[common.Hash]struct{}
 	// DeletedNodeHashes is the set of node hashes of the trie nodes
 	// removed from the trie for the block.
-	DeletedNodeHashes map[string]struct{}
+	DeletedNodeHashes map[common.Hash]struct{}
 }
 
 // NewFullNode creates a full node pruner.
@@ -106,7 +106,7 @@ func NewFullNode(journalDB JournalDatabase, storageDB ChainDBNewBatcher, retainB
 // StoreJournalRecord stores the trie deltas impacting the storage database for a particular
 // block hash. It prunes all block numbers falling off the window of block numbers to keep,
 // before inserting the new record. It is thread safe to call.
-func (p *FullNode) StoreJournalRecord(deletedNodeHashes, insertedNodeHashes map[string]struct{},
+func (p *FullNode) StoreJournalRecord(deletedNodeHashes, insertedNodeHashes map[common.Hash]struct{},
 	blockHash common.Hash, blockNumber uint32) (err error) {
 	p.mutex.Lock()
 	defer p.mutex.Unlock()
@@ -185,24 +185,25 @@ func (p *FullNode) StoreJournalRecord(deletedNodeHashes, insertedNodeHashes map[
 	return nil
 }
 
-func (p *FullNode) handleInsertedKeys(insertedNodeHashes map[string]struct{},
+func (p *FullNode) handleInsertedKeys(insertedNodeHashes map[common.Hash]struct{},
 	blockNumber uint32, blockHash common.Hash, journalDBBatch Putter) (err error) {
 	for insertedNodeHash := range insertedNodeHashes {
 		err = p.handleInsertedKey(insertedNodeHash, blockNumber, blockHash, journalDBBatch)
 		if err != nil {
-			return fmt.Errorf("handling inserted key 0x%x: %w",
-				[]byte(insertedNodeHash), err)
+			return fmt.Errorf("handling inserted key %s: %w",
+				insertedNodeHash, err)
 		}
 	}
 
 	return nil
 }
 
-func (p *FullNode) handleInsertedKey(insertedNodeHash string, blockNumber uint32,
+func (p *FullNode) handleInsertedKey(insertedNodeHash common.Hash, blockNumber uint32,
 	blockHash common.Hash, journalDBBatch Putter) (err error) {
 	// Try to find if the node hash was deleted in another block before
 	// since we no longer want to prune it, as it was re-inserted.
-	journalKeyDeletedAt, err := p.journalDatabase.Get([]byte(deletedNodeHashKeyPrefix + insertedNodeHash))
+	deletedNodeHashKey := makeDeletedKey(insertedNodeHash)
+	journalKeyDeletedAt, err := p.journalDatabase.Get(deletedNodeHashKey)
 	nodeHashDeletedInAnotherBlock := !errors.Is(err, chaindb.ErrKeyNotFound)
 	if !nodeHashDeletedInAnotherBlock {
 		return nil
@@ -323,7 +324,7 @@ func pruneStorage(blockNumber uint32, blockHashes []common.Hash,
 		}
 
 		for deletedNodeHash := range record.DeletedNodeHashes {
-			err = batch.Del([]byte(deletedNodeHash))
+			err = batch.Del(deletedNodeHash.ToBytes())
 			if err != nil {
 				return fmt.Errorf("deleting key from batch: %w", err)
 			}
@@ -372,7 +373,7 @@ func storeJournalRecord(batch Putter, blockNumber uint32, blockHash common.Hash,
 		// We store the block hash + block number for each deleted node hash
 		// so a node hash can quickly be checked for from the journal database
 		// when running `handleInsertedKey`.
-		databaseKey := []byte(deletedNodeHashKeyPrefix + deletedNodeHash)
+		databaseKey := makeDeletedKey(deletedNodeHash)
 		err = batch.Put(databaseKey, encodedKey)
 		if err != nil {
 			return fmt.Errorf("putting journal key in database batch: %w", err)
@@ -501,3 +502,10 @@ func pruneBlockHashes(blockNumber uint32, batch Deleter) (err error) {
 	}
 	return nil
 }
+
+func makeDeletedKey(hash common.Hash) (key []byte) {
+	key = make([]byte, 0, len(deletedNodeHashKeyPrefix)+common.HashLength)
+	key = append(key, []byte(deletedNodeHashKeyPrefix)...)
+	key = append(key, hash.ToBytes()...)
+	return key
+}
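Note on the key layout introduced by makeDeletedKey (reviewer sketch, not part of the diff): since node hashes are now fixed-size common.Hash values, the journal key for a deleted node hash is simply the constant prefix followed by the 32 raw bytes of the hash, so no separator is needed and every such key has the same length. The snippet below illustrates that layout; it assumes the prefix value "deleted_" taken from the old test expectation []byte("deleted_" + "deletedhash"), and splitDeletedKey is a hypothetical helper added only to show the composition is reversible; the pruner itself never parses these keys.

    package main

    import (
        "fmt"

        "github.com/ChainSafe/gossamer/lib/common"
    )

    // Assumed to match the unexported constant used by the full pruner;
    // the value comes from the old test expectation []byte("deleted_" + ...).
    const deletedNodeHashKeyPrefix = "deleted_"

    // Same composition as the makeDeletedKey helper added in full.go:
    // the constant prefix followed by the 32 raw bytes of the node hash.
    func makeDeletedKey(hash common.Hash) []byte {
        key := make([]byte, 0, len(deletedNodeHashKeyPrefix)+common.HashLength)
        key = append(key, []byte(deletedNodeHashKeyPrefix)...)
        key = append(key, hash.ToBytes()...)
        return key
    }

    // splitDeletedKey is hypothetical and only illustrates the fixed-size layout.
    func splitDeletedKey(key []byte) (nodeHash common.Hash, ok bool) {
        if len(key) != len(deletedNodeHashKeyPrefix)+common.HashLength {
            return common.Hash{}, false
        }
        return common.NewHash(key[len(deletedNodeHashKeyPrefix):]), true
    }

    func main() {
        key := makeDeletedKey(common.Hash{3})
        nodeHash, ok := splitDeletedKey(key)
        fmt.Println(len(key), nodeHash, ok) // 40 bytes: 8 prefix bytes + 32 hash bytes
    }

Preallocating the slice with make([]byte, 0, len(prefix)+common.HashLength) avoids a second allocation in the appends, mirroring the helper in the diff.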
diff --git a/internal/pruner/full_test.go b/internal/pruner/full_test.go
index 67112154af6..719d36a82d3 100644
--- a/internal/pruner/full_test.go
+++ b/internal/pruner/full_test.go
@@ -95,7 +95,7 @@ func Test_FullNode_pruneAll(t *testing.T) {
 				key := journalKey{BlockNumber: 1, BlockHash: common.Hash{2}}
 				encodedKey := scaleMarshal(t, key)
 				record := journalRecord{
-					DeletedNodeHashes: map[string]struct{}{"deletedhash": {}},
+					DeletedNodeHashes: map[common.Hash]struct{}{{3}: {}},
 				}
 				encodedRecord := scaleMarshal(t, record)
 				database.EXPECT().Get(encodedKey).Return(encodedRecord, nil)
@@ -106,7 +106,7 @@ func Test_FullNode_pruneAll(t *testing.T) {
 				storageDatabase := NewMockChainDBNewBatcher(ctrl)
 				batch := NewMockBatch(ctrl)
 				storageDatabase.EXPECT().NewBatch().Return(batch)
-				batch.EXPECT().Del([]byte("deletedhash")).Return(nil)
+				batch.EXPECT().Del(common.Hash{3}.ToBytes()).Return(nil)
 				batch.EXPECT().Reset()
 				return storageDatabase
 			},
@@ -147,7 +147,7 @@ func Test_FullNode_pruneAll(t *testing.T) {
 				key := journalKey{BlockNumber: 1, BlockHash: common.Hash{2}}
 				encodedKey := scaleMarshal(t, key)
 				record := journalRecord{
-					DeletedNodeHashes: map[string]struct{}{"deletedhash": {}},
+					DeletedNodeHashes: map[common.Hash]struct{}{{3}: {}},
 				}
 				encodedRecord := scaleMarshal(t, record)
 				database.EXPECT().Get(encodedKey).Return(encodedRecord, nil)
@@ -158,7 +158,7 @@ func Test_FullNode_pruneAll(t *testing.T) {
 				storageDatabase := NewMockChainDBNewBatcher(ctrl)
 				batch := NewMockBatch(ctrl)
 				storageDatabase.EXPECT().NewBatch().Return(batch)
-				batch.EXPECT().Del([]byte("deletedhash")).Return(nil)
+				batch.EXPECT().Del(common.Hash{3}.ToBytes()).Return(nil)
 				batch.EXPECT().Flush().Return(errTest)
 				return storageDatabase
 			},
@@ -198,7 +198,7 @@ func Test_FullNode_pruneAll(t *testing.T) {
 				key := journalKey{BlockNumber: 1, BlockHash: common.Hash{2}}
 				encodedKey := scaleMarshal(t, key)
 				record := journalRecord{
-					DeletedNodeHashes: map[string]struct{}{"deletedhash": {}},
+					DeletedNodeHashes: map[common.Hash]struct{}{{3}: {}},
 				}
 				encodedRecord := scaleMarshal(t, record)
 				database.EXPECT().Get(encodedKey).Return(encodedRecord, nil)
@@ -209,7 +209,7 @@ func Test_FullNode_pruneAll(t *testing.T) {
 				storageDatabase := NewMockChainDBNewBatcher(ctrl)
 				batch := NewMockBatch(ctrl)
 				storageDatabase.EXPECT().NewBatch().Return(batch)
-				batch.EXPECT().Del([]byte("deletedhash")).Return(nil)
+				batch.EXPECT().Del(common.Hash{3}.ToBytes()).Return(nil)
 				batch.EXPECT().Flush().Return(nil)
 				return storageDatabase
 			},
@@ -329,7 +329,7 @@ func Test_prune(t *testing.T) {
 				key := journalKey{BlockNumber: 1, BlockHash: common.Hash{2}}
 				encodedKey := scaleMarshal(t, key)
 				record := journalRecord{
-					DeletedNodeHashes: map[string]struct{}{"deletedhash": {}},
+					DeletedNodeHashes: map[common.Hash]struct{}{{3}: {}},
 				}
 				encodedRecord := scaleMarshal(t, record)
 				database.EXPECT().Get(encodedKey).Return(encodedRecord, nil)
@@ -342,7 +342,7 @@
 			},
 			storageBatchBuilder: func(ctrl *gomock.Controller) Deleter {
 				batch := NewMockDeleter(ctrl)
-				batch.EXPECT().Del([]byte("deletedhash")).Return(nil)
+				batch.EXPECT().Del(common.Hash{3}.ToBytes()).Return(nil)
 				return batch
 			},
 			errWrapped: errTest,
@@ -359,7 +359,7 @@ func Test_prune(t *testing.T) {
 				key := journalKey{BlockNumber: 1, BlockHash: common.Hash{2}}
 				encodedKey := scaleMarshal(t, key)
 				record := journalRecord{
-					DeletedNodeHashes: map[string]struct{}{"deletedhash": {}},
+					DeletedNodeHashes: map[common.Hash]struct{}{{3}: {}},
 				}
 				encodedRecord := scaleMarshal(t, record)
 				database.EXPECT().Get(encodedKey).Return(encodedRecord, nil)
@@ -375,7 +375,7 @@
 			},
 			storageBatchBuilder: func(ctrl *gomock.Controller) Deleter {
 				batch := NewMockDeleter(ctrl)
-				batch.EXPECT().Del([]byte("deletedhash")).Return(nil)
+				batch.EXPECT().Del(common.Hash{3}.ToBytes()).Return(nil)
 				return batch
 			},
 		},
@@ -434,8 +434,8 @@ func Test_pruneStorage(t *testing.T) {
 				database := NewMockGetter(ctrl)
 				key := journalKey{BlockNumber: 10, BlockHash: common.Hash{1}}
 				encodedKey := scaleMarshal(t, key)
-				record := journalRecord{DeletedNodeHashes: map[string]struct{}{
-					"deleted_hash": {},
+				record := journalRecord{DeletedNodeHashes: map[common.Hash]struct{}{
+					{3}: {},
 				}}
 				encodedRecord := scaleMarshal(t, record)
 				database.EXPECT().Get(encodedKey).Return(encodedRecord, nil)
@@ -443,7 +443,7 @@
 			},
 			batchBuilder: func(ctrl *gomock.Controller) Deleter {
 				batch := NewMockDeleter(ctrl)
-				batch.EXPECT().Del([]byte("deleted_hash")).Return(errTest)
+				batch.EXPECT().Del(common.Hash{3}.ToBytes()).Return(errTest)
 				return batch
 			},
 			errWrapped: errTest,
@@ -457,18 +457,13 @@
 
 				key1 := journalKey{BlockNumber: 10, BlockHash: common.Hash{1}}
 				encodedKey1 := scaleMarshal(t, key1)
-				record1 := journalRecord{DeletedNodeHashes: map[string]struct{}{
-					"deleted_hash_1": {},
-					"deleted_hash_2": {},
-				}}
+				record1 := journalRecord{DeletedNodeHashes: map[common.Hash]struct{}{{11}: {}, {12}: {}}}
 				encodedRecord1 := scaleMarshal(t, record1)
 				database.EXPECT().Get(encodedKey1).Return(encodedRecord1, nil)
 
 				key2 := journalKey{BlockNumber: 10, BlockHash: common.Hash{2}}
 				encodedKey2 := scaleMarshal(t, key2)
-				record2 := journalRecord{DeletedNodeHashes: map[string]struct{}{
-					"deleted_hash_3": {},
-				}}
+				record2 := journalRecord{DeletedNodeHashes: map[common.Hash]struct{}{{13}: {}}}
 				encodedRecord2 := scaleMarshal(t, record2)
 				database.EXPECT().Get(encodedKey2).Return(encodedRecord2, nil)
 
@@ -476,9 +471,9 @@
 			},
 			batchBuilder: func(ctrl *gomock.Controller) Deleter {
 				batch := NewMockDeleter(ctrl)
-				batch.EXPECT().Del([]byte("deleted_hash_1")).Return(nil)
-				batch.EXPECT().Del([]byte("deleted_hash_2")).Return(nil)
-				batch.EXPECT().Del([]byte("deleted_hash_3")).Return(nil)
+				batch.EXPECT().Del(common.Hash{11}.ToBytes()).Return(nil)
+				batch.EXPECT().Del(common.Hash{12}.ToBytes()).Return(nil)
+				batch.EXPECT().Del(common.Hash{13}.ToBytes()).Return(nil)
 				return batch
 			},
 		},
@@ -589,26 +584,26 @@ func Test_storeJournalRecord(t *testing.T) {
 		"deleted node hash put error": {
 			batchBuilder: func(ctrl *gomock.Controller) Putter {
 				database := NewMockPutter(ctrl)
-				databaseKey := []byte("deleted_" + "deletedhash")
+				databaseKey := makeDeletedKey(common.Hash{3})
 				encodedKey := scaleMarshal(t, journalKey{BlockNumber: 1, BlockHash: common.Hash{2}})
 				database.EXPECT().Put(databaseKey, encodedKey).Return(errTest)
 				return database
 			},
 			blockNumber: 1,
 			blockHash: common.Hash{2},
-			record: journalRecord{DeletedNodeHashes: map[string]struct{}{"deletedhash": {}}},
+			record: journalRecord{DeletedNodeHashes: map[common.Hash]struct{}{{3}: {}}},
 			errWrapped: errTest,
 			errMessage: "putting journal key in database batch: test error",
 		},
 		"encoded record put error": {
 			batchBuilder: func(ctrl *gomock.Controller) Putter {
 				database := NewMockPutter(ctrl)
-				databaseKey := []byte("deleted_" + "deletedhash")
+				databaseKey := makeDeletedKey(common.Hash{3})
 				encodedKey := scaleMarshal(t, journalKey{BlockNumber: 1, BlockHash: common.Hash{2}})
 				database.EXPECT().Put(databaseKey, encodedKey).Return(nil)
 				encodedRecord := scaleMarshal(t, journalRecord{
-					DeletedNodeHashes: map[string]struct{}{"deletedhash": {}},
-					InsertedNodeHashes: map[string]struct{}{"insertedhash": {}},
+					DeletedNodeHashes: map[common.Hash]struct{}{{3}: {}},
+					InsertedNodeHashes: map[common.Hash]struct{}{{4}: {}},
 				})
 				database.EXPECT().Put(encodedKey, encodedRecord).Return(errTest)
 				return database
@@ -616,8 +611,8 @@
 			blockNumber: 1,
 			blockHash: common.Hash{2},
 			record: journalRecord{
-				DeletedNodeHashes: map[string]struct{}{"deletedhash": {}},
-				InsertedNodeHashes: map[string]struct{}{"insertedhash": {}},
+				DeletedNodeHashes: map[common.Hash]struct{}{{3}: {}},
+				InsertedNodeHashes: map[common.Hash]struct{}{{4}: {}},
 			},
 			errWrapped: errTest,
 			errMessage: "putting journal record in database batch: test error",
@@ -625,12 +620,12 @@
 		"success": {
 			batchBuilder: func(ctrl *gomock.Controller) Putter {
 				database := NewMockPutter(ctrl)
-				databaseKey := []byte("deleted_" + "deletedhash")
+				databaseKey := makeDeletedKey(common.Hash{3})
 				encodedKey := scaleMarshal(t, journalKey{BlockNumber: 1, BlockHash: common.Hash{2}})
 				database.EXPECT().Put(databaseKey, encodedKey).Return(nil)
 				encodedRecord := scaleMarshal(t, journalRecord{
-					DeletedNodeHashes: map[string]struct{}{"deletedhash": {}},
-					InsertedNodeHashes: map[string]struct{}{"insertedhash": {}},
+					DeletedNodeHashes: map[common.Hash]struct{}{{3}: {}},
+					InsertedNodeHashes: map[common.Hash]struct{}{{4}: {}},
 				})
 				database.EXPECT().Put(encodedKey, encodedRecord).Return(nil)
 				return database
@@ -638,8 +633,8 @@
 			blockNumber: 1,
 			blockHash: common.Hash{2},
 			record: journalRecord{
-				DeletedNodeHashes: map[string]struct{}{"deletedhash": {}},
-				InsertedNodeHashes: map[string]struct{}{"insertedhash": {}},
+				DeletedNodeHashes: map[common.Hash]struct{}{{3}: {}},
+				InsertedNodeHashes: map[common.Hash]struct{}{{4}: {}},
 			},
 		},
 	}
@@ -704,8 +699,8 @@ func Test_getJournalRecord(t *testing.T) {
 				database := NewMockGetter(ctrl)
 				expectedKey := scaleMarshal(t, journalKey{BlockNumber: 1, BlockHash: common.Hash{2}})
 				returnedValue := scaleMarshal(t, journalRecord{
-					InsertedNodeHashes: map[string]struct{}{"a": {}, "b": {}},
-					DeletedNodeHashes: map[string]struct{}{"b": {}, "c": {}},
+					InsertedNodeHashes: map[common.Hash]struct{}{{1}: {}, {2}: {}},
+					DeletedNodeHashes: map[common.Hash]struct{}{{2}: {}, {3}: {}},
 				})
 				database.EXPECT().Get(expectedKey).Return(returnedValue, nil)
 				return database
@@ -713,8 +708,8 @@
 			blockNumber: 1,
 			blockHash: common.Hash{2},
 			record: journalRecord{
-				InsertedNodeHashes: map[string]struct{}{"a": {}, "b": {}},
-				DeletedNodeHashes: map[string]struct{}{"b": {}, "c": {}},
+				InsertedNodeHashes: map[common.Hash]struct{}{{1}: {}, {2}: {}},
+				DeletedNodeHashes: map[common.Hash]struct{}{{2}: {}, {3}: {}},
 			},
 		},
 	}
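Side note on the temporary conversion in dot/state/storage.go (illustrative sketch, not part of the diff): the trie state still reports inserted and deleted merkle values as strings, so StoreTrie converts each set into a set of common.Hash before calling the pruner. The two loops could be factored into a helper like the one below; the name merkleValuesToNodeHashes is made up for illustration, and the sketch assumes common.NewHash simply copies the raw bytes into a fixed 32-byte value, which is only acceptable while this conversion remains the stop-gap described in the diff comment.

    package main

    import (
        "fmt"

        "github.com/ChainSafe/gossamer/lib/common"
    )

    // merkleValuesToNodeHashes is a hypothetical helper factoring out the two
    // conversion loops added to StoreTrie: it turns a set of merkle value
    // strings into a set of common.Hash node hashes.
    func merkleValuesToNodeHashes(merkleValues map[string]struct{}) map[common.Hash]struct{} {
        nodeHashes := make(map[common.Hash]struct{}, len(merkleValues))
        for merkleValue := range merkleValues {
            // common.NewHash copies the raw bytes into a fixed 32-byte value.
            nodeHashes[common.NewHash([]byte(merkleValue))] = struct{}{}
        }
        return nodeHashes
    }

    func main() {
        inserted := map[string]struct{}{"\x01\x02\x03": {}}
        fmt.Println(len(merkleValuesToNodeHashes(inserted))) // 1
    }

With such a helper, the StoreTrie change would reduce to two calls, one for the inserted set and one for the deleted set, before s.pruner.StoreJournalRecord; keeping the loops inline, as the diff does, is equally reasonable for a work-around that is meant to disappear once node hashes are tracked directly.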