From e8a78f2c936e1ae23c23019ae3fb155a3185ce99 Mon Sep 17 00:00:00 2001 From: Marko Jelaca Date: Tue, 30 May 2023 16:24:20 +0200 Subject: [PATCH 01/16] Use LevelDB batch write for WriteFullBlock --- blockchain/blockchain.go | 55 +++-- blockchain/blockchain_test.go | 71 +++++- blockchain/storage/batch.go | 129 +++++++++++ blockchain/storage/keyvalue.go | 5 + blockchain/storage/leveldb/leveldb.go | 4 + blockchain/storage/leveldb/leveldb_test.go | 257 +++++++++++++++++++++ blockchain/storage/memory/memory.go | 4 + blockchain/storage/storage.go | 2 + blockchain/storage/testing.go | 4 + 9 files changed, 500 insertions(+), 31 deletions(-) create mode 100644 blockchain/storage/batch.go diff --git a/blockchain/blockchain.go b/blockchain/blockchain.go index fd99fbf58e..d84e9b1b82 100644 --- a/blockchain/blockchain.go +++ b/blockchain/blockchain.go @@ -441,14 +441,14 @@ func (b *Blockchain) GetTD(hash types.Hash) (*big.Int, bool) { } // writeCanonicalHeader writes the new header -func (b *Blockchain) writeCanonicalHeader(event *Event, h *types.Header) error { +func (b *Blockchain) writeCanonicalHeader(batch *storage.Batch, event *Event, h *types.Header) error { parentTD, ok := b.readTotalDifficulty(h.ParentHash) if !ok { return fmt.Errorf("parent difficulty not found") } newTD := big.NewInt(0).Add(parentTD, new(big.Int).SetUint64(h.Difficulty)) - if err := b.db.WriteCanonicalHeader(h, newTD); err != nil { + if err := batch.WriteCanonicalHeader(h, newTD); err != nil { return err } @@ -611,6 +611,8 @@ func (b *Blockchain) WriteHeaders(headers []*types.Header) error { // WriteHeadersWithBodies writes a batch of headers func (b *Blockchain) WriteHeadersWithBodies(headers []*types.Header) error { + batch := b.db.NewBatch() + // Check the size if len(headers) == 0 { return fmt.Errorf("passed in headers array is empty") @@ -637,7 +639,7 @@ func (b *Blockchain) WriteHeadersWithBodies(headers []*types.Header) error { // Write the actual headers for _, h := range headers { event := 
&Event{} - if err := b.writeHeaderImpl(event, h); err != nil { + if err := b.writeHeaderImpl(batch, event, h); err != nil { return err } @@ -645,6 +647,10 @@ func (b *Blockchain) WriteHeadersWithBodies(headers []*types.Header) error { b.dispatchEvent(event) } + if err := batch.Write(); err != nil { + return err + } + return nil } @@ -857,11 +863,12 @@ func (b *Blockchain) executeBlockTransactions(block *types.Block) (*BlockResult, // This function is a copy of WriteBlock but with a full block which does not // require to compute again the Receipts. func (b *Blockchain) WriteFullBlock(fblock *types.FullBlock, source string) error { - block := fblock.Block - b.writeLock.Lock() defer b.writeLock.Unlock() + block := fblock.Block + batch := b.db.NewBatch() + if block.Number() <= b.Header().Number { b.logger.Info("block already inserted", "block", block.Number(), "source", source) @@ -870,20 +877,20 @@ func (b *Blockchain) WriteFullBlock(fblock *types.FullBlock, source string) erro header := block.Header - if err := b.writeBody(block); err != nil { + if err := b.writeBody(batch, block); err != nil { return err } // Write the header to the chain evnt := &Event{Source: source} - if err := b.writeHeaderImpl(evnt, header); err != nil { + if err := b.writeHeaderImpl(batch, evnt, header); err != nil { return err } // write the receipts, do it only after the header has been written. // Otherwise, a client might ask for a header once the receipt is valid, // but before it is written into the storage - if err := b.db.WriteReceipts(block.Hash(), fblock.Receipts); err != nil { + if err := batch.WriteReceipts(block.Hash(), fblock.Receipts); err != nil { return err } @@ -910,6 +917,10 @@ func (b *Blockchain) WriteFullBlock(fblock *types.FullBlock, source string) erro logArgs = append(logArgs, "generation_time_in_seconds", diff) } + if err := batch.Write(); err != nil { + return err + } + b.logger.Info("new block", logArgs...) 
return nil @@ -918,8 +929,7 @@ func (b *Blockchain) WriteFullBlock(fblock *types.FullBlock, source string) erro // WriteBlock writes a single block to the local blockchain. // It doesn't do any kind of verification, only commits the block to the DB func (b *Blockchain) WriteBlock(block *types.Block, source string) error { - b.writeLock.Lock() - defer b.writeLock.Unlock() + batch := b.db.NewBatch() if block.Number() <= b.Header().Number { b.logger.Info("block already inserted", "block", block.Number(), "source", source) @@ -929,13 +939,13 @@ func (b *Blockchain) WriteBlock(block *types.Block, source string) error { header := block.Header - if err := b.writeBody(block); err != nil { + if err := b.writeBody(batch, block); err != nil { return err } // Write the header to the chain evnt := &Event{Source: source} - if err := b.writeHeaderImpl(evnt, header); err != nil { + if err := b.writeHeaderImpl(batch, evnt, header); err != nil { return err } @@ -975,6 +985,13 @@ func (b *Blockchain) WriteBlock(block *types.Block, source string) error { logArgs = append(logArgs, "generation_time_in_seconds", diff) } + b.writeLock.Lock() + defer b.writeLock.Unlock() + + if err := batch.Write(); err != nil { + return err + } + b.logger.Info("new block", logArgs...) return nil @@ -1037,7 +1054,7 @@ func (b *Blockchain) updateGasPriceAvgWithBlock(block *types.Block) { // writeBody writes the block body to the DB. 
// Additionally, it also updates the txn lookup, for txnHash -> block lookups -func (b *Blockchain) writeBody(block *types.Block) error { +func (b *Blockchain) writeBody(batch *storage.Batch, block *types.Block) error { // Recover 'from' field in tx before saving // Because the block passed from the consensus layer doesn't have from field in tx, // due to missing encoding in RLP @@ -1046,13 +1063,13 @@ func (b *Blockchain) writeBody(block *types.Block) error { } // Write the full body (txns + receipts) - if err := b.db.WriteBody(block.Header.Hash, block.Body()); err != nil { + if err := batch.WriteBody(block.Header.Hash, block.Body()); err != nil { return err } // Write txn lookups (txHash -> block) for _, txn := range block.Transactions { - if err := b.db.WriteTxLookup(txn.Hash, block.Hash()); err != nil { + if err := batch.WriteTxLookup(txn.Hash, block.Hash()); err != nil { return err } } @@ -1188,16 +1205,16 @@ func (b *Blockchain) dispatchEvent(evnt *Event) { } // writeHeaderImpl writes a block and the data, assumes the genesis is already set -func (b *Blockchain) writeHeaderImpl(evnt *Event, header *types.Header) error { +func (b *Blockchain) writeHeaderImpl(batch *storage.Batch, evnt *Event, header *types.Header) error { currentHeader := b.Header() // Write the data if header.ParentHash == currentHeader.Hash { // Fast path to save the new canonical header - return b.writeCanonicalHeader(evnt, header) + return b.writeCanonicalHeader(batch, evnt, header) } - if err := b.db.WriteHeader(header); err != nil { + if err := batch.WriteHeader(header); err != nil { return err } @@ -1217,7 +1234,7 @@ func (b *Blockchain) writeHeaderImpl(evnt *Event, header *types.Header) error { } // Write the difficulty - if err := b.db.WriteTotalDifficulty( + if err := batch.WriteTotalDifficulty( header.Hash, big.NewInt(0).Add( parentTD, diff --git a/blockchain/blockchain_test.go b/blockchain/blockchain_test.go index b8de3dcffd..d0a24f7b50 100644 --- a/blockchain/blockchain_test.go 
+++ b/blockchain/blockchain_test.go @@ -4,6 +4,8 @@ import ( "errors" "fmt" "math/big" + "os" + "path" "reflect" "testing" @@ -14,7 +16,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/0xPolygon/polygon-edge/blockchain/storage" - "github.com/0xPolygon/polygon-edge/blockchain/storage/memory" + "github.com/0xPolygon/polygon-edge/blockchain/storage/leveldb" "github.com/0xPolygon/polygon-edge/types" ) @@ -553,11 +555,11 @@ func TestBlockchainWriteBody(t *testing.T) { newChain := func( t *testing.T, txFromByTxHash map[types.Hash]types.Address, + path string, ) *Blockchain { t.Helper() - storage, err := memory.NewMemoryStorage(nil) - assert.NoError(t, err) + storage, _ := newStorageP(t, path) chain := &Blockchain{ db: storage, @@ -590,12 +592,15 @@ func TestBlockchainWriteBody(t *testing.T) { txFromByTxHash := map[types.Hash]types.Address{} - chain := newChain(t, txFromByTxHash) + chain := newChain(t, txFromByTxHash, "t1") + defer chain.db.Close() + batch := chain.db.NewBatch() assert.NoError( t, - chain.writeBody(block), + chain.writeBody(batch, block), ) + assert.NoError(t, batch.Write()) }) t.Run("should return error if tx doesn't have from and recovering address fails", func(t *testing.T) { @@ -618,13 +623,16 @@ func TestBlockchainWriteBody(t *testing.T) { txFromByTxHash := map[types.Hash]types.Address{} - chain := newChain(t, txFromByTxHash) + chain := newChain(t, txFromByTxHash, "t2") + defer chain.db.Close() + batch := chain.db.NewBatch() assert.ErrorIs( t, errRecoveryAddressFailed, - chain.writeBody(block), + chain.writeBody(batch, block), ) + assert.NoError(t, batch.Write()) }) t.Run("should recover from address and store to storage", func(t *testing.T) { @@ -649,9 +657,12 @@ func TestBlockchainWriteBody(t *testing.T) { tx.Hash: addr, } - chain := newChain(t, txFromByTxHash) + chain := newChain(t, txFromByTxHash, "t3") + defer chain.db.Close() + batch := chain.db.NewBatch() - assert.NoError(t, chain.writeBody(block)) + assert.NoError(t, 
chain.writeBody(batch, block)) + assert.NoError(t, batch.Write()) readBody, ok := chain.readBody(block.Hash()) assert.True(t, ok) @@ -854,8 +865,7 @@ func Test_recoverFromFieldsInTransactions(t *testing.T) { } func TestBlockchainReadBody(t *testing.T) { - storage, err := memory.NewMemoryStorage(nil) - assert.NoError(t, err) + storage, _ := newStorageP(t, "TestBlockchainReadBody") txFromByTxHash := make(map[types.Hash]types.Address) addr := types.StringToAddress("1") @@ -868,6 +878,8 @@ func TestBlockchainReadBody(t *testing.T) { }, } + batch := b.db.NewBatch() + tx := &types.Transaction{ Value: big.NewInt(10), V: big.NewInt(1), @@ -886,10 +898,12 @@ func TestBlockchainReadBody(t *testing.T) { txFromByTxHash[tx.Hash] = types.ZeroAddress - if err := b.writeBody(block); err != nil { + if err := b.writeBody(batch, block); err != nil { t.Fatal(err) } + assert.NoError(t, batch.Write()) + txFromByTxHash[tx.Hash] = addr readBody, found := b.readBody(block.Hash()) @@ -1391,3 +1405,36 @@ func TestBlockchain_CalculateBaseFee(t *testing.T) { }) } } + +func newStorageP(t *testing.T, testPath string) (storage.Storage, func()) { + t.Helper() + + dir, err := os.Getwd() + if err != nil { + t.Fatal(err) + } + + p := path.Join(dir, "/tmp/storage/", testPath) + + err = os.MkdirAll(p, 0755) + if err != nil { + t.Fatal(err) + } + + s, err := leveldb.NewLevelDBStorage(p, hclog.NewNullLogger()) + if err != nil { + t.Fatal(err) + } + + closeFn := func() { + if err := s.Close(); err != nil { + t.Fatal(err) + } + + if err := os.RemoveAll(p); err != nil { + t.Fatal(err) + } + } + + return s, closeFn +} diff --git a/blockchain/storage/batch.go b/blockchain/storage/batch.go new file mode 100644 index 0000000000..f1e9e4a367 --- /dev/null +++ b/blockchain/storage/batch.go @@ -0,0 +1,129 @@ +package storage + +import ( + "encoding/binary" + "math/big" + + "github.com/0xPolygon/polygon-edge/types" + "github.com/syndtr/goleveldb/leveldb" + "github.com/umbracle/fastrlp" +) + +type Batch struct { + DB 
*leveldb.DB + B *leveldb.Batch + Size int +} + +func NewBatch(db *leveldb.DB) *Batch { + return &Batch{ + DB: db, + B: new(leveldb.Batch), + } +} + +func (b *Batch) WriteHeader(h *types.Header) error { + return b.writeRLP(HEADER, h.Hash.Bytes(), h) +} + +func (b *Batch) WriteBody(hash types.Hash, body *types.Body) error { + return b.writeRLP(BODY, hash.Bytes(), body) +} + +// Delete inserts the a key removal into the batch for later committing. +func (b *Batch) Delete(key []byte) error { + b.B.Delete(key) + b.Size += len(key) + + return nil +} + +func (b *Batch) WriteHeadHash(h types.Hash) error { + return b.add(HEAD, HASH, h.Bytes()) +} + +func (b *Batch) WriteTxLookup(hash types.Hash, blockHash types.Hash) error { + ar := &fastrlp.Arena{} + vr := ar.NewBytes(blockHash.Bytes()) + + return b.write2(TX_LOOKUP_PREFIX, hash.Bytes(), vr) +} + +func (b *Batch) encodeUint(n uint64) []byte { + bites := make([]byte, 8) + binary.BigEndian.PutUint64(bites[:], n) + + return bites[:] +} + +func (b *Batch) WriteHeadNumber(n uint64) error { + return b.add(HEAD, NUMBER, b.encodeUint(n)) +} + +func (b *Batch) write2(p, k []byte, v *fastrlp.Value) error { + dst := v.MarshalTo(nil) + + return b.add(p, k, dst) +} +func (b *Batch) WriteReceipts(hash types.Hash, receipts []*types.Receipt) error { + rr := types.Receipts(receipts) + + return b.writeRLP(RECEIPTS, hash.Bytes(), &rr) +} + +// Write flushes any accumulated data to disk. +func (b *Batch) Write() error { + return b.DB.Write(b.B, nil) +} + +func (b *Batch) add(p []byte, k []byte, data []byte) error { + p = append(p, k...) 
+ b.B.Put(p, data) + + b.Size += len(p) + len(data) + + return nil +} + +func (b *Batch) writeRLP(p, k []byte, raw types.RLPMarshaler) error { + var data []byte + if obj, ok := raw.(types.RLPStoreMarshaler); ok { + data = obj.MarshalStoreRLPTo(nil) + } else { + data = raw.MarshalRLPTo(nil) + } + + return b.add(p, k, data) +} + +func (b *Batch) WriteCanonicalHeader(h *types.Header, diff *big.Int) error { + if err := b.WriteHeader(h); err != nil { + return err + } + + if err := b.WriteHeadHash(h.Hash); err != nil { + return err + } + + if err := b.WriteHeadNumber(h.Number); err != nil { + return err + } + + if err := b.WriteCanonicalHash(h.Number, h.Hash); err != nil { + return err + } + + if err := b.WriteTotalDifficulty(h.Hash, diff); err != nil { + return err + } + + return nil +} + +func (b *Batch) WriteCanonicalHash(n uint64, hash types.Hash) error { + return b.add(CANONICAL, b.encodeUint(n), hash.Bytes()) +} + +func (b *Batch) WriteTotalDifficulty(hash types.Hash, diff *big.Int) error { + return b.add(DIFFICULTY, hash.Bytes(), diff.Bytes()) +} diff --git a/blockchain/storage/keyvalue.go b/blockchain/storage/keyvalue.go index fd1494e86f..801faffeda 100644 --- a/blockchain/storage/keyvalue.go +++ b/blockchain/storage/keyvalue.go @@ -55,6 +55,7 @@ type KV interface { Close() error Set(p []byte, v []byte) error Get(p []byte) ([]byte, bool, error) + NewBatch() *Batch } // KeyValueStorage is a generic storage for kv databases @@ -350,3 +351,7 @@ func (s *KeyValueStorage) get(p []byte, k []byte) ([]byte, bool) { func (s *KeyValueStorage) Close() error { return s.db.Close() } + +func (s *KeyValueStorage) NewBatch() *Batch { + return s.db.NewBatch() +} diff --git a/blockchain/storage/leveldb/leveldb.go b/blockchain/storage/leveldb/leveldb.go index 02a4c5593b..c126d43679 100644 --- a/blockchain/storage/leveldb/leveldb.go +++ b/blockchain/storage/leveldb/leveldb.go @@ -63,3 +63,7 @@ func (l *levelDBKV) Get(p []byte) ([]byte, bool, error) { func (l *levelDBKV) Close() 
error { return l.db.Close() } + +func (l *levelDBKV) NewBatch() *storage.Batch { + return storage.NewBatch(l.db) +} diff --git a/blockchain/storage/leveldb/leveldb_test.go b/blockchain/storage/leveldb/leveldb_test.go index e3ea4aef86..2a5f4f8784 100644 --- a/blockchain/storage/leveldb/leveldb_test.go +++ b/blockchain/storage/leveldb/leveldb_test.go @@ -1,10 +1,20 @@ package leveldb import ( + "context" + "fmt" + "math/big" + "math/rand" "os" + "os/signal" + "path/filepath" + "syscall" "testing" + "time" + "github.com/0xPolygon/polygon-edge/blockchain" "github.com/0xPolygon/polygon-edge/blockchain/storage" + "github.com/0xPolygon/polygon-edge/types" "github.com/hashicorp/go-hclog" ) @@ -37,3 +47,250 @@ func newStorage(t *testing.T) (storage.Storage, func()) { func TestStorage(t *testing.T) { storage.TestStorage(t, newStorage) } + +func generateRandomByteSlice(count int) []byte { + s := make([]byte, count) + for i := 0; i < count; i++ { + s[i] = byte(rand.Int()) + } + + return s +} + +func generateFullTx(nonce uint64, from types.Address, to *types.Address, value *big.Int, dynamic bool, v *big.Int) *types.Transaction { + tx := &types.Transaction{} + + tx.Gas = types.StateTransactionGasLimit + tx.Nonce = nonce + tx.From = from + tx.To = to + tx.Value = value + tx.V = v + tx.Input = generateRandomByteSlice(1000) + tx.Hash = types.BytesToHash(generateRandomByteSlice(32)) + + if dynamic { + tx.Type = types.DynamicFeeTx + tx.GasFeeCap = v + tx.GasTipCap = v + } else { + tx.Type = types.LegacyTx + tx.GasPrice = v + } + + return tx +} + +func generateFullTxs(t *testing.T, startNonce, count int, from types.Address, to *types.Address) []*types.Transaction { + t.Helper() + + v := big.NewInt(1) + txs := make([]*types.Transaction, count) + + for i := 0; i < count; i++ { + txs[i] = generateFullTx(uint64(startNonce+i), from, to, big.NewInt(1), false, v) + } + + return txs +} + +var ( + addr1 = types.StringToAddress("1") + addr2 = types.StringToAddress("2") +) + +func 
generateBlock(t *testing.T, num uint64) *types.FullBlock { + t.Helper() + + b := &types.FullBlock{} + + b.Block = &types.Block{} + b.Block.Header = &types.Header{ + Number: num, + ExtraData: generateRandomByteSlice(32), + Hash: types.BytesToHash(generateRandomByteSlice(32)), + } + + b.Block.Transactions = generateFullTxs(t, 0, 2500, addr1, &addr2) + b.Receipts = make([]*types.Receipt, len(b.Block.Transactions)) + b.Block.Uncles = blockchain.NewTestHeaders(10) + + var status types.ReceiptStatus = types.ReceiptSuccess + + logs := make([]*types.Log, 10) + + for i := 0; i < 10; i++ { + logs[i] = &types.Log{ + Address: addr1, + Topics: []types.Hash{types.StringToHash("topic1"), types.StringToHash("topic2"), types.StringToHash("topic3")}, + Data: []byte{0xaa, 0xbb, 0xcc, 0xdd, 0xbb, 0xaa, 0x01, 0x012}, + } + } + + for i := 0; i < len(b.Block.Transactions); i++ { + b.Receipts[i] = &types.Receipt{ + TxHash: b.Block.Transactions[i].Hash, + Root: types.StringToHash("mockhashstring"), + TransactionType: types.LegacyTx, + GasUsed: uint64(100000), + Status: &status, + Logs: logs, + CumulativeGasUsed: uint64(100000), + ContractAddress: &types.Address{0xaa, 0xbb, 0xcc, 0xdd, 0xab, 0xac}, + } + } + + for i := 0; i < 5; i++ { + b.Receipts[i].LogsBloom = types.CreateBloom(b.Receipts) + } + + return b +} + +func newStorageP(t *testing.T) (storage.Storage, func(), string) { + t.Helper() + + p, err := filepath.Abs("/media/nikola/DATA/tmp/storage") + + if err != nil { + fmt.Println(err) + os.Exit(1) + } + + err = os.MkdirAll(p, 0755) + if err != nil { + t.Fatal(err) + } + + s, err := NewLevelDBStorage(p, hclog.NewNullLogger()) + if err != nil { + t.Fatal(err) + } + + closeFn := func() { + if err := s.Close(); err != nil { + t.Fatal(err) + } + + if err := os.RemoveAll(p); err != nil { + t.Fatal(err) + } + } + + return s, closeFn, p +} + +func countLdbFilesInPath(path string) int { + pattern := filepath.Join(path, "*.ldb") + + files, err := filepath.Glob(pattern) + if err != nil { + 
return -1 + } + + return len(files) +} + +func generateBlocks(t *testing.T, count int, ch chan *types.FullBlock, ctx context.Context) { + t.Helper() + + ticker := time.NewTicker(time.Second) + + for i := 1; i <= count; i++ { + b := generateBlock(t, uint64(i)) + select { + case <-ctx.Done(): + close(ch) + ticker.Stop() + + return + case <-ticker.C: + ch <- b + } + } +} + +func DirSize(path string) (int64, error) { + var size int64 + + err := filepath.Walk(path, func(_ string, info os.FileInfo, err error) error { + if err != nil { + return err + } + if !info.IsDir() { + size += info.Size() + } + + return err + }) + + return size, err +} + +func TestWriteFullBlock(t *testing.T) { + t.Helper() + + s, _, path := newStorageP(t) + defer s.Close() + + count := 40000 + ctx, cancel := context.WithTimeout(context.Background(), time.Minute*45) + + signchan := make(chan os.Signal, 1) + signal.Notify(signchan, syscall.SIGINT) + + go func() { + <-signchan + cancel() + }() + + blockchain := make(chan *types.FullBlock, 1) + go generateBlocks(t, count, blockchain, ctx) + +insertloop: + for i := 1; i <= count; i++ { + select { + case <-ctx.Done(): + break insertloop + case b := <-blockchain: + batch := s.NewBatch() + + if err := batch.WriteBody(b.Block.Hash(), b.Block.Body()); err != nil { + t.Log(err) + } + + for _, tx := range b.Block.Transactions { + if err := batch.WriteTxLookup(tx.Hash, b.Block.Hash()); err != nil { + t.Log(err) + } + } + + if err := s.WriteHeader(b.Block.Header); err != nil { + t.Log(err) + } + + if err := s.WriteHeadNumber(uint64(i)); err != nil { + t.Log(err) + } + + if err := s.WriteHeadHash(b.Block.Header.Hash); err != nil { + t.Log(err) + } + + if err := batch.WriteReceipts(b.Block.Hash(), b.Receipts); err != nil { + t.Log(err) + } + + if err := batch.Write(); err != nil { + fmt.Println(err) + t.FailNow() + } + + fmt.Println("writing block", i) + + size, _ := DirSize(path) + fmt.Println("\tldb file count:", countLdbFilesInPath(path)) + fmt.Println("\tdir 
size", size/1_000_000, "MBs") + } + } +} diff --git a/blockchain/storage/memory/memory.go b/blockchain/storage/memory/memory.go index 7af8bc7412..b94435eb3e 100644 --- a/blockchain/storage/memory/memory.go +++ b/blockchain/storage/memory/memory.go @@ -36,3 +36,7 @@ func (m *memoryKV) Get(p []byte) ([]byte, bool, error) { func (m *memoryKV) Close() error { return nil } + +func (m *memoryKV) NewBatch() *storage.Batch { + return nil +} diff --git a/blockchain/storage/storage.go b/blockchain/storage/storage.go index 1010e8a2f1..1ab4f2378d 100644 --- a/blockchain/storage/storage.go +++ b/blockchain/storage/storage.go @@ -37,6 +37,8 @@ type Storage interface { WriteTxLookup(hash types.Hash, blockHash types.Hash) error ReadTxLookup(hash types.Hash) (types.Hash, bool) + NewBatch() *Batch + Close() error } diff --git a/blockchain/storage/testing.go b/blockchain/storage/testing.go index 63330b2d4e..b2478c75f2 100644 --- a/blockchain/storage/testing.go +++ b/blockchain/storage/testing.go @@ -733,3 +733,7 @@ func (m *MockStorage) Close() error { func (m *MockStorage) HookClose(fn closeDelegate) { m.closeFn = fn } + +func (m *MockStorage) NewBatch() *Batch { + return nil +} From 2daaa5d9aee81731bd3caefc28eaeb99c08cb090 Mon Sep 17 00:00:00 2001 From: Igor Crevar Date: Tue, 30 May 2023 18:20:58 +0200 Subject: [PATCH 02/16] batch interface --- blockchain/blockchain.go | 60 +++++----- blockchain/blockchain_test.go | 59 +++------- blockchain/storage/batch.go | 129 ++++++++------------- blockchain/storage/keyvalue.go | 4 +- blockchain/storage/leveldb/batch.go | 32 +++++ blockchain/storage/leveldb/leveldb.go | 4 +- blockchain/storage/leveldb/leveldb_test.go | 51 +++----- blockchain/storage/memory/batch.go | 35 ++++++ blockchain/storage/memory/memory.go | 4 +- blockchain/storage/storage.go | 2 +- blockchain/storage/testing.go | 2 +- 11 files changed, 179 insertions(+), 203 deletions(-) create mode 100644 blockchain/storage/leveldb/batch.go create mode 100644 
blockchain/storage/memory/batch.go diff --git a/blockchain/blockchain.go b/blockchain/blockchain.go index d84e9b1b82..00db6ba1de 100644 --- a/blockchain/blockchain.go +++ b/blockchain/blockchain.go @@ -441,16 +441,14 @@ func (b *Blockchain) GetTD(hash types.Hash) (*big.Int, bool) { } // writeCanonicalHeader writes the new header -func (b *Blockchain) writeCanonicalHeader(batch *storage.Batch, event *Event, h *types.Header) error { +func (b *Blockchain) writeCanonicalHeader(batch *storage.BatchHelper, event *Event, h *types.Header) error { parentTD, ok := b.readTotalDifficulty(h.ParentHash) if !ok { return fmt.Errorf("parent difficulty not found") } newTD := big.NewInt(0).Add(parentTD, new(big.Int).SetUint64(h.Difficulty)) - if err := batch.WriteCanonicalHeader(h, newTD); err != nil { - return err - } + batch.WriteCanonicalHeader(h, newTD) event.Type = EventHead event.AddNewHeader(h) @@ -611,8 +609,6 @@ func (b *Blockchain) WriteHeaders(headers []*types.Header) error { // WriteHeadersWithBodies writes a batch of headers func (b *Blockchain) WriteHeadersWithBodies(headers []*types.Header) error { - batch := b.db.NewBatch() - // Check the size if len(headers) == 0 { return fmt.Errorf("passed in headers array is empty") @@ -636,10 +632,18 @@ func (b *Blockchain) WriteHeadersWithBodies(headers []*types.Header) error { } } - // Write the actual headers + // Write the actual headers in seperate batches for now for _, h := range headers { event := &Event{} - if err := b.writeHeaderImpl(batch, event, h); err != nil { + + batch := b.db.NewBatch() + batchHelper := storage.NewBatchHelper(batch) + + if err := b.writeHeaderImpl(batchHelper, event, h); err != nil { + return err + } + + if err := batch.Write(); err != nil { return err } @@ -647,10 +651,6 @@ func (b *Blockchain) WriteHeadersWithBodies(headers []*types.Header) error { b.dispatchEvent(event) } - if err := batch.Write(); err != nil { - return err - } - return nil } @@ -868,6 +868,7 @@ func (b *Blockchain) 
WriteFullBlock(fblock *types.FullBlock, source string) erro block := fblock.Block batch := b.db.NewBatch() + batchHelper := storage.NewBatchHelper(batch) if block.Number() <= b.Header().Number { b.logger.Info("block already inserted", "block", block.Number(), "source", source) @@ -877,22 +878,20 @@ func (b *Blockchain) WriteFullBlock(fblock *types.FullBlock, source string) erro header := block.Header - if err := b.writeBody(batch, block); err != nil { + if err := b.writeBody(batchHelper, block); err != nil { return err } // Write the header to the chain evnt := &Event{Source: source} - if err := b.writeHeaderImpl(batch, evnt, header); err != nil { + if err := b.writeHeaderImpl(batchHelper, evnt, header); err != nil { return err } // write the receipts, do it only after the header has been written. // Otherwise, a client might ask for a header once the receipt is valid, // but before it is written into the storage - if err := batch.WriteReceipts(block.Hash(), fblock.Receipts); err != nil { - return err - } + batchHelper.WriteReceipts(block.Hash(), fblock.Receipts) // update snapshot if err := b.consensus.ProcessHeaders([]*types.Header{header}); err != nil { @@ -930,6 +929,7 @@ func (b *Blockchain) WriteFullBlock(fblock *types.FullBlock, source string) erro // It doesn't do any kind of verification, only commits the block to the DB func (b *Blockchain) WriteBlock(block *types.Block, source string) error { batch := b.db.NewBatch() + batchHelper := storage.NewBatchHelper(batch) if block.Number() <= b.Header().Number { b.logger.Info("block already inserted", "block", block.Number(), "source", source) @@ -939,13 +939,13 @@ func (b *Blockchain) WriteBlock(block *types.Block, source string) error { header := block.Header - if err := b.writeBody(batch, block); err != nil { + if err := b.writeBody(batchHelper, block); err != nil { return err } // Write the header to the chain evnt := &Event{Source: source} - if err := b.writeHeaderImpl(batch, evnt, header); err != nil { + if 
err := b.writeHeaderImpl(batchHelper, evnt, header); err != nil { return err } @@ -1054,7 +1054,7 @@ func (b *Blockchain) updateGasPriceAvgWithBlock(block *types.Block) { // writeBody writes the block body to the DB. // Additionally, it also updates the txn lookup, for txnHash -> block lookups -func (b *Blockchain) writeBody(batch *storage.Batch, block *types.Block) error { +func (b *Blockchain) writeBody(batch *storage.BatchHelper, block *types.Block) error { // Recover 'from' field in tx before saving // Because the block passed from the consensus layer doesn't have from field in tx, // due to missing encoding in RLP @@ -1063,15 +1063,11 @@ func (b *Blockchain) writeBody(batch *storage.Batch, block *types.Block) error { } // Write the full body (txns + receipts) - if err := batch.WriteBody(block.Header.Hash, block.Body()); err != nil { - return err - } + batch.WriteBody(block.Header.Hash, block.Body()) // Write txn lookups (txHash -> block) for _, txn := range block.Transactions { - if err := batch.WriteTxLookup(txn.Hash, block.Hash()); err != nil { - return err - } + batch.WriteTxLookup(txn.Hash, block.Hash()) } return nil @@ -1205,7 +1201,7 @@ func (b *Blockchain) dispatchEvent(evnt *Event) { } // writeHeaderImpl writes a block and the data, assumes the genesis is already set -func (b *Blockchain) writeHeaderImpl(batch *storage.Batch, evnt *Event, header *types.Header) error { +func (b *Blockchain) writeHeaderImpl(batch *storage.BatchHelper, evnt *Event, header *types.Header) error { currentHeader := b.Header() // Write the data @@ -1214,9 +1210,7 @@ func (b *Blockchain) writeHeaderImpl(batch *storage.Batch, evnt *Event, header * return b.writeCanonicalHeader(batch, evnt, header) } - if err := batch.WriteHeader(header); err != nil { - return err - } + batch.WriteHeader(header) currentTD, ok := b.readTotalDifficulty(currentHeader.Hash) if !ok { @@ -1234,15 +1228,13 @@ func (b *Blockchain) writeHeaderImpl(batch *storage.Batch, evnt *Event, header * } // Write the 
difficulty - if err := batch.WriteTotalDifficulty( + batch.WriteTotalDifficulty( header.Hash, big.NewInt(0).Add( parentTD, big.NewInt(0).SetUint64(header.Difficulty), ), - ); err != nil { - return err - } + ) // Update the headers cache b.headersCache.Add(header.Hash, header) diff --git a/blockchain/blockchain_test.go b/blockchain/blockchain_test.go index d0a24f7b50..d885893a6a 100644 --- a/blockchain/blockchain_test.go +++ b/blockchain/blockchain_test.go @@ -4,8 +4,6 @@ import ( "errors" "fmt" "math/big" - "os" - "path" "reflect" "testing" @@ -16,7 +14,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/0xPolygon/polygon-edge/blockchain/storage" - "github.com/0xPolygon/polygon-edge/blockchain/storage/leveldb" + "github.com/0xPolygon/polygon-edge/blockchain/storage/memory" "github.com/0xPolygon/polygon-edge/types" ) @@ -559,10 +557,11 @@ func TestBlockchainWriteBody(t *testing.T) { ) *Blockchain { t.Helper() - storage, _ := newStorageP(t, path) + dbStorage, err := memory.NewMemoryStorage(nil) + assert.NoError(t, err) chain := &Blockchain{ - db: storage, + db: dbStorage, txSigner: &mockSigner{ txFromByTxHash: txFromByTxHash, }, @@ -595,10 +594,11 @@ func TestBlockchainWriteBody(t *testing.T) { chain := newChain(t, txFromByTxHash, "t1") defer chain.db.Close() batch := chain.db.NewBatch() + batchHelper := storage.NewBatchHelper(batch) assert.NoError( t, - chain.writeBody(batch, block), + chain.writeBody(batchHelper, block), ) assert.NoError(t, batch.Write()) }) @@ -626,11 +626,12 @@ func TestBlockchainWriteBody(t *testing.T) { chain := newChain(t, txFromByTxHash, "t2") defer chain.db.Close() batch := chain.db.NewBatch() + batchHelper := storage.NewBatchHelper(batch) assert.ErrorIs( t, errRecoveryAddressFailed, - chain.writeBody(batch, block), + chain.writeBody(batchHelper, block), ) assert.NoError(t, batch.Write()) }) @@ -660,8 +661,9 @@ func TestBlockchainWriteBody(t *testing.T) { chain := newChain(t, txFromByTxHash, "t3") defer chain.db.Close() batch := 
chain.db.NewBatch() + batchHelper := storage.NewBatchHelper(batch) - assert.NoError(t, chain.writeBody(batch, block)) + assert.NoError(t, chain.writeBody(batchHelper, block)) assert.NoError(t, batch.Write()) readBody, ok := chain.readBody(block.Hash()) @@ -865,20 +867,22 @@ func Test_recoverFromFieldsInTransactions(t *testing.T) { } func TestBlockchainReadBody(t *testing.T) { - storage, _ := newStorageP(t, "TestBlockchainReadBody") + dbStorage, err := memory.NewMemoryStorage(nil) + assert.NoError(t, err) txFromByTxHash := make(map[types.Hash]types.Address) addr := types.StringToAddress("1") b := &Blockchain{ logger: hclog.NewNullLogger(), - db: storage, + db: dbStorage, txSigner: &mockSigner{ txFromByTxHash: txFromByTxHash, }, } batch := b.db.NewBatch() + batchHelper := storage.NewBatchHelper(batch) tx := &types.Transaction{ Value: big.NewInt(10), @@ -898,7 +902,7 @@ func TestBlockchainReadBody(t *testing.T) { txFromByTxHash[tx.Hash] = types.ZeroAddress - if err := b.writeBody(batch, block); err != nil { + if err := b.writeBody(batchHelper, block); err != nil { t.Fatal(err) } @@ -1405,36 +1409,3 @@ func TestBlockchain_CalculateBaseFee(t *testing.T) { }) } } - -func newStorageP(t *testing.T, testPath string) (storage.Storage, func()) { - t.Helper() - - dir, err := os.Getwd() - if err != nil { - t.Fatal(err) - } - - p := path.Join(dir, "/tmp/storage/", testPath) - - err = os.MkdirAll(p, 0755) - if err != nil { - t.Fatal(err) - } - - s, err := leveldb.NewLevelDBStorage(p, hclog.NewNullLogger()) - if err != nil { - t.Fatal(err) - } - - closeFn := func() { - if err := s.Close(); err != nil { - t.Fatal(err) - } - - if err := os.RemoveAll(p); err != nil { - t.Fatal(err) - } - } - - return s, closeFn -} diff --git a/blockchain/storage/batch.go b/blockchain/storage/batch.go index f1e9e4a367..c261148dbe 100644 --- a/blockchain/storage/batch.go +++ b/blockchain/storage/batch.go @@ -1,129 +1,98 @@ package storage import ( - "encoding/binary" "math/big" + 
"github.com/0xPolygon/polygon-edge/helper/common" "github.com/0xPolygon/polygon-edge/types" - "github.com/syndtr/goleveldb/leveldb" "github.com/umbracle/fastrlp" ) -type Batch struct { - DB *leveldb.DB - B *leveldb.Batch - Size int +type Batch interface { + Delete(key []byte) + Write() error + Put(k []byte, data []byte) } -func NewBatch(db *leveldb.DB) *Batch { - return &Batch{ - DB: db, - B: new(leveldb.Batch), - } +type BatchHelper struct { + batch Batch } -func (b *Batch) WriteHeader(h *types.Header) error { - return b.writeRLP(HEADER, h.Hash.Bytes(), h) +func NewBatchHelper(batch Batch) *BatchHelper { + return &BatchHelper{batch: batch} } -func (b *Batch) WriteBody(hash types.Hash, body *types.Body) error { - return b.writeRLP(BODY, hash.Bytes(), body) +func (b *BatchHelper) WriteHeader(h *types.Header) { + b.putRlp(HEADER, h.Hash.Bytes(), h) } -// Delete inserts the a key removal into the batch for later committing. -func (b *Batch) Delete(key []byte) error { - b.B.Delete(key) - b.Size += len(key) - - return nil +func (b *BatchHelper) WriteBody(hash types.Hash, body *types.Body) { + b.putRlp(BODY, hash.Bytes(), body) } -func (b *Batch) WriteHeadHash(h types.Hash) error { - return b.add(HEAD, HASH, h.Bytes()) +func (b *BatchHelper) WriteHeadHash(h types.Hash) { + b.putWithPrefix(HEAD, HASH, h.Bytes()) } -func (b *Batch) WriteTxLookup(hash types.Hash, blockHash types.Hash) error { +func (b *BatchHelper) WriteTxLookup(hash types.Hash, blockHash types.Hash) { ar := &fastrlp.Arena{} - vr := ar.NewBytes(blockHash.Bytes()) - - return b.write2(TX_LOOKUP_PREFIX, hash.Bytes(), vr) -} - -func (b *Batch) encodeUint(n uint64) []byte { - bites := make([]byte, 8) - binary.BigEndian.PutUint64(bites[:], n) + vr := ar.NewBytes(blockHash.Bytes()).MarshalTo(nil) - return bites[:] + b.putWithPrefix(TX_LOOKUP_PREFIX, hash.Bytes(), vr) } -func (b *Batch) WriteHeadNumber(n uint64) error { - return b.add(HEAD, NUMBER, b.encodeUint(n)) +func (b *BatchHelper) WriteHeadNumber(n uint64) { 
+ b.putWithPrefix(HEAD, NUMBER, common.EncodeUint64ToBytes(n)) } -func (b *Batch) write2(p, k []byte, v *fastrlp.Value) error { - dst := v.MarshalTo(nil) - - return b.add(p, k, dst) -} -func (b *Batch) WriteReceipts(hash types.Hash, receipts []*types.Receipt) error { +func (b *BatchHelper) WriteReceipts(hash types.Hash, receipts []*types.Receipt) { rr := types.Receipts(receipts) - return b.writeRLP(RECEIPTS, hash.Bytes(), &rr) + b.putRlp(RECEIPTS, hash.Bytes(), &rr) } -// Write flushes any accumulated data to disk. -func (b *Batch) Write() error { - return b.DB.Write(b.B, nil) +func (b *BatchHelper) WriteCanonicalHeader(h *types.Header, diff *big.Int) { + b.WriteHeader(h) + b.WriteHeadHash(h.Hash) + b.WriteHeadNumber(h.Number) + b.WriteCanonicalHash(h.Number, h.Hash) + b.WriteTotalDifficulty(h.Hash, diff) } -func (b *Batch) add(p []byte, k []byte, data []byte) error { - p = append(p, k...) - b.B.Put(p, data) - - b.Size += len(p) + len(data) +func (b *BatchHelper) WriteCanonicalHash(n uint64, hash types.Hash) { + b.putWithPrefix(CANONICAL, common.EncodeUint64ToBytes(n), hash.Bytes()) +} - return nil +func (b *BatchHelper) WriteTotalDifficulty(hash types.Hash, diff *big.Int) { + b.putWithPrefix(DIFFICULTY, hash.Bytes(), diff.Bytes()) } -func (b *Batch) writeRLP(p, k []byte, raw types.RLPMarshaler) error { +func (b *BatchHelper) putRlp(p, k []byte, raw types.RLPMarshaler) { var data []byte + if obj, ok := raw.(types.RLPStoreMarshaler); ok { data = obj.MarshalStoreRLPTo(nil) } else { data = raw.MarshalRLPTo(nil) } - return b.add(p, k, data) + b.putWithPrefix(p, k, data) } -func (b *Batch) WriteCanonicalHeader(h *types.Header, diff *big.Int) error { - if err := b.WriteHeader(h); err != nil { - return err - } - - if err := b.WriteHeadHash(h.Hash); err != nil { - return err - } - - if err := b.WriteHeadNumber(h.Number); err != nil { - return err - } - - if err := b.WriteCanonicalHash(h.Number, h.Hash); err != nil { - return err - } +func (b *BatchHelper) putWithPrefix(p, 
k, data []byte) { + fullKey := append(append([]byte{}, p...), k...) - if err := b.WriteTotalDifficulty(h.Hash, diff); err != nil { - return err - } - - return nil + b.batch.Put(fullKey, data) } -func (b *Batch) WriteCanonicalHash(n uint64, hash types.Hash) error { - return b.add(CANONICAL, b.encodeUint(n), hash.Bytes()) -} +// func (b *BatchHelper) Delete(key []byte) error { +// return b.Delete(key) +// } -func (b *Batch) WriteTotalDifficulty(hash types.Hash, diff *big.Int) error { - return b.add(DIFFICULTY, hash.Bytes(), diff.Bytes()) -} +// func (b *BatchHelper) Write() error { +// return b.batch.Write() +// } + +// func (b *BatchHelper) Put(k []byte, data []byte) error { +// return b.batch.Put(k, data) +// } diff --git a/blockchain/storage/keyvalue.go b/blockchain/storage/keyvalue.go index 801faffeda..602ae3e78f 100644 --- a/blockchain/storage/keyvalue.go +++ b/blockchain/storage/keyvalue.go @@ -55,7 +55,7 @@ type KV interface { Close() error Set(p []byte, v []byte) error Get(p []byte) ([]byte, bool, error) - NewBatch() *Batch + NewBatch() Batch } // KeyValueStorage is a generic storage for kv databases @@ -352,6 +352,6 @@ func (s *KeyValueStorage) Close() error { return s.db.Close() } -func (s *KeyValueStorage) NewBatch() *Batch { +func (s *KeyValueStorage) NewBatch() Batch { return s.db.NewBatch() } diff --git a/blockchain/storage/leveldb/batch.go b/blockchain/storage/leveldb/batch.go new file mode 100644 index 0000000000..be657c073c --- /dev/null +++ b/blockchain/storage/leveldb/batch.go @@ -0,0 +1,32 @@ +package leveldb + +import ( + "github.com/syndtr/goleveldb/leveldb" +) + +type batchLevelDB struct { + db *leveldb.DB + b *leveldb.Batch + Size int +} + +func NewBatchLevelDB(db *leveldb.DB) *batchLevelDB { + return &batchLevelDB{ + db: db, + b: new(leveldb.Batch), + } +} + +func (b *batchLevelDB) Delete(key []byte) { + b.b.Delete(key) + b.Size += len(key) +} + +func (b *batchLevelDB) Write() error { + return b.db.Write(b.b, nil) +} + +func (b *batchLevelDB) 
Put(k []byte, data []byte) { + b.b.Put(k, data) + b.Size += len(k) + len(data) +} diff --git a/blockchain/storage/leveldb/leveldb.go b/blockchain/storage/leveldb/leveldb.go index c126d43679..2a132d9baa 100644 --- a/blockchain/storage/leveldb/leveldb.go +++ b/blockchain/storage/leveldb/leveldb.go @@ -64,6 +64,6 @@ func (l *levelDBKV) Close() error { return l.db.Close() } -func (l *levelDBKV) NewBatch() *storage.Batch { - return storage.NewBatch(l.db) +func (l *levelDBKV) NewBatch() storage.Batch { + return NewBatchLevelDB(l.db) } diff --git a/blockchain/storage/leveldb/leveldb_test.go b/blockchain/storage/leveldb/leveldb_test.go index 2a5f4f8784..7254faa942 100644 --- a/blockchain/storage/leveldb/leveldb_test.go +++ b/blockchain/storage/leveldb/leveldb_test.go @@ -16,6 +16,7 @@ import ( "github.com/0xPolygon/polygon-edge/blockchain/storage" "github.com/0xPolygon/polygon-edge/types" "github.com/hashicorp/go-hclog" + "github.com/stretchr/testify/require" ) func newStorage(t *testing.T) (storage.Storage, func()) { @@ -150,31 +151,21 @@ func generateBlock(t *testing.T, num uint64) *types.FullBlock { func newStorageP(t *testing.T) (storage.Storage, func(), string) { t.Helper() - p, err := filepath.Abs("/media/nikola/DATA/tmp/storage") + p, err := os.MkdirTemp("", "leveldbtest") + require.NoError(t, err) - if err != nil { - fmt.Println(err) - os.Exit(1) - } - - err = os.MkdirAll(p, 0755) - if err != nil { - t.Fatal(err) - } + require.NoError(t, os.MkdirAll(p, 0755)) s, err := NewLevelDBStorage(p, hclog.NewNullLogger()) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) closeFn := func() { + require.NoError(t, s.Close()) if err := s.Close(); err != nil { t.Fatal(err) } - if err := os.RemoveAll(p); err != nil { - t.Fatal(err) - } + require.NoError(t, os.RemoveAll(p)) } return s, closeFn, p @@ -254,32 +245,18 @@ insertloop: break insertloop case b := <-blockchain: batch := s.NewBatch() + batchHelper := storage.NewBatchHelper(batch) - if err := 
batch.WriteBody(b.Block.Hash(), b.Block.Body()); err != nil { - t.Log(err) - } + batchHelper.WriteBody(b.Block.Hash(), b.Block.Body()) for _, tx := range b.Block.Transactions { - if err := batch.WriteTxLookup(tx.Hash, b.Block.Hash()); err != nil { - t.Log(err) - } + batchHelper.WriteTxLookup(tx.Hash, b.Block.Hash()) } - if err := s.WriteHeader(b.Block.Header); err != nil { - t.Log(err) - } - - if err := s.WriteHeadNumber(uint64(i)); err != nil { - t.Log(err) - } - - if err := s.WriteHeadHash(b.Block.Header.Hash); err != nil { - t.Log(err) - } - - if err := batch.WriteReceipts(b.Block.Hash(), b.Receipts); err != nil { - t.Log(err) - } + batchHelper.WriteHeader(b.Block.Header) + batchHelper.WriteHeadNumber(uint64(i)) + batchHelper.WriteHeadHash(b.Block.Header.Hash) + batchHelper.WriteReceipts(b.Block.Hash(), b.Receipts) if err := batch.Write(); err != nil { fmt.Println(err) diff --git a/blockchain/storage/memory/batch.go b/blockchain/storage/memory/batch.go new file mode 100644 index 0000000000..fe4b1fabbc --- /dev/null +++ b/blockchain/storage/memory/batch.go @@ -0,0 +1,35 @@ +package memory + +import ( + "github.com/0xPolygon/polygon-edge/helper/hex" +) + +type batchMemory struct { + db map[string][]byte + keysToDelete [][]byte + valuesToPut [][2][]byte +} + +func NewBatchMemory(db map[string][]byte) *batchMemory { + return &batchMemory{db: db} +} + +func (b *batchMemory) Delete(key []byte) { + b.keysToDelete = append(b.keysToDelete, key) +} + +func (b *batchMemory) Write() error { + for _, x := range b.keysToDelete { + delete(b.db, hex.EncodeToHex(x)) + } + + for _, x := range b.valuesToPut { + b.db[hex.EncodeToHex(x[0])] = x[1] + } + + return nil +} + +func (b *batchMemory) Put(k []byte, data []byte) { + b.valuesToPut = append(b.valuesToPut, [2][]byte{k, data}) +} diff --git a/blockchain/storage/memory/memory.go b/blockchain/storage/memory/memory.go index b94435eb3e..15cdf1e67d 100644 --- a/blockchain/storage/memory/memory.go +++ 
b/blockchain/storage/memory/memory.go @@ -37,6 +37,6 @@ func (m *memoryKV) Close() error { return nil } -func (m *memoryKV) NewBatch() *storage.Batch { - return nil +func (m *memoryKV) NewBatch() storage.Batch { + return NewBatchMemory(m.db) } diff --git a/blockchain/storage/storage.go b/blockchain/storage/storage.go index 1ab4f2378d..2499d85e6d 100644 --- a/blockchain/storage/storage.go +++ b/blockchain/storage/storage.go @@ -37,7 +37,7 @@ type Storage interface { WriteTxLookup(hash types.Hash, blockHash types.Hash) error ReadTxLookup(hash types.Hash) (types.Hash, bool) - NewBatch() *Batch + NewBatch() Batch Close() error } diff --git a/blockchain/storage/testing.go b/blockchain/storage/testing.go index b2478c75f2..4a808839c0 100644 --- a/blockchain/storage/testing.go +++ b/blockchain/storage/testing.go @@ -734,6 +734,6 @@ func (m *MockStorage) HookClose(fn closeDelegate) { m.closeFn = fn } -func (m *MockStorage) NewBatch() *Batch { +func (m *MockStorage) NewBatch() Batch { return nil } From b81f590507739375e2e1252953d91e90f79c2785 Mon Sep 17 00:00:00 2001 From: Marko Jelaca Date: Wed, 31 May 2023 10:40:12 +0200 Subject: [PATCH 03/16] Go lint and levedb test fix --- blockchain/blockchain.go | 2 +- blockchain/storage/leveldb/leveldb_test.go | 3 ++- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/blockchain/blockchain.go b/blockchain/blockchain.go index 00db6ba1de..7a6119458d 100644 --- a/blockchain/blockchain.go +++ b/blockchain/blockchain.go @@ -632,7 +632,7 @@ func (b *Blockchain) WriteHeadersWithBodies(headers []*types.Header) error { } } - // Write the actual headers in seperate batches for now + // Write the actual headers in separate batches for now for _, h := range headers { event := &Event{} diff --git a/blockchain/storage/leveldb/leveldb_test.go b/blockchain/storage/leveldb/leveldb_test.go index 7254faa942..74c2099a78 100644 --- a/blockchain/storage/leveldb/leveldb_test.go +++ b/blockchain/storage/leveldb/leveldb_test.go @@ -161,6 +161,7 @@ 
func newStorageP(t *testing.T) (storage.Storage, func(), string) { closeFn := func() { require.NoError(t, s.Close()) + if err := s.Close(); err != nil { t.Fatal(err) } @@ -224,7 +225,7 @@ func TestWriteFullBlock(t *testing.T) { s, _, path := newStorageP(t) defer s.Close() - count := 40000 + count := 100 ctx, cancel := context.WithTimeout(context.Background(), time.Minute*45) signchan := make(chan os.Signal, 1) From 279e87d1588bd0fce96800ab0f310e6ff96a638f Mon Sep 17 00:00:00 2001 From: Marko Jelaca Date: Thu, 1 Jun 2023 13:53:56 +0200 Subject: [PATCH 04/16] Added go-ethereum default batch options --- blockchain/storage/leveldb/leveldb.go | 14 +++++++++++++- 1 file changed, 13 insertions(+), 1 deletion(-) diff --git a/blockchain/storage/leveldb/leveldb.go b/blockchain/storage/leveldb/leveldb.go index 2a132d9baa..b3b6132a66 100644 --- a/blockchain/storage/leveldb/leveldb.go +++ b/blockchain/storage/leveldb/leveldb.go @@ -6,6 +6,12 @@ import ( "github.com/0xPolygon/polygon-edge/blockchain/storage" "github.com/hashicorp/go-hclog" "github.com/syndtr/goleveldb/leveldb" + "github.com/syndtr/goleveldb/leveldb/opt" +) + +const ( + DefaultCache = int(256) + DefaultHandles = int(256) ) // Factory creates a leveldb storage @@ -25,7 +31,13 @@ func Factory(config map[string]interface{}, logger hclog.Logger) (storage.Storag // NewLevelDBStorage creates the new storage reference with leveldb func NewLevelDBStorage(path string, logger hclog.Logger) (storage.Storage, error) { - db, err := leveldb.OpenFile(path, nil) + options := opt.Options{} + // Set default options + options.OpenFilesCacheCapacity = DefaultHandles + options.BlockCacheCapacity = DefaultCache / 2 * opt.MiB + options.WriteBuffer = DefaultCache / 4 * opt.MiB // Two of these are used internally + + db, err := leveldb.OpenFile(path, &options) if err != nil { return nil, err } From cf46fe06de0cc1c83b672f510a0d29d53f71765a Mon Sep 17 00:00:00 2001 From: Marko Jelaca Date: Wed, 7 Jun 2023 11:39:56 +0200 Subject: [PATCH 
05/16] CR changes --- blockchain/blockchain.go | 21 ++++++++----------- blockchain/blockchain_test.go | 20 +++++++----------- .../storage/{batch.go => batch_helper.go} | 18 ++++++---------- blockchain/storage/keyvalue.go | 4 ++++ blockchain/storage/leveldb/batch.go | 7 ++----- blockchain/storage/leveldb/leveldb.go | 11 +++++++--- blockchain/storage/leveldb/leveldb_test.go | 10 +++------ blockchain/storage/storage.go | 1 + blockchain/storage/testing.go | 4 ++++ 9 files changed, 45 insertions(+), 51 deletions(-) rename blockchain/storage/{batch.go => batch_helper.go} (87%) diff --git a/blockchain/blockchain.go b/blockchain/blockchain.go index 7a6119458d..e52595e588 100644 --- a/blockchain/blockchain.go +++ b/blockchain/blockchain.go @@ -636,14 +636,13 @@ func (b *Blockchain) WriteHeadersWithBodies(headers []*types.Header) error { for _, h := range headers { event := &Event{} - batch := b.db.NewBatch() - batchHelper := storage.NewBatchHelper(batch) + batchHelper := storage.NewBatchHelper(b.db) if err := b.writeHeaderImpl(batchHelper, event, h); err != nil { return err } - if err := batch.Write(); err != nil { + if err := batchHelper.WriteBatch(); err != nil { return err } @@ -867,8 +866,7 @@ func (b *Blockchain) WriteFullBlock(fblock *types.FullBlock, source string) erro defer b.writeLock.Unlock() block := fblock.Block - batch := b.db.NewBatch() - batchHelper := storage.NewBatchHelper(batch) + batchHelper := storage.NewBatchHelper(b.db) if block.Number() <= b.Header().Number { b.logger.Info("block already inserted", "block", block.Number(), "source", source) @@ -916,7 +914,7 @@ func (b *Blockchain) WriteFullBlock(fblock *types.FullBlock, source string) erro logArgs = append(logArgs, "generation_time_in_seconds", diff) } - if err := batch.Write(); err != nil { + if err := batchHelper.WriteBatch(); err != nil { return err } @@ -928,8 +926,10 @@ func (b *Blockchain) WriteFullBlock(fblock *types.FullBlock, source string) erro // WriteBlock writes a single block to the 
local blockchain. // It doesn't do any kind of verification, only commits the block to the DB func (b *Blockchain) WriteBlock(block *types.Block, source string) error { - batch := b.db.NewBatch() - batchHelper := storage.NewBatchHelper(batch) + b.writeLock.Lock() + defer b.writeLock.Unlock() + + batchHelper := storage.NewBatchHelper(b.db) if block.Number() <= b.Header().Number { b.logger.Info("block already inserted", "block", block.Number(), "source", source) @@ -985,10 +985,7 @@ func (b *Blockchain) WriteBlock(block *types.Block, source string) error { logArgs = append(logArgs, "generation_time_in_seconds", diff) } - b.writeLock.Lock() - defer b.writeLock.Unlock() - - if err := batch.Write(); err != nil { + if err := batchHelper.WriteBatch(); err != nil { return err } diff --git a/blockchain/blockchain_test.go b/blockchain/blockchain_test.go index d885893a6a..a8b057820e 100644 --- a/blockchain/blockchain_test.go +++ b/blockchain/blockchain_test.go @@ -593,14 +593,13 @@ func TestBlockchainWriteBody(t *testing.T) { chain := newChain(t, txFromByTxHash, "t1") defer chain.db.Close() - batch := chain.db.NewBatch() - batchHelper := storage.NewBatchHelper(batch) + batchHelper := storage.NewBatchHelper(chain.db) assert.NoError( t, chain.writeBody(batchHelper, block), ) - assert.NoError(t, batch.Write()) + assert.NoError(t, batchHelper.WriteBatch()) }) t.Run("should return error if tx doesn't have from and recovering address fails", func(t *testing.T) { @@ -625,15 +624,14 @@ func TestBlockchainWriteBody(t *testing.T) { chain := newChain(t, txFromByTxHash, "t2") defer chain.db.Close() - batch := chain.db.NewBatch() - batchHelper := storage.NewBatchHelper(batch) + batchHelper := storage.NewBatchHelper(chain.db) assert.ErrorIs( t, errRecoveryAddressFailed, chain.writeBody(batchHelper, block), ) - assert.NoError(t, batch.Write()) + assert.NoError(t, batchHelper.WriteBatch()) }) t.Run("should recover from address and store to storage", func(t *testing.T) { @@ -660,11 +658,10 @@ 
func TestBlockchainWriteBody(t *testing.T) { chain := newChain(t, txFromByTxHash, "t3") defer chain.db.Close() - batch := chain.db.NewBatch() - batchHelper := storage.NewBatchHelper(batch) + batchHelper := storage.NewBatchHelper(chain.db) assert.NoError(t, chain.writeBody(batchHelper, block)) - assert.NoError(t, batch.Write()) + assert.NoError(t, batchHelper.WriteBatch()) readBody, ok := chain.readBody(block.Hash()) assert.True(t, ok) @@ -881,8 +878,7 @@ func TestBlockchainReadBody(t *testing.T) { }, } - batch := b.db.NewBatch() - batchHelper := storage.NewBatchHelper(batch) + batchHelper := storage.NewBatchHelper(b.db) tx := &types.Transaction{ Value: big.NewInt(10), @@ -906,7 +902,7 @@ func TestBlockchainReadBody(t *testing.T) { t.Fatal(err) } - assert.NoError(t, batch.Write()) + assert.NoError(t, batchHelper.WriteBatch()) txFromByTxHash[tx.Hash] = addr diff --git a/blockchain/storage/batch.go b/blockchain/storage/batch_helper.go similarity index 87% rename from blockchain/storage/batch.go rename to blockchain/storage/batch_helper.go index c261148dbe..89984122d0 100644 --- a/blockchain/storage/batch.go +++ b/blockchain/storage/batch_helper.go @@ -18,7 +18,9 @@ type BatchHelper struct { batch Batch } -func NewBatchHelper(batch Batch) *BatchHelper { +func NewBatchHelper(storage Storage) *BatchHelper { + batch := storage.NewBatch() + return &BatchHelper{batch: batch} } @@ -85,14 +87,6 @@ func (b *BatchHelper) putWithPrefix(p, k, data []byte) { b.batch.Put(fullKey, data) } -// func (b *BatchHelper) Delete(key []byte) error { -// return b.Delete(key) -// } - -// func (b *BatchHelper) Write() error { -// return b.batch.Write() -// } - -// func (b *BatchHelper) Put(k []byte, data []byte) error { -// return b.batch.Put(k, data) -// } +func (b *BatchHelper) WriteBatch() error { + return b.batch.Write() +} diff --git a/blockchain/storage/keyvalue.go b/blockchain/storage/keyvalue.go index 602ae3e78f..b49c6bc69e 100644 --- a/blockchain/storage/keyvalue.go +++ 
b/blockchain/storage/keyvalue.go @@ -355,3 +355,7 @@ func (s *KeyValueStorage) Close() error { func (s *KeyValueStorage) NewBatch() Batch { return s.db.NewBatch() } + +func (s *KeyValueStorage) WriteBatch() error { + return s.db.NewBatch().Write() +} diff --git a/blockchain/storage/leveldb/batch.go b/blockchain/storage/leveldb/batch.go index be657c073c..bfc8a51fb5 100644 --- a/blockchain/storage/leveldb/batch.go +++ b/blockchain/storage/leveldb/batch.go @@ -5,9 +5,8 @@ import ( ) type batchLevelDB struct { - db *leveldb.DB - b *leveldb.Batch - Size int + db *leveldb.DB + b *leveldb.Batch } func NewBatchLevelDB(db *leveldb.DB) *batchLevelDB { @@ -19,7 +18,6 @@ func NewBatchLevelDB(db *leveldb.DB) *batchLevelDB { func (b *batchLevelDB) Delete(key []byte) { b.b.Delete(key) - b.Size += len(key) } func (b *batchLevelDB) Write() error { @@ -28,5 +26,4 @@ func (b *batchLevelDB) Write() error { func (b *batchLevelDB) Put(k []byte, data []byte) { b.b.Put(k, data) - b.Size += len(k) + len(data) } diff --git a/blockchain/storage/leveldb/leveldb.go b/blockchain/storage/leveldb/leveldb.go index b3b6132a66..27742ffe77 100644 --- a/blockchain/storage/leveldb/leveldb.go +++ b/blockchain/storage/leveldb/leveldb.go @@ -29,15 +29,20 @@ func Factory(config map[string]interface{}, logger hclog.Logger) (storage.Storag return NewLevelDBStorage(pathStr, logger) } -// NewLevelDBStorage creates the new storage reference with leveldb +// NewLevelDBStorage creates the new storage reference with leveldb default options func NewLevelDBStorage(path string, logger hclog.Logger) (storage.Storage, error) { - options := opt.Options{} + options := &opt.Options{} // Set default options options.OpenFilesCacheCapacity = DefaultHandles options.BlockCacheCapacity = DefaultCache / 2 * opt.MiB options.WriteBuffer = DefaultCache / 4 * opt.MiB // Two of these are used internally - db, err := leveldb.OpenFile(path, &options) + return NewLevelDBStorageWithOpt(path, logger, options) +} + +// 
NewLevelDBStorageWithOpt creates the new storage reference with leveldb with custom options +func NewLevelDBStorageWithOpt(path string, logger hclog.Logger, opts *opt.Options) (storage.Storage, error) { + db, err := leveldb.OpenFile(path, opts) if err != nil { return nil, err } diff --git a/blockchain/storage/leveldb/leveldb_test.go b/blockchain/storage/leveldb/leveldb_test.go index 74c2099a78..f637f64cd6 100644 --- a/blockchain/storage/leveldb/leveldb_test.go +++ b/blockchain/storage/leveldb/leveldb_test.go @@ -220,8 +220,6 @@ func DirSize(path string) (int64, error) { } func TestWriteFullBlock(t *testing.T) { - t.Helper() - s, _, path := newStorageP(t) defer s.Close() @@ -245,8 +243,7 @@ insertloop: case <-ctx.Done(): break insertloop case b := <-blockchain: - batch := s.NewBatch() - batchHelper := storage.NewBatchHelper(batch) + batchHelper := storage.NewBatchHelper(s) batchHelper.WriteBody(b.Block.Hash(), b.Block.Body()) @@ -259,9 +256,8 @@ insertloop: batchHelper.WriteHeadHash(b.Block.Header.Hash) batchHelper.WriteReceipts(b.Block.Hash(), b.Receipts) - if err := batch.Write(); err != nil { - fmt.Println(err) - t.FailNow() + if err := batchHelper.WriteBatch(); err != nil { + require.NoError(t, err) } fmt.Println("writing block", i) diff --git a/blockchain/storage/storage.go b/blockchain/storage/storage.go index 2499d85e6d..75232f21f5 100644 --- a/blockchain/storage/storage.go +++ b/blockchain/storage/storage.go @@ -38,6 +38,7 @@ type Storage interface { ReadTxLookup(hash types.Hash) (types.Hash, bool) NewBatch() Batch + WriteBatch() error Close() error } diff --git a/blockchain/storage/testing.go b/blockchain/storage/testing.go index 4a808839c0..960d8aab56 100644 --- a/blockchain/storage/testing.go +++ b/blockchain/storage/testing.go @@ -737,3 +737,7 @@ func (m *MockStorage) HookClose(fn closeDelegate) { func (m *MockStorage) NewBatch() Batch { return nil } + +func (m *MockStorage) WriteBatch() error { + return nil +} From 
5ae832c5e3697bd99a48d8345ce5825cccd9328a Mon Sep 17 00:00:00 2001 From: Marko Jelaca Date: Wed, 7 Jun 2023 14:38:28 +0200 Subject: [PATCH 06/16] CR changes --- blockchain/storage/leveldb/leveldb.go | 9 +++++---- blockchain/storage/storage.go | 1 - 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/blockchain/storage/leveldb/leveldb.go b/blockchain/storage/leveldb/leveldb.go index 27742ffe77..e49ac04267 100644 --- a/blockchain/storage/leveldb/leveldb.go +++ b/blockchain/storage/leveldb/leveldb.go @@ -31,11 +31,12 @@ func Factory(config map[string]interface{}, logger hclog.Logger) (storage.Storag // NewLevelDBStorage creates the new storage reference with leveldb default options func NewLevelDBStorage(path string, logger hclog.Logger) (storage.Storage, error) { - options := &opt.Options{} // Set default options - options.OpenFilesCacheCapacity = DefaultHandles - options.BlockCacheCapacity = DefaultCache / 2 * opt.MiB - options.WriteBuffer = DefaultCache / 4 * opt.MiB // Two of these are used internally + options := &opt.Options{ + OpenFilesCacheCapacity: DefaultHandles, + BlockCacheCapacity: DefaultCache / 2 * opt.MiB, + WriteBuffer: DefaultCache / 4 * opt.MiB, // Two of these are used internally + } return NewLevelDBStorageWithOpt(path, logger, options) } diff --git a/blockchain/storage/storage.go b/blockchain/storage/storage.go index 75232f21f5..2499d85e6d 100644 --- a/blockchain/storage/storage.go +++ b/blockchain/storage/storage.go @@ -38,7 +38,6 @@ type Storage interface { ReadTxLookup(hash types.Hash) (types.Hash, bool) NewBatch() Batch - WriteBatch() error Close() error } From 4e193947f8c4ae38e7ad04b81a52a57bbe4cc54d Mon Sep 17 00:00:00 2001 From: Marko Jelaca Date: Wed, 7 Jun 2023 18:48:49 +0200 Subject: [PATCH 07/16] CR changes --- blockchain/storage/keyvalue.go | 4 ---- 1 file changed, 4 deletions(-) diff --git a/blockchain/storage/keyvalue.go b/blockchain/storage/keyvalue.go index b49c6bc69e..602ae3e78f 100644 --- 
a/blockchain/storage/keyvalue.go +++ b/blockchain/storage/keyvalue.go @@ -355,7 +355,3 @@ func (s *KeyValueStorage) Close() error { func (s *KeyValueStorage) NewBatch() Batch { return s.db.NewBatch() } - -func (s *KeyValueStorage) WriteBatch() error { - return s.db.NewBatch().Write() -} From 3cee4952af72992fe3d2446915c244722ea74574 Mon Sep 17 00:00:00 2001 From: Marko Jelaca Date: Thu, 8 Jun 2023 09:21:24 +0200 Subject: [PATCH 08/16] CR changes --- blockchain/blockchain.go | 19 ++++++++------- blockchain/storage/batch_helper.go | 28 +++++++++++----------- blockchain/storage/leveldb/leveldb_test.go | 12 +++++----- blockchain/storage/testing.go | 4 ---- 4 files changed, 30 insertions(+), 33 deletions(-) diff --git a/blockchain/blockchain.go b/blockchain/blockchain.go index 04bb76f708..289f3df6ad 100644 --- a/blockchain/blockchain.go +++ b/blockchain/blockchain.go @@ -448,7 +448,7 @@ func (b *Blockchain) writeCanonicalHeader(batch *storage.BatchHelper, event *Eve } newTD := big.NewInt(0).Add(parentTD, new(big.Int).SetUint64(h.Difficulty)) - batch.WriteCanonicalHeader(h, newTD) + batch.PutCanonicalHeader(h, newTD) event.Type = EventHead event.AddNewHeader(h) @@ -866,7 +866,6 @@ func (b *Blockchain) WriteFullBlock(fblock *types.FullBlock, source string) erro defer b.writeLock.Unlock() block := fblock.Block - batchHelper := storage.NewBatchHelper(b.db) if block.Number() <= b.Header().Number { b.logger.Info("block already inserted", "block", block.Number(), "source", source) @@ -876,6 +875,8 @@ func (b *Blockchain) WriteFullBlock(fblock *types.FullBlock, source string) erro header := block.Header + batchHelper := storage.NewBatchHelper(b.db) + if err := b.writeBody(batchHelper, block); err != nil { return err } @@ -889,7 +890,7 @@ func (b *Blockchain) WriteFullBlock(fblock *types.FullBlock, source string) erro // write the receipts, do it only after the header has been written. 
// Otherwise, a client might ask for a header once the receipt is valid, // but before it is written into the storage - batchHelper.WriteReceipts(block.Hash(), fblock.Receipts) + batchHelper.PutReceipts(block.Hash(), fblock.Receipts) // update snapshot if err := b.consensus.ProcessHeaders([]*types.Header{header}); err != nil { @@ -929,8 +930,6 @@ func (b *Blockchain) WriteBlock(block *types.Block, source string) error { b.writeLock.Lock() defer b.writeLock.Unlock() - batchHelper := storage.NewBatchHelper(b.db) - if block.Number() <= b.Header().Number { b.logger.Info("block already inserted", "block", block.Number(), "source", source) @@ -939,6 +938,8 @@ func (b *Blockchain) WriteBlock(block *types.Block, source string) error { header := block.Header + batchHelper := storage.NewBatchHelper(b.db) + if err := b.writeBody(batchHelper, block); err != nil { return err } @@ -1060,11 +1061,11 @@ func (b *Blockchain) writeBody(batch *storage.BatchHelper, block *types.Block) e } // Write the full body (txns + receipts) - batch.WriteBody(block.Header.Hash, block.Body()) + batch.PutBody(block.Header.Hash, block.Body()) // Write txn lookups (txHash -> block) for _, txn := range block.Transactions { - batch.WriteTxLookup(txn.Hash, block.Hash()) + batch.PutTxLookup(txn.Hash, block.Hash()) } return nil @@ -1207,7 +1208,7 @@ func (b *Blockchain) writeHeaderImpl(batch *storage.BatchHelper, evnt *Event, he return b.writeCanonicalHeader(batch, evnt, header) } - batch.WriteHeader(header) + batch.PutHeader(header) currentTD, ok := b.readTotalDifficulty(currentHeader.Hash) if !ok { @@ -1225,7 +1226,7 @@ func (b *Blockchain) writeHeaderImpl(batch *storage.BatchHelper, evnt *Event, he } // Write the difficulty - batch.WriteTotalDifficulty( + batch.PutTotalDifficulty( header.Hash, big.NewInt(0).Add( parentTD, diff --git a/blockchain/storage/batch_helper.go b/blockchain/storage/batch_helper.go index 89984122d0..91976cc6a7 100644 --- a/blockchain/storage/batch_helper.go +++ 
b/blockchain/storage/batch_helper.go @@ -24,48 +24,48 @@ func NewBatchHelper(storage Storage) *BatchHelper { return &BatchHelper{batch: batch} } -func (b *BatchHelper) WriteHeader(h *types.Header) { +func (b *BatchHelper) PutHeader(h *types.Header) { b.putRlp(HEADER, h.Hash.Bytes(), h) } -func (b *BatchHelper) WriteBody(hash types.Hash, body *types.Body) { +func (b *BatchHelper) PutBody(hash types.Hash, body *types.Body) { b.putRlp(BODY, hash.Bytes(), body) } -func (b *BatchHelper) WriteHeadHash(h types.Hash) { +func (b *BatchHelper) PutHeadHash(h types.Hash) { b.putWithPrefix(HEAD, HASH, h.Bytes()) } -func (b *BatchHelper) WriteTxLookup(hash types.Hash, blockHash types.Hash) { +func (b *BatchHelper) PutTxLookup(hash types.Hash, blockHash types.Hash) { ar := &fastrlp.Arena{} vr := ar.NewBytes(blockHash.Bytes()).MarshalTo(nil) b.putWithPrefix(TX_LOOKUP_PREFIX, hash.Bytes(), vr) } -func (b *BatchHelper) WriteHeadNumber(n uint64) { +func (b *BatchHelper) PutHeadNumber(n uint64) { b.putWithPrefix(HEAD, NUMBER, common.EncodeUint64ToBytes(n)) } -func (b *BatchHelper) WriteReceipts(hash types.Hash, receipts []*types.Receipt) { +func (b *BatchHelper) PutReceipts(hash types.Hash, receipts []*types.Receipt) { rr := types.Receipts(receipts) b.putRlp(RECEIPTS, hash.Bytes(), &rr) } -func (b *BatchHelper) WriteCanonicalHeader(h *types.Header, diff *big.Int) { - b.WriteHeader(h) - b.WriteHeadHash(h.Hash) - b.WriteHeadNumber(h.Number) - b.WriteCanonicalHash(h.Number, h.Hash) - b.WriteTotalDifficulty(h.Hash, diff) +func (b *BatchHelper) PutCanonicalHeader(h *types.Header, diff *big.Int) { + b.PutHeader(h) + b.PutHeadHash(h.Hash) + b.PutHeadNumber(h.Number) + b.PutCanonicalHash(h.Number, h.Hash) + b.PutTotalDifficulty(h.Hash, diff) } -func (b *BatchHelper) WriteCanonicalHash(n uint64, hash types.Hash) { +func (b *BatchHelper) PutCanonicalHash(n uint64, hash types.Hash) { b.putWithPrefix(CANONICAL, common.EncodeUint64ToBytes(n), hash.Bytes()) } -func (b *BatchHelper) 
WriteTotalDifficulty(hash types.Hash, diff *big.Int) { +func (b *BatchHelper) PutTotalDifficulty(hash types.Hash, diff *big.Int) { b.putWithPrefix(DIFFICULTY, hash.Bytes(), diff.Bytes()) } diff --git a/blockchain/storage/leveldb/leveldb_test.go b/blockchain/storage/leveldb/leveldb_test.go index f637f64cd6..5fab747f27 100644 --- a/blockchain/storage/leveldb/leveldb_test.go +++ b/blockchain/storage/leveldb/leveldb_test.go @@ -245,16 +245,16 @@ insertloop: case b := <-blockchain: batchHelper := storage.NewBatchHelper(s) - batchHelper.WriteBody(b.Block.Hash(), b.Block.Body()) + batchHelper.PutBody(b.Block.Hash(), b.Block.Body()) for _, tx := range b.Block.Transactions { - batchHelper.WriteTxLookup(tx.Hash, b.Block.Hash()) + batchHelper.PutTxLookup(tx.Hash, b.Block.Hash()) } - batchHelper.WriteHeader(b.Block.Header) - batchHelper.WriteHeadNumber(uint64(i)) - batchHelper.WriteHeadHash(b.Block.Header.Hash) - batchHelper.WriteReceipts(b.Block.Hash(), b.Receipts) + batchHelper.PutHeader(b.Block.Header) + batchHelper.PutHeadNumber(uint64(i)) + batchHelper.PutHeadHash(b.Block.Header.Hash) + batchHelper.PutReceipts(b.Block.Hash(), b.Receipts) if err := batchHelper.WriteBatch(); err != nil { require.NoError(t, err) diff --git a/blockchain/storage/testing.go b/blockchain/storage/testing.go index 960d8aab56..4a808839c0 100644 --- a/blockchain/storage/testing.go +++ b/blockchain/storage/testing.go @@ -737,7 +737,3 @@ func (m *MockStorage) HookClose(fn closeDelegate) { func (m *MockStorage) NewBatch() Batch { return nil } - -func (m *MockStorage) WriteBatch() error { - return nil -} From f20c42d05f63e15d7771d0cb8b0abc8b24b5b1bf Mon Sep 17 00:00:00 2001 From: Marko Jelaca Date: Thu, 8 Jun 2023 10:37:31 +0200 Subject: [PATCH 09/16] DirSize function change --- blockchain/storage/leveldb/leveldb_test.go | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/blockchain/storage/leveldb/leveldb_test.go b/blockchain/storage/leveldb/leveldb_test.go index 
5fab747f27..8014479f5e 100644 --- a/blockchain/storage/leveldb/leveldb_test.go +++ b/blockchain/storage/leveldb/leveldb_test.go @@ -202,12 +202,14 @@ func generateBlocks(t *testing.T, count int, ch chan *types.FullBlock, ctx conte } } -func DirSize(path string) (int64, error) { +func dirSize(t *testing.T, path string) int64 { + t.Helper() + var size int64 err := filepath.Walk(path, func(_ string, info os.FileInfo, err error) error { if err != nil { - return err + t.Fail() } if !info.IsDir() { size += info.Size() @@ -215,8 +217,11 @@ func DirSize(path string) (int64, error) { return err }) + if err != nil { + t.Log(err) + } - return size, err + return size } func TestWriteFullBlock(t *testing.T) { @@ -262,7 +267,7 @@ insertloop: fmt.Println("writing block", i) - size, _ := DirSize(path) + size := dirSize(t, path) fmt.Println("\tldb file count:", countLdbFilesInPath(path)) fmt.Println("\tdir size", size/1_000_000, "MBs") } From 8a85407868128b7706f6e09abe1e040a38e0e541 Mon Sep 17 00:00:00 2001 From: Marko Jelaca Date: Wed, 21 Jun 2023 15:37:33 +0200 Subject: [PATCH 10/16] Dispatch event after batch write --- blockchain/blockchain.go | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/blockchain/blockchain.go b/blockchain/blockchain.go index 55b6ff4197..df9e5142a0 100644 --- a/blockchain/blockchain.go +++ b/blockchain/blockchain.go @@ -895,11 +895,15 @@ func (b *Blockchain) WriteFullBlock(fblock *types.FullBlock, source string) erro return err } - b.dispatchEvent(evnt) - // Update the average gas price b.updateGasPriceAvgWithBlock(block) + if err := batchHelper.WriteBatch(); err != nil { + return err + } + + b.dispatchEvent(evnt) + logArgs := []interface{}{ "number", header.Number, "txs", len(block.Transactions), @@ -913,10 +917,6 @@ func (b *Blockchain) WriteFullBlock(fblock *types.FullBlock, source string) erro logArgs = append(logArgs, "generation_time_in_seconds", diff) } - if err := batchHelper.WriteBatch(); err != nil { - 
return err - } - b.logger.Info("new block", logArgs...) return nil @@ -966,11 +966,15 @@ func (b *Blockchain) WriteBlock(block *types.Block, source string) error { return err } - b.dispatchEvent(evnt) - // Update the average gas price b.updateGasPriceAvgWithBlock(block) + if err := batchHelper.WriteBatch(); err != nil { + return err + } + + b.dispatchEvent(evnt) + logArgs := []interface{}{ "number", header.Number, "txs", len(block.Transactions), @@ -984,10 +988,6 @@ func (b *Blockchain) WriteBlock(block *types.Block, source string) error { logArgs = append(logArgs, "generation_time_in_seconds", diff) } - if err := batchHelper.WriteBatch(); err != nil { - return err - } - b.logger.Info("new block", logArgs...) return nil From ed159c9773fd22746326b4e44d0869301bb4b0f0 Mon Sep 17 00:00:00 2001 From: Marko Jelaca Date: Thu, 22 Jun 2023 09:47:04 +0200 Subject: [PATCH 11/16] Changed leveldb batch write unit test --- blockchain/storage/leveldb/leveldb_test.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/blockchain/storage/leveldb/leveldb_test.go b/blockchain/storage/leveldb/leveldb_test.go index 8014479f5e..b129ff392f 100644 --- a/blockchain/storage/leveldb/leveldb_test.go +++ b/blockchain/storage/leveldb/leveldb_test.go @@ -2,7 +2,6 @@ package leveldb import ( "context" - "fmt" "math/big" "math/rand" "os" @@ -260,16 +259,17 @@ insertloop: batchHelper.PutHeadNumber(uint64(i)) batchHelper.PutHeadHash(b.Block.Header.Hash) batchHelper.PutReceipts(b.Block.Hash(), b.Receipts) + batchHelper.PutCanonicalHash(uint64(i), b.Block.Hash()) if err := batchHelper.WriteBatch(); err != nil { require.NoError(t, err) } - fmt.Println("writing block", i) + t.Logf("writing block %d", i) size := dirSize(t, path) - fmt.Println("\tldb file count:", countLdbFilesInPath(path)) - fmt.Println("\tdir size", size/1_000_000, "MBs") + t.Logf("\tldb file count: %d", countLdbFilesInPath(path)) + t.Logf("\tdir size %d MBs", size/1_000_000) } } } From 
6c99846c3bad8d2471f72b16e7d5b6ff27da647e Mon Sep 17 00:00:00 2001 From: Igor Crevar Date: Mon, 3 Jul 2023 15:56:41 +0200 Subject: [PATCH 12/16] small changes --- blockchain/blockchain.go | 3 +-- blockchain/storage/batch_helper.go | 8 +++----- blockchain/storage/leveldb/batch.go | 11 +++++++---- blockchain/storage/memory/batch.go | 11 +++++++---- 4 files changed, 18 insertions(+), 15 deletions(-) diff --git a/blockchain/blockchain.go b/blockchain/blockchain.go index df9e5142a0..41cdf83167 100644 --- a/blockchain/blockchain.go +++ b/blockchain/blockchain.go @@ -1206,8 +1206,6 @@ func (b *Blockchain) writeHeaderImpl(batch *storage.BatchHelper, evnt *Event, he return b.writeCanonicalHeader(batch, evnt, header) } - batch.PutHeader(header) - currentTD, ok := b.readTotalDifficulty(currentHeader.Hash) if !ok { return errors.New("failed to get header difficulty") @@ -1223,6 +1221,7 @@ func (b *Blockchain) writeHeaderImpl(batch *storage.BatchHelper, evnt *Event, he ) } + batch.PutHeader(header) // Write the difficulty batch.PutTotalDifficulty( header.Hash, diff --git a/blockchain/storage/batch_helper.go b/blockchain/storage/batch_helper.go index 91976cc6a7..2bab97bc5d 100644 --- a/blockchain/storage/batch_helper.go +++ b/blockchain/storage/batch_helper.go @@ -11,7 +11,7 @@ import ( type Batch interface { Delete(key []byte) Write() error - Put(k []byte, data []byte) + Put(k []byte, v []byte) } type BatchHelper struct { @@ -19,9 +19,7 @@ type BatchHelper struct { } func NewBatchHelper(storage Storage) *BatchHelper { - batch := storage.NewBatch() - - return &BatchHelper{batch: batch} + return &BatchHelper{batch: storage.NewBatch()} } func (b *BatchHelper) PutHeader(h *types.Header) { @@ -82,7 +80,7 @@ func (b *BatchHelper) putRlp(p, k []byte, raw types.RLPMarshaler) { } func (b *BatchHelper) putWithPrefix(p, k, data []byte) { - fullKey := append(append([]byte{}, p...), k...) + fullKey := append(append(make([]byte, 0, len(p)+len(k)), p...), k...) 
b.batch.Put(fullKey, data) } diff --git a/blockchain/storage/leveldb/batch.go b/blockchain/storage/leveldb/batch.go index bfc8a51fb5..4a87712016 100644 --- a/blockchain/storage/leveldb/batch.go +++ b/blockchain/storage/leveldb/batch.go @@ -1,9 +1,12 @@ package leveldb import ( + "github.com/0xPolygon/polygon-edge/blockchain/storage" "github.com/syndtr/goleveldb/leveldb" ) +var _ storage.Batch = (*batchLevelDB)(nil) + type batchLevelDB struct { db *leveldb.DB b *leveldb.Batch @@ -20,10 +23,10 @@ func (b *batchLevelDB) Delete(key []byte) { b.b.Delete(key) } -func (b *batchLevelDB) Write() error { - return b.db.Write(b.b, nil) +func (b *batchLevelDB) Put(k []byte, v []byte) { + b.b.Put(k, v) } -func (b *batchLevelDB) Put(k []byte, data []byte) { - b.b.Put(k, data) +func (b *batchLevelDB) Write() error { + return b.db.Write(b.b, nil) } diff --git a/blockchain/storage/memory/batch.go b/blockchain/storage/memory/batch.go index fe4b1fabbc..e370dcf753 100644 --- a/blockchain/storage/memory/batch.go +++ b/blockchain/storage/memory/batch.go @@ -1,9 +1,12 @@ package memory import ( + "github.com/0xPolygon/polygon-edge/blockchain/storage" "github.com/0xPolygon/polygon-edge/helper/hex" ) +var _ storage.Batch = (*batchMemory)(nil) + type batchMemory struct { db map[string][]byte keysToDelete [][]byte @@ -18,6 +21,10 @@ func (b *batchMemory) Delete(key []byte) { b.keysToDelete = append(b.keysToDelete, key) } +func (b *batchMemory) Put(k []byte, v []byte) { + b.valuesToPut = append(b.valuesToPut, [2][]byte{k, v}) +} + func (b *batchMemory) Write() error { for _, x := range b.keysToDelete { delete(b.db, hex.EncodeToHex(x)) @@ -29,7 +36,3 @@ func (b *batchMemory) Write() error { return nil } - -func (b *batchMemory) Put(k []byte, data []byte) { - b.valuesToPut = append(b.valuesToPut, [2][]byte{k, data}) -} From 5c003da25308ebdd3dfefafe2851772d0abf1865 Mon Sep 17 00:00:00 2001 From: Igor Crevar Date: Mon, 3 Jul 2023 18:07:03 +0200 Subject: [PATCH 13/16] all writting is done by batch 
helper/writer. blockchain storage things are updated only after successful batch writeBatch --- blockchain/blockchain.go | 245 +++++++++------------- blockchain/blockchain_test.go | 11 +- blockchain/storage/batch_helper.go | 6 + blockchain/storage/keyvalue.go | 100 --------- blockchain/storage/storage.go | 11 - blockchain/storage/testing.go | 325 +++++++++-------------------- blockchain/testing.go | 9 +- 7 files changed, 218 insertions(+), 489 deletions(-) diff --git a/blockchain/blockchain.go b/blockchain/blockchain.go index 41cdf83167..5eb07fc697 100644 --- a/blockchain/blockchain.go +++ b/blockchain/blockchain.go @@ -400,19 +400,19 @@ func (b *Blockchain) writeGenesis(genesis *chain.Genesis) error { // writeGenesisImpl writes the genesis file to the DB + blockchain reference func (b *Blockchain) writeGenesisImpl(header *types.Header) error { - // Update the reference - b.genesis = header.Hash + batchHelper := storage.NewBatchHelper(b.db) - // Update the DB - if err := b.db.WriteHeader(header); err != nil { - return err - } + newTD := new(big.Int).SetUint64(header.Difficulty) + + batchHelper.PutCanonicalHeader(header, newTD) - // Advance the head - if _, err := b.advanceHead(header); err != nil { + if err := b.writeBatchAndUpdate(batchHelper, header, newTD, true); err != nil { return err } + // Update the reference + b.genesis = header.Hash + // Create an event and send it to the stream event := &Event{} event.AddNewHeader(header) @@ -440,66 +440,6 @@ func (b *Blockchain) GetTD(hash types.Hash) (*big.Int, bool) { return b.readTotalDifficulty(hash) } -// writeCanonicalHeader writes the new header -func (b *Blockchain) writeCanonicalHeader(batch *storage.BatchHelper, event *Event, h *types.Header) error { - parentTD, ok := b.readTotalDifficulty(h.ParentHash) - if !ok { - return fmt.Errorf("parent difficulty not found") - } - - newTD := big.NewInt(0).Add(parentTD, new(big.Int).SetUint64(h.Difficulty)) - batch.PutCanonicalHeader(h, newTD) - - event.Type = EventHead - 
event.AddNewHeader(h) - event.SetDifficulty(newTD) - - b.setCurrentHeader(h, newTD) - - return nil -} - -// advanceHead Sets the passed in header as the new head of the chain -func (b *Blockchain) advanceHead(newHeader *types.Header) (*big.Int, error) { - // Write the current head hash into storage - if err := b.db.WriteHeadHash(newHeader.Hash); err != nil { - return nil, err - } - - // Write the current head number into storage - if err := b.db.WriteHeadNumber(newHeader.Number); err != nil { - return nil, err - } - - // Matches the current head number with the current hash - if err := b.db.WriteCanonicalHash(newHeader.Number, newHeader.Hash); err != nil { - return nil, err - } - - // Check if there was a parent difficulty - parentTD := big.NewInt(0) - - if newHeader.ParentHash != types.StringToHash("") { - td, ok := b.readTotalDifficulty(newHeader.ParentHash) - if !ok { - return nil, fmt.Errorf("parent difficulty not found") - } - - parentTD = td - } - - // Calculate the new total difficulty - newTD := big.NewInt(0).Add(parentTD, big.NewInt(0).SetUint64(newHeader.Difficulty)) - if err := b.db.WriteTotalDifficulty(newHeader.Hash, newTD); err != nil { - return nil, err - } - - // Update the blockchain reference - b.setCurrentHeader(newHeader, newTD) - - return newTD, nil -} - // GetReceiptsByHash returns the receipts by their hash func (b *Blockchain) GetReceiptsByHash(hash types.Hash) ([]*types.Receipt, error) { return b.db.ReadReceipts(hash) @@ -553,7 +493,11 @@ func (b *Blockchain) readBody(hash types.Hash) (*types.Body, bool) { // To return from field in the transactions of the past blocks if updated := b.recoverFromFieldsInTransactions(bb.Transactions); updated { - if err := b.db.WriteBody(hash, bb); err != nil { + batchHelper := storage.NewBatchHelper(b.db) + + batchHelper.PutBody(hash, bb) + + if err := batchHelper.WriteBatch(); err != nil { b.logger.Warn("failed to write body into storage", "hash", hash, "err", err) } } @@ -633,16 +577,17 @@ func (b 
*Blockchain) WriteHeadersWithBodies(headers []*types.Header) error { } // Write the actual headers in separate batches for now - for _, h := range headers { + for _, header := range headers { event := &Event{} batchHelper := storage.NewBatchHelper(b.db) - if err := b.writeHeaderImpl(batchHelper, event, h); err != nil { + isCanonical, newTD, err := b.writeHeaderImpl(batchHelper, event, header) + if err != nil { return err } - if err := batchHelper.WriteBatch(); err != nil { + if err := b.writeBatchAndUpdate(batchHelper, header, newTD, isCanonical); err != nil { return err } @@ -881,7 +826,9 @@ func (b *Blockchain) WriteFullBlock(fblock *types.FullBlock, source string) erro // Write the header to the chain evnt := &Event{Source: source} - if err := b.writeHeaderImpl(batchHelper, evnt, header); err != nil { + + isCanonical, newTD, err := b.writeHeaderImpl(batchHelper, evnt, header) + if err != nil { return err } @@ -898,7 +845,7 @@ func (b *Blockchain) WriteFullBlock(fblock *types.FullBlock, source string) erro // Update the average gas price b.updateGasPriceAvgWithBlock(block) - if err := batchHelper.WriteBatch(); err != nil { + if err := b.writeBatchAndUpdate(batchHelper, header, newTD, isCanonical); err != nil { return err } @@ -944,7 +891,9 @@ func (b *Blockchain) WriteBlock(block *types.Block, source string) error { // Write the header to the chain evnt := &Event{Source: source} - if err := b.writeHeaderImpl(batchHelper, evnt, header); err != nil { + + isCanonical, newTD, err := b.writeHeaderImpl(batchHelper, evnt, header) + if err != nil { return err } @@ -957,9 +906,7 @@ func (b *Blockchain) WriteBlock(block *types.Block, source string) error { // write the receipts, do it only after the header has been written. 
// Otherwise, a client might ask for a header once the receipt is valid, // but before it is written into the storage - if err := b.db.WriteReceipts(block.Hash(), blockReceipts); err != nil { - return err - } + batchHelper.PutReceipts(block.Hash(), blockReceipts) // update snapshot if err := b.consensus.ProcessHeaders([]*types.Header{header}); err != nil { @@ -969,7 +916,7 @@ func (b *Blockchain) WriteBlock(block *types.Block, source string) error { // Update the average gas price b.updateGasPriceAvgWithBlock(block) - if err := batchHelper.WriteBatch(); err != nil { + if err := b.writeBatchAndUpdate(batchHelper, header, newTD, isCanonical); err != nil { return err } @@ -1197,70 +1144,74 @@ func (b *Blockchain) dispatchEvent(evnt *Event) { } // writeHeaderImpl writes a block and the data, assumes the genesis is already set -func (b *Blockchain) writeHeaderImpl(batch *storage.BatchHelper, evnt *Event, header *types.Header) error { - currentHeader := b.Header() - - // Write the data - if header.ParentHash == currentHeader.Hash { - // Fast path to save the new canonical header - return b.writeCanonicalHeader(batch, evnt, header) - } - - currentTD, ok := b.readTotalDifficulty(currentHeader.Hash) - if !ok { - return errors.New("failed to get header difficulty") - } - +// Returnning parameters (is canonical header, new total difficulty, error) +func (b *Blockchain) writeHeaderImpl( + batchHelper *storage.BatchHelper, evnt *Event, header *types.Header) (bool, *big.Int, error) { // parent total difficulty of incoming header parentTD, ok := b.readTotalDifficulty(header.ParentHash) if !ok { - return fmt.Errorf( + return false, nil, fmt.Errorf( "parent of %s (%d) not found", header.Hash.String(), header.Number, ) } - batch.PutHeader(header) - // Write the difficulty - batch.PutTotalDifficulty( - header.Hash, - big.NewInt(0).Add( - parentTD, - big.NewInt(0).SetUint64(header.Difficulty), - ), - ) + currentHeader := b.Header() + incomingTD := new(big.Int).Add(parentTD, 
new(big.Int).SetUint64(header.Difficulty)) + + // if parent of new header is current header just put everything in batch and update event + // new header will be canonical one + if header.ParentHash == currentHeader.Hash { + batchHelper.PutCanonicalHeader(header, incomingTD) + + evnt.Type = EventHead + evnt.AddNewHeader(header) + evnt.SetDifficulty(incomingTD) - // Update the headers cache - b.headersCache.Add(header.Hash, header) + return true, incomingTD, nil + } + + currentTD, ok := b.readTotalDifficulty(currentHeader.Hash) + if !ok { + return false, nil, errors.New("failed to get header difficulty") + } - incomingTD := big.NewInt(0).Add(parentTD, big.NewInt(0).SetUint64(header.Difficulty)) if incomingTD.Cmp(currentTD) > 0 { // new block has higher difficulty, reorg the chain - if err := b.handleReorg(evnt, currentHeader, header); err != nil { - return err + if err := b.handleReorg(batchHelper, evnt, currentHeader, header, incomingTD); err != nil { + return false, nil, err } - } else { - // new block has lower difficulty, create a new fork - evnt.AddOldHeader(header) - evnt.Type = EventFork - if err := b.writeFork(header); err != nil { - return err - } + batchHelper.PutCanonicalHeader(header, incomingTD) + + return true, incomingTD, nil } - return nil + forks, err := b.getForksToWrite(header) + if err != nil { + return false, nil, err + } + + batchHelper.PutHeader(header) + batchHelper.PutTotalDifficulty(header.Hash, incomingTD) + batchHelper.PutForks(forks) + + // new block has lower difficulty, create a new fork + evnt.AddOldHeader(header) + evnt.Type = EventFork + + return false, nil, nil } -// writeFork writes the new header forks to the DB -func (b *Blockchain) writeFork(header *types.Header) error { +// getForksToWrite retrieves new header forks that should be written to the DB +func (b *Blockchain) getForksToWrite(header *types.Header) ([]types.Hash, error) { forks, err := b.db.ReadForks() if err != nil { if errors.Is(err, storage.ErrNotFound) { forks = 
[]types.Hash{} } else { - return err + return nil, err } } @@ -1272,19 +1223,16 @@ func (b *Blockchain) writeFork(header *types.Header) error { } } - newForks = append(newForks, header.Hash) - if err := b.db.WriteForks(newForks); err != nil { - return err - } - - return nil + return append(newForks, header.Hash), nil } // handleReorg handles a reorganization event func (b *Blockchain) handleReorg( + batchHelper *storage.BatchHelper, evnt *Event, oldHeader *types.Header, newHeader *types.Header, + newTD *big.Int, ) error { newChainHead := newHeader oldChainHead := oldHeader @@ -1328,6 +1276,18 @@ func (b *Blockchain) handleReorg( oldChain = append(oldChain, oldHeader) } + forks, err := b.getForksToWrite(oldChainHead) + if err != nil { + return fmt.Errorf("failed to write the old header as fork: %w", err) + } + + batchHelper.PutForks(forks) + + // Update canonical chain numbers + for _, h := range newChain { + batchHelper.PutCanonicalHash(h.Number, h.Hash) + } + for _, b := range oldChain[:len(oldChain)-1] { evnt.AddOldHeader(b) } @@ -1339,25 +1299,9 @@ func (b *Blockchain) handleReorg( evnt.AddNewHeader(b) } - if err := b.writeFork(oldChainHead); err != nil { - return fmt.Errorf("failed to write the old header as fork: %w", err) - } - - // Update canonical chain numbers - for _, h := range newChain { - if err := b.db.WriteCanonicalHash(h.Number, h.Hash); err != nil { - return err - } - } - - diff, err := b.advanceHead(newChainHead) - if err != nil { - return err - } - // Set the event type and difficulty evnt.Type = EventReorg - evnt.SetDifficulty(diff) + evnt.SetDifficulty(newTD) return nil } @@ -1448,3 +1392,20 @@ func calcBaseFeeDelta(gasUsedDelta, parentGasTarget, baseFee uint64) uint64 { return y / defaultBaseFeeChangeDenom } + +func (b *Blockchain) writeBatchAndUpdate( + batchHelper *storage.BatchHelper, + header *types.Header, + newTD *big.Int, + isCanonnical bool) error { + if err := batchHelper.WriteBatch(); err != nil { + return err + } + + if isCanonnical 
{ + b.headersCache.Add(header.Hash, header) + b.setCurrentHeader(header, newTD) // Update the blockchain reference + } + + return nil +} diff --git a/blockchain/blockchain_test.go b/blockchain/blockchain_test.go index e5060ae255..1d709b56de 100644 --- a/blockchain/blockchain_test.go +++ b/blockchain/blockchain_test.go @@ -25,8 +25,7 @@ func TestGenesis(t *testing.T) { genesis := &types.Header{Difficulty: 1, Number: 0} genesis.ComputeHash() - _, err := b.advanceHead(genesis) - assert.NoError(t, err) + assert.NoError(t, b.writeGenesisImpl(genesis)) header := b.Header() assert.Equal(t, header.Hash, genesis.Hash) @@ -533,8 +532,12 @@ func TestForkUnknownParents(t *testing.T) { h1 := AppendNewTestHeaders(h0[:5], 10) // Write genesis - _, err := b.advanceHead(h0[0]) - assert.NoError(t, err) + bh := storage.NewBatchHelper(b.db) + td := new(big.Int).SetUint64(h0[0].Difficulty) + + bh.PutCanonicalHeader(h0[0], td) + + assert.NoError(t, b.writeBatchAndUpdate(bh, h0[0], td, true)) // Write 10 headers assert.NoError(t, b.WriteHeaders(h0[1:])) diff --git a/blockchain/storage/batch_helper.go b/blockchain/storage/batch_helper.go index 2bab97bc5d..ec4a952d81 100644 --- a/blockchain/storage/batch_helper.go +++ b/blockchain/storage/batch_helper.go @@ -67,6 +67,12 @@ func (b *BatchHelper) PutTotalDifficulty(hash types.Hash, diff *big.Int) { b.putWithPrefix(DIFFICULTY, hash.Bytes(), diff.Bytes()) } +func (b *BatchHelper) PutForks(forks []types.Hash) { + ff := Forks(forks) + + b.putRlp(FORK, EMPTY, &ff) +} + func (b *BatchHelper) putRlp(p, k []byte, raw types.RLPMarshaler) { var data []byte diff --git a/blockchain/storage/keyvalue.go b/blockchain/storage/keyvalue.go index 602ae3e78f..f1e0ea7bd5 100644 --- a/blockchain/storage/keyvalue.go +++ b/blockchain/storage/keyvalue.go @@ -92,11 +92,6 @@ func (s *KeyValueStorage) ReadCanonicalHash(n uint64) (types.Hash, bool) { return types.BytesToHash(data), true } -// WriteCanonicalHash writes a hash for a number block in the canonical chain 
-func (s *KeyValueStorage) WriteCanonicalHash(n uint64, hash types.Hash) error { - return s.set(CANONICAL, s.encodeUint(n), hash.Bytes()) -} - // HEAD // // ReadHeadHash returns the hash of the head @@ -123,25 +118,8 @@ func (s *KeyValueStorage) ReadHeadNumber() (uint64, bool) { return s.decodeUint(data), true } -// WriteHeadHash writes the hash of the head -func (s *KeyValueStorage) WriteHeadHash(h types.Hash) error { - return s.set(HEAD, HASH, h.Bytes()) -} - -// WriteHeadNumber writes the number of the head -func (s *KeyValueStorage) WriteHeadNumber(n uint64) error { - return s.set(HEAD, NUMBER, s.encodeUint(n)) -} - // FORK // -// WriteForks writes the current forks -func (s *KeyValueStorage) WriteForks(forks []types.Hash) error { - ff := Forks(forks) - - return s.writeRLP(FORK, EMPTY, &ff) -} - // ReadForks read the current forks func (s *KeyValueStorage) ReadForks() ([]types.Hash, error) { forks := &Forks{} @@ -152,11 +130,6 @@ func (s *KeyValueStorage) ReadForks() ([]types.Hash, error) { // DIFFICULTY // -// WriteTotalDifficulty writes the difficulty -func (s *KeyValueStorage) WriteTotalDifficulty(hash types.Hash, diff *big.Int) error { - return s.set(DIFFICULTY, hash.Bytes(), diff.Bytes()) -} - // ReadTotalDifficulty reads the difficulty func (s *KeyValueStorage) ReadTotalDifficulty(hash types.Hash) (*big.Int, bool) { v, ok := s.get(DIFFICULTY, hash.Bytes()) @@ -169,11 +142,6 @@ func (s *KeyValueStorage) ReadTotalDifficulty(hash types.Hash) (*big.Int, bool) // HEADER // -// WriteHeader writes the header -func (s *KeyValueStorage) WriteHeader(h *types.Header) error { - return s.writeRLP(HEADER, h.Hash.Bytes(), h) -} - // ReadHeader reads the header func (s *KeyValueStorage) ReadHeader(hash types.Hash) (*types.Header, error) { header := &types.Header{} @@ -182,38 +150,8 @@ func (s *KeyValueStorage) ReadHeader(hash types.Hash) (*types.Header, error) { return header, err } -// WriteCanonicalHeader implements the storage interface -func (s *KeyValueStorage) 
WriteCanonicalHeader(h *types.Header, diff *big.Int) error { - if err := s.WriteHeader(h); err != nil { - return err - } - - if err := s.WriteHeadHash(h.Hash); err != nil { - return err - } - - if err := s.WriteHeadNumber(h.Number); err != nil { - return err - } - - if err := s.WriteCanonicalHash(h.Number, h.Hash); err != nil { - return err - } - - if err := s.WriteTotalDifficulty(h.Hash, diff); err != nil { - return err - } - - return nil -} - // BODY // -// WriteBody writes the body -func (s *KeyValueStorage) WriteBody(hash types.Hash, body *types.Body) error { - return s.writeRLP(BODY, hash.Bytes(), body) -} - // ReadBody reads the body func (s *KeyValueStorage) ReadBody(hash types.Hash) (*types.Body, error) { body := &types.Body{} @@ -224,13 +162,6 @@ func (s *KeyValueStorage) ReadBody(hash types.Hash) (*types.Body, error) { // RECEIPTS // -// WriteReceipts writes the receipts -func (s *KeyValueStorage) WriteReceipts(hash types.Hash, receipts []*types.Receipt) error { - rr := types.Receipts(receipts) - - return s.writeRLP(RECEIPTS, hash.Bytes(), &rr) -} - // ReadReceipts reads the receipts func (s *KeyValueStorage) ReadReceipts(hash types.Hash) ([]*types.Receipt, error) { receipts := &types.Receipts{} @@ -241,14 +172,6 @@ func (s *KeyValueStorage) ReadReceipts(hash types.Hash) ([]*types.Receipt, error // TX LOOKUP // -// WriteTxLookup maps the transaction hash to the block hash -func (s *KeyValueStorage) WriteTxLookup(hash types.Hash, blockHash types.Hash) error { - ar := &fastrlp.Arena{} - vr := ar.NewBytes(blockHash.Bytes()) - - return s.write2(TX_LOOKUP_PREFIX, hash.Bytes(), vr) -} - // ReadTxLookup reads the block hash using the transaction hash func (s *KeyValueStorage) ReadTxLookup(hash types.Hash) (types.Hash, bool) { parser := &fastrlp.Parser{} @@ -270,17 +193,6 @@ func (s *KeyValueStorage) ReadTxLookup(hash types.Hash) (types.Hash, bool) { // WRITE OPERATIONS // -func (s *KeyValueStorage) writeRLP(p, k []byte, raw types.RLPMarshaler) error { - var data 
[]byte - if obj, ok := raw.(types.RLPStoreMarshaler); ok { - data = obj.MarshalStoreRLPTo(nil) - } else { - data = raw.MarshalRLPTo(nil) - } - - return s.set(p, k, data) -} - var ErrNotFound = fmt.Errorf("not found") func (s *KeyValueStorage) readRLP(p, k []byte, raw types.RLPUnmarshaler) error { @@ -324,18 +236,6 @@ func (s *KeyValueStorage) read2(p, k []byte, parser *fastrlp.Parser) *fastrlp.Va return v } -func (s *KeyValueStorage) write2(p, k []byte, v *fastrlp.Value) error { - dst := v.MarshalTo(nil) - - return s.set(p, k, dst) -} - -func (s *KeyValueStorage) set(p []byte, k []byte, v []byte) error { - p = append(p, k...) - - return s.db.Set(p, v) -} - func (s *KeyValueStorage) get(p []byte, k []byte) ([]byte, bool) { p = append(p, k...) data, ok, err := s.db.Get(p) diff --git a/blockchain/storage/storage.go b/blockchain/storage/storage.go index 2499d85e6d..15caaf111d 100644 --- a/blockchain/storage/storage.go +++ b/blockchain/storage/storage.go @@ -10,31 +10,20 @@ import ( // Storage is a generic blockchain storage type Storage interface { ReadCanonicalHash(n uint64) (types.Hash, bool) - WriteCanonicalHash(n uint64, hash types.Hash) error ReadHeadHash() (types.Hash, bool) ReadHeadNumber() (uint64, bool) - WriteHeadHash(h types.Hash) error - WriteHeadNumber(uint64) error - WriteForks(forks []types.Hash) error ReadForks() ([]types.Hash, error) - WriteTotalDifficulty(hash types.Hash, diff *big.Int) error ReadTotalDifficulty(hash types.Hash) (*big.Int, bool) - WriteHeader(h *types.Header) error ReadHeader(hash types.Hash) (*types.Header, error) - WriteCanonicalHeader(h *types.Header, diff *big.Int) error - - WriteBody(hash types.Hash, body *types.Body) error ReadBody(hash types.Hash) (*types.Body, error) - WriteReceipts(hash types.Hash, receipts []*types.Receipt) error ReadReceipts(hash types.Hash) ([]*types.Receipt, error) - WriteTxLookup(hash types.Hash, blockHash types.Hash) error ReadTxLookup(hash types.Hash) (types.Hash, bool) NewBatch() Batch diff --git 
a/blockchain/storage/testing.go b/blockchain/storage/testing.go index 4a808839c0..b61a2b192a 100644 --- a/blockchain/storage/testing.go +++ b/blockchain/storage/testing.go @@ -8,6 +8,7 @@ import ( "github.com/0xPolygon/polygon-edge/helper/hex" "github.com/0xPolygon/polygon-edge/types" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) type PlaceholderStorage func(t *testing.T) (Storage, func()) @@ -76,6 +77,8 @@ func testCanonicalChain(t *testing.T, m PlaceholderStorage) { } for _, cc := range cases { + batch := NewBatchHelper(s) + h := &types.Header{ Number: cc.Number, ParentHash: cc.ParentHash, @@ -84,13 +87,10 @@ func testCanonicalChain(t *testing.T, m PlaceholderStorage) { hash := h.Hash - if err := s.WriteHeader(h); err != nil { - t.Fatal(err) - } + batch.PutHeader(h) + batch.PutCanonicalHash(cc.Number, hash) - if err := s.WriteCanonicalHash(cc.Number, hash); err != nil { - t.Fatal(err) - } + require.NoError(t, batch.WriteBatch()) data, ok := s.ReadCanonicalHash(cc.Number) if !ok { @@ -124,6 +124,8 @@ func testDifficulty(t *testing.T, m PlaceholderStorage) { } for indx, cc := range cases { + batch := NewBatchHelper(s) + h := &types.Header{ Number: uint64(indx), ExtraData: []byte{}, @@ -131,13 +133,10 @@ func testDifficulty(t *testing.T, m PlaceholderStorage) { hash := h.Hash - if err := s.WriteHeader(h); err != nil { - t.Fatal(err) - } + batch.PutHeader(h) + batch.PutTotalDifficulty(hash, cc.Diff) - if err := s.WriteTotalDifficulty(hash, cc.Diff); err != nil { - t.Fatal(err) - } + require.NoError(t, batch.WriteBatch()) diff, ok := s.ReadTotalDifficulty(hash) if !ok { @@ -157,23 +156,19 @@ func testHead(t *testing.T, m PlaceholderStorage) { defer closeFn() for i := uint64(0); i < 5; i++ { + batch := NewBatchHelper(s) + h := &types.Header{ Number: i, ExtraData: []byte{}, } hash := h.Hash - if err := s.WriteHeader(h); err != nil { - t.Fatal(err) - } + batch.PutHeader(h) + batch.PutHeadNumber(i) + batch.PutHeadHash(hash) - if err := 
s.WriteHeadNumber(i); err != nil { - t.Fatal(err) - } - - if err := s.WriteHeadHash(hash); err != nil { - t.Fatal(err) - } + require.NoError(t, batch.WriteBatch()) n2, ok := s.ReadHeadNumber() if !ok { @@ -209,9 +204,11 @@ func testForks(t *testing.T, m PlaceholderStorage) { } for _, cc := range cases { - if err := s.WriteForks(cc.Forks); err != nil { - t.Fatal(err) - } + batch := NewBatchHelper(s) + + batch.PutForks(cc.Forks) + + require.NoError(t, batch.WriteBatch()) forks, err := s.ReadForks() assert.NoError(t, err) @@ -238,9 +235,11 @@ func testHeader(t *testing.T, m PlaceholderStorage) { } header.ComputeHash() - if err := s.WriteHeader(header); err != nil { - t.Fatal(err) - } + batch := NewBatchHelper(s) + + batch.PutHeader(header) + + require.NoError(t, batch.WriteBatch()) header1, err := s.ReadHeader(header.Hash) assert.NoError(t, err) @@ -263,9 +262,12 @@ func testBody(t *testing.T, m PlaceholderStorage) { Timestamp: 10, ExtraData: []byte{}, // if not set it will fail } - if err := s.WriteHeader(header); err != nil { - t.Fatal(err) - } + + batch := NewBatchHelper(s) + + batch.PutHeader(header) + + require.NoError(t, batch.WriteBatch()) addr1 := types.StringToAddress("11") t0 := &types.Transaction{ @@ -296,8 +298,12 @@ func testBody(t *testing.T, m PlaceholderStorage) { Transactions: []*types.Transaction{t0, t1}, } + batch2 := NewBatchHelper(s) body0 := block.Body() - assert.NoError(t, s.WriteBody(header.Hash, body0)) + + batch2.PutBody(header.Hash, body0) + + require.NoError(t, batch2.WriteBatch()) body1, err := s.ReadBody(header.Hash) assert.NoError(t, err) @@ -321,65 +327,64 @@ func testReceipts(t *testing.T, m PlaceholderStorage) { s, closeFn := m(t) defer closeFn() + batch := NewBatchHelper(s) + h := &types.Header{ Difficulty: 133, Number: 11, ExtraData: []byte{}, } - if err := s.WriteHeader(h); err != nil { - t.Fatal(err) - } + h.ComputeHash() - txn := &types.Transaction{ - Nonce: 1000, - Gas: 50, - GasPrice: new(big.Int).SetUint64(100), - V: 
big.NewInt(11), - } body := &types.Body{ - Transactions: []*types.Transaction{txn}, - } - - if err := s.WriteBody(h.Hash, body); err != nil { - t.Fatal(err) - } - - r0 := &types.Receipt{ - Root: types.StringToHash("1"), - CumulativeGasUsed: 10, - TxHash: txn.Hash, - LogsBloom: types.Bloom{0x1}, - Logs: []*types.Log{ - { - Address: addr1, - Topics: []types.Hash{hash1, hash2}, - Data: []byte{0x1, 0x2}, - }, + Transactions: []*types.Transaction{ { - Address: addr2, - Topics: []types.Hash{hash1}, + Nonce: 1000, + Gas: 50, + GasPrice: new(big.Int).SetUint64(100), + V: big.NewInt(11), }, }, } - r1 := &types.Receipt{ - Root: types.StringToHash("1"), - CumulativeGasUsed: 10, - TxHash: txn.Hash, - LogsBloom: types.Bloom{0x1}, - GasUsed: 10, - ContractAddress: &types.Address{0x1}, - Logs: []*types.Log{ - { - Address: addr2, - Topics: []types.Hash{hash1}, + receipts := []*types.Receipt{ + { + Root: types.StringToHash("1"), + CumulativeGasUsed: 10, + TxHash: body.Transactions[0].Hash, + LogsBloom: types.Bloom{0x1}, + Logs: []*types.Log{ + { + Address: addr1, + Topics: []types.Hash{hash1, hash2}, + Data: []byte{0x1, 0x2}, + }, + { + Address: addr2, + Topics: []types.Hash{hash1}, + }, + }, + }, + { + Root: types.StringToHash("1"), + CumulativeGasUsed: 10, + TxHash: body.Transactions[0].Hash, + LogsBloom: types.Bloom{0x1}, + GasUsed: 10, + ContractAddress: &types.Address{0x1}, + Logs: []*types.Log{ + { + Address: addr2, + Topics: []types.Hash{hash1}, + }, }, }, } - receipts := []*types.Receipt{r0, r1} - if err := s.WriteReceipts(h.Hash, receipts); err != nil { - t.Fatal(err) - } + batch.PutHeader(h) + batch.PutBody(h.Hash, body) + batch.PutReceipts(h.Hash, receipts) + + require.NoError(t, batch.WriteBatch()) found, err := s.ReadReceipts(h.Hash) if err != nil { @@ -402,10 +407,11 @@ func testWriteCanonicalHeader(t *testing.T, m PlaceholderStorage) { h.ComputeHash() diff := new(big.Int).SetUint64(100) + batch := NewBatchHelper(s) - if err := s.WriteCanonicalHeader(h, diff); err != 
nil { - t.Fatal(err) - } + batch.PutCanonicalHeader(h, diff) + + require.NoError(t, batch.WriteBatch()) hh, err := s.ReadHeader(h.Hash) assert.NoError(t, err) @@ -445,49 +451,28 @@ func testWriteCanonicalHeader(t *testing.T, m PlaceholderStorage) { // Storage delegators type readCanonicalHashDelegate func(uint64) (types.Hash, bool) -type writeCanonicalHashDelegate func(uint64, types.Hash) error type readHeadHashDelegate func() (types.Hash, bool) type readHeadNumberDelegate func() (uint64, bool) -type writeHeadHashDelegate func(types.Hash) error -type writeHeadNumberDelegate func(uint64) error -type writeForksDelegate func([]types.Hash) error type readForksDelegate func() ([]types.Hash, error) -type writeTotalDifficultyDelegate func(types.Hash, *big.Int) error type readTotalDifficultyDelegate func(types.Hash) (*big.Int, bool) -type writeHeaderDelegate func(*types.Header) error type readHeaderDelegate func(types.Hash) (*types.Header, error) -type writeCanonicalHeaderDelegate func(*types.Header, *big.Int) error -type writeBodyDelegate func(types.Hash, *types.Body) error type readBodyDelegate func(types.Hash) (*types.Body, error) -type writeSnapshotDelegate func(types.Hash, []byte) error type readSnapshotDelegate func(types.Hash) ([]byte, bool) -type writeReceiptsDelegate func(types.Hash, []*types.Receipt) error type readReceiptsDelegate func(types.Hash) ([]*types.Receipt, error) -type writeTxLookupDelegate func(types.Hash, types.Hash) error type readTxLookupDelegate func(types.Hash) (types.Hash, bool) type closeDelegate func() error type MockStorage struct { - readCanonicalHashFn readCanonicalHashDelegate - writeCanonicalHashFn writeCanonicalHashDelegate - readHeadHashFn readHeadHashDelegate - readHeadNumberFn readHeadNumberDelegate - writeHeadHashFn writeHeadHashDelegate - writeHeadNumberFn writeHeadNumberDelegate - writeForksFn writeForksDelegate - readForksFn readForksDelegate - writeTotalDifficultyFn writeTotalDifficultyDelegate - readTotalDifficultyFn 
readTotalDifficultyDelegate - writeHeaderFn writeHeaderDelegate - readHeaderFn readHeaderDelegate - writeCanonicalHeaderFn writeCanonicalHeaderDelegate - writeBodyFn writeBodyDelegate - readBodyFn readBodyDelegate - writeReceiptsFn writeReceiptsDelegate - readReceiptsFn readReceiptsDelegate - writeTxLookupFn writeTxLookupDelegate - readTxLookupFn readTxLookupDelegate - closeFn closeDelegate + readCanonicalHashFn readCanonicalHashDelegate + readHeadHashFn readHeadHashDelegate + readHeadNumberFn readHeadNumberDelegate + readForksFn readForksDelegate + readTotalDifficultyFn readTotalDifficultyDelegate + readHeaderFn readHeaderDelegate + readBodyFn readBodyDelegate + readReceiptsFn readReceiptsDelegate + readTxLookupFn readTxLookupDelegate + closeFn closeDelegate } func NewMockStorage() *MockStorage { @@ -506,18 +491,6 @@ func (m *MockStorage) HookReadCanonicalHash(fn readCanonicalHashDelegate) { m.readCanonicalHashFn = fn } -func (m *MockStorage) WriteCanonicalHash(n uint64, hash types.Hash) error { - if m.writeCanonicalHashFn != nil { - return m.writeCanonicalHashFn(n, hash) - } - - return nil -} - -func (m *MockStorage) HookWriteCanonicalHash(fn writeCanonicalHashDelegate) { - m.writeCanonicalHashFn = fn -} - func (m *MockStorage) ReadHeadHash() (types.Hash, bool) { if m.readHeadHashFn != nil { return m.readHeadHashFn() @@ -542,42 +515,6 @@ func (m *MockStorage) HookReadHeadNumber(fn readHeadNumberDelegate) { m.readHeadNumberFn = fn } -func (m *MockStorage) WriteHeadHash(h types.Hash) error { - if m.writeHeadHashFn != nil { - return m.writeHeadHashFn(h) - } - - return nil -} - -func (m *MockStorage) HookWriteHeadHash(fn writeHeadHashDelegate) { - m.writeHeadHashFn = fn -} - -func (m *MockStorage) WriteHeadNumber(n uint64) error { - if m.writeHeadNumberFn != nil { - return m.writeHeadNumberFn(n) - } - - return nil -} - -func (m *MockStorage) HookWriteHeadNumber(fn writeHeadNumberDelegate) { - m.writeHeadNumberFn = fn -} - -func (m *MockStorage) WriteForks(forks 
[]types.Hash) error { - if m.writeForksFn != nil { - return m.writeForksFn(forks) - } - - return nil -} - -func (m *MockStorage) HookWriteForks(fn writeForksDelegate) { - m.writeForksFn = fn -} - func (m *MockStorage) ReadForks() ([]types.Hash, error) { if m.readForksFn != nil { return m.readForksFn() @@ -590,18 +527,6 @@ func (m *MockStorage) HookReadForks(fn readForksDelegate) { m.readForksFn = fn } -func (m *MockStorage) WriteTotalDifficulty(hash types.Hash, diff *big.Int) error { - if m.writeTotalDifficultyFn != nil { - return m.writeTotalDifficultyFn(hash, diff) - } - - return nil -} - -func (m *MockStorage) HookWriteTotalDifficulty(fn writeTotalDifficultyDelegate) { - m.writeTotalDifficultyFn = fn -} - func (m *MockStorage) ReadTotalDifficulty(hash types.Hash) (*big.Int, bool) { if m.readTotalDifficultyFn != nil { return m.readTotalDifficultyFn(hash) @@ -614,18 +539,6 @@ func (m *MockStorage) HookReadTotalDifficulty(fn readTotalDifficultyDelegate) { m.readTotalDifficultyFn = fn } -func (m *MockStorage) WriteHeader(h *types.Header) error { - if m.writeHeaderFn != nil { - return m.writeHeaderFn(h) - } - - return nil -} - -func (m *MockStorage) HookWriteHeader(fn writeHeaderDelegate) { - m.writeHeaderFn = fn -} - func (m *MockStorage) ReadHeader(hash types.Hash) (*types.Header, error) { if m.readHeaderFn != nil { return m.readHeaderFn(hash) @@ -638,30 +551,6 @@ func (m *MockStorage) HookReadHeader(fn readHeaderDelegate) { m.readHeaderFn = fn } -func (m *MockStorage) WriteCanonicalHeader(h *types.Header, diff *big.Int) error { - if m.writeCanonicalHeaderFn != nil { - return m.writeCanonicalHeaderFn(h, diff) - } - - return nil -} - -func (m *MockStorage) HookWriteCanonicalHeader(fn writeCanonicalHeaderDelegate) { - m.writeCanonicalHeaderFn = fn -} - -func (m *MockStorage) WriteBody(hash types.Hash, body *types.Body) error { - if m.writeBodyFn != nil { - return m.writeBodyFn(hash, body) - } - - return nil -} - -func (m *MockStorage) HookWriteBody(fn 
writeBodyDelegate) { - m.writeBodyFn = fn -} - func (m *MockStorage) ReadBody(hash types.Hash) (*types.Body, error) { if m.readBodyFn != nil { return m.readBodyFn(hash) @@ -674,18 +563,6 @@ func (m *MockStorage) HookReadBody(fn readBodyDelegate) { m.readBodyFn = fn } -func (m *MockStorage) WriteReceipts(hash types.Hash, receipts []*types.Receipt) error { - if m.writeReceiptsFn != nil { - return m.writeReceiptsFn(hash, receipts) - } - - return nil -} - -func (m *MockStorage) HookWriteReceipts(fn writeReceiptsDelegate) { - m.writeReceiptsFn = fn -} - func (m *MockStorage) ReadReceipts(hash types.Hash) ([]*types.Receipt, error) { if m.readReceiptsFn != nil { return m.readReceiptsFn(hash) @@ -698,18 +575,6 @@ func (m *MockStorage) HookReadReceipts(fn readReceiptsDelegate) { m.readReceiptsFn = fn } -func (m *MockStorage) WriteTxLookup(hash types.Hash, blockHash types.Hash) error { - if m.writeTxLookupFn != nil { - return m.writeTxLookupFn(hash, blockHash) - } - - return nil -} - -func (m *MockStorage) HookWriteTxLookup(fn writeTxLookupDelegate) { - m.writeTxLookupFn = fn -} - func (m *MockStorage) ReadTxLookup(hash types.Hash) (types.Hash, bool) { if m.readTxLookupFn != nil { return m.readTxLookupFn(hash) diff --git a/blockchain/testing.go b/blockchain/testing.go index e0f30864eb..a9c4467376 100644 --- a/blockchain/testing.go +++ b/blockchain/testing.go @@ -116,8 +116,13 @@ func NewTestBlockchain(t *testing.T, headers []*types.Header) *Blockchain { t.Fatal(err) } - if headers != nil { - if _, err := b.advanceHead(headers[0]); err != nil { + if len(headers) > 0 { + bh := storage.NewBatchHelper(b.db) + td := new(big.Int).SetUint64(headers[0].Difficulty) + + bh.PutCanonicalHeader(headers[0], td) + + if err := b.writeBatchAndUpdate(bh, headers[0], td, true); err != nil { t.Fatal(err) } From 10bd93ffd7b5c6b66232d472b83eed0ca024bf74 Mon Sep 17 00:00:00 2001 From: Igor Crevar Date: Mon, 3 Jul 2023 18:13:25 +0200 Subject: [PATCH 14/16] rename helper to writer --- 
blockchain/blockchain.go | 68 +++++++++---------- blockchain/blockchain_test.go | 30 ++++---- .../{batch_helper.go => batch_writer.go} | 32 ++++----- blockchain/storage/leveldb/leveldb_test.go | 18 ++--- blockchain/storage/testing.go | 18 ++--- blockchain/testing.go | 6 +- 6 files changed, 86 insertions(+), 86 deletions(-) rename blockchain/storage/{batch_helper.go => batch_writer.go} (60%) diff --git a/blockchain/blockchain.go b/blockchain/blockchain.go index 5eb07fc697..588f4dbede 100644 --- a/blockchain/blockchain.go +++ b/blockchain/blockchain.go @@ -400,13 +400,13 @@ func (b *Blockchain) writeGenesis(genesis *chain.Genesis) error { // writeGenesisImpl writes the genesis file to the DB + blockchain reference func (b *Blockchain) writeGenesisImpl(header *types.Header) error { - batchHelper := storage.NewBatchHelper(b.db) + batchWriter := storage.NewBatchWriter(b.db) newTD := new(big.Int).SetUint64(header.Difficulty) - batchHelper.PutCanonicalHeader(header, newTD) + batchWriter.PutCanonicalHeader(header, newTD) - if err := b.writeBatchAndUpdate(batchHelper, header, newTD, true); err != nil { + if err := b.writeBatchAndUpdate(batchWriter, header, newTD, true); err != nil { return err } @@ -493,11 +493,11 @@ func (b *Blockchain) readBody(hash types.Hash) (*types.Body, bool) { // To return from field in the transactions of the past blocks if updated := b.recoverFromFieldsInTransactions(bb.Transactions); updated { - batchHelper := storage.NewBatchHelper(b.db) + batchWriter := storage.NewBatchWriter(b.db) - batchHelper.PutBody(hash, bb) + batchWriter.PutBody(hash, bb) - if err := batchHelper.WriteBatch(); err != nil { + if err := batchWriter.WriteBatch(); err != nil { b.logger.Warn("failed to write body into storage", "hash", hash, "err", err) } } @@ -580,14 +580,14 @@ func (b *Blockchain) WriteHeadersWithBodies(headers []*types.Header) error { for _, header := range headers { event := &Event{} - batchHelper := storage.NewBatchHelper(b.db) + batchWriter := 
storage.NewBatchWriter(b.db) - isCanonical, newTD, err := b.writeHeaderImpl(batchHelper, event, header) + isCanonical, newTD, err := b.writeHeaderImpl(batchWriter, event, header) if err != nil { return err } - if err := b.writeBatchAndUpdate(batchHelper, header, newTD, isCanonical); err != nil { + if err := b.writeBatchAndUpdate(batchWriter, header, newTD, isCanonical); err != nil { return err } @@ -818,16 +818,16 @@ func (b *Blockchain) WriteFullBlock(fblock *types.FullBlock, source string) erro header := block.Header - batchHelper := storage.NewBatchHelper(b.db) + batchWriter := storage.NewBatchWriter(b.db) - if err := b.writeBody(batchHelper, block); err != nil { + if err := b.writeBody(batchWriter, block); err != nil { return err } // Write the header to the chain evnt := &Event{Source: source} - isCanonical, newTD, err := b.writeHeaderImpl(batchHelper, evnt, header) + isCanonical, newTD, err := b.writeHeaderImpl(batchWriter, evnt, header) if err != nil { return err } @@ -835,7 +835,7 @@ func (b *Blockchain) WriteFullBlock(fblock *types.FullBlock, source string) erro // write the receipts, do it only after the header has been written. 
// Otherwise, a client might ask for a header once the receipt is valid, // but before it is written into the storage - batchHelper.PutReceipts(block.Hash(), fblock.Receipts) + batchWriter.PutReceipts(block.Hash(), fblock.Receipts) // update snapshot if err := b.consensus.ProcessHeaders([]*types.Header{header}); err != nil { @@ -845,7 +845,7 @@ func (b *Blockchain) WriteFullBlock(fblock *types.FullBlock, source string) erro // Update the average gas price b.updateGasPriceAvgWithBlock(block) - if err := b.writeBatchAndUpdate(batchHelper, header, newTD, isCanonical); err != nil { + if err := b.writeBatchAndUpdate(batchWriter, header, newTD, isCanonical); err != nil { return err } @@ -883,16 +883,16 @@ func (b *Blockchain) WriteBlock(block *types.Block, source string) error { header := block.Header - batchHelper := storage.NewBatchHelper(b.db) + batchWriter := storage.NewBatchWriter(b.db) - if err := b.writeBody(batchHelper, block); err != nil { + if err := b.writeBody(batchWriter, block); err != nil { return err } // Write the header to the chain evnt := &Event{Source: source} - isCanonical, newTD, err := b.writeHeaderImpl(batchHelper, evnt, header) + isCanonical, newTD, err := b.writeHeaderImpl(batchWriter, evnt, header) if err != nil { return err } @@ -906,7 +906,7 @@ func (b *Blockchain) WriteBlock(block *types.Block, source string) error { // write the receipts, do it only after the header has been written. 
// Otherwise, a client might ask for a header once the receipt is valid, // but before it is written into the storage - batchHelper.PutReceipts(block.Hash(), blockReceipts) + batchWriter.PutReceipts(block.Hash(), blockReceipts) // update snapshot if err := b.consensus.ProcessHeaders([]*types.Header{header}); err != nil { @@ -916,7 +916,7 @@ func (b *Blockchain) WriteBlock(block *types.Block, source string) error { // Update the average gas price b.updateGasPriceAvgWithBlock(block) - if err := b.writeBatchAndUpdate(batchHelper, header, newTD, isCanonical); err != nil { + if err := b.writeBatchAndUpdate(batchWriter, header, newTD, isCanonical); err != nil { return err } @@ -997,7 +997,7 @@ func (b *Blockchain) updateGasPriceAvgWithBlock(block *types.Block) { // writeBody writes the block body to the DB. // Additionally, it also updates the txn lookup, for txnHash -> block lookups -func (b *Blockchain) writeBody(batch *storage.BatchHelper, block *types.Block) error { +func (b *Blockchain) writeBody(batchWriter *storage.BatchWriter, block *types.Block) error { // Recover 'from' field in tx before saving // Because the block passed from the consensus layer doesn't have from field in tx, // due to missing encoding in RLP @@ -1006,11 +1006,11 @@ func (b *Blockchain) writeBody(batch *storage.BatchHelper, block *types.Block) e } // Write the full body (txns + receipts) - batch.PutBody(block.Header.Hash, block.Body()) + batchWriter.PutBody(block.Header.Hash, block.Body()) // Write txn lookups (txHash -> block) for _, txn := range block.Transactions { - batch.PutTxLookup(txn.Hash, block.Hash()) + batchWriter.PutTxLookup(txn.Hash, block.Hash()) } return nil @@ -1146,7 +1146,7 @@ func (b *Blockchain) dispatchEvent(evnt *Event) { // writeHeaderImpl writes a block and the data, assumes the genesis is already set // Returnning parameters (is canonical header, new total difficulty, error) func (b *Blockchain) writeHeaderImpl( - batchHelper *storage.BatchHelper, evnt *Event, header 
*types.Header) (bool, *big.Int, error) { + batchWriter *storage.BatchWriter, evnt *Event, header *types.Header) (bool, *big.Int, error) { // parent total difficulty of incoming header parentTD, ok := b.readTotalDifficulty(header.ParentHash) if !ok { @@ -1163,7 +1163,7 @@ func (b *Blockchain) writeHeaderImpl( // if parent of new header is current header just put everything in batch and update event // new header will be canonical one if header.ParentHash == currentHeader.Hash { - batchHelper.PutCanonicalHeader(header, incomingTD) + batchWriter.PutCanonicalHeader(header, incomingTD) evnt.Type = EventHead evnt.AddNewHeader(header) @@ -1179,11 +1179,11 @@ func (b *Blockchain) writeHeaderImpl( if incomingTD.Cmp(currentTD) > 0 { // new block has higher difficulty, reorg the chain - if err := b.handleReorg(batchHelper, evnt, currentHeader, header, incomingTD); err != nil { + if err := b.handleReorg(batchWriter, evnt, currentHeader, header, incomingTD); err != nil { return false, nil, err } - batchHelper.PutCanonicalHeader(header, incomingTD) + batchWriter.PutCanonicalHeader(header, incomingTD) return true, incomingTD, nil } @@ -1193,9 +1193,9 @@ func (b *Blockchain) writeHeaderImpl( return false, nil, err } - batchHelper.PutHeader(header) - batchHelper.PutTotalDifficulty(header.Hash, incomingTD) - batchHelper.PutForks(forks) + batchWriter.PutHeader(header) + batchWriter.PutTotalDifficulty(header.Hash, incomingTD) + batchWriter.PutForks(forks) // new block has lower difficulty, create a new fork evnt.AddOldHeader(header) @@ -1228,7 +1228,7 @@ func (b *Blockchain) getForksToWrite(header *types.Header) ([]types.Hash, error) // handleReorg handles a reorganization event func (b *Blockchain) handleReorg( - batchHelper *storage.BatchHelper, + batchWriter *storage.BatchWriter, evnt *Event, oldHeader *types.Header, newHeader *types.Header, @@ -1281,11 +1281,11 @@ func (b *Blockchain) handleReorg( return fmt.Errorf("failed to write the old header as fork: %w", err) } - 
batchHelper.PutForks(forks) + batchWriter.PutForks(forks) // Update canonical chain numbers for _, h := range newChain { - batchHelper.PutCanonicalHash(h.Number, h.Hash) + batchWriter.PutCanonicalHash(h.Number, h.Hash) } for _, b := range oldChain[:len(oldChain)-1] { @@ -1394,11 +1394,11 @@ func calcBaseFeeDelta(gasUsedDelta, parentGasTarget, baseFee uint64) uint64 { } func (b *Blockchain) writeBatchAndUpdate( - batchHelper *storage.BatchHelper, + batchWriter *storage.BatchWriter, header *types.Header, newTD *big.Int, isCanonnical bool) error { - if err := batchHelper.WriteBatch(); err != nil { + if err := batchWriter.WriteBatch(); err != nil { return err } diff --git a/blockchain/blockchain_test.go b/blockchain/blockchain_test.go index 1d709b56de..7b6c842487 100644 --- a/blockchain/blockchain_test.go +++ b/blockchain/blockchain_test.go @@ -532,12 +532,12 @@ func TestForkUnknownParents(t *testing.T) { h1 := AppendNewTestHeaders(h0[:5], 10) // Write genesis - bh := storage.NewBatchHelper(b.db) + batchWriter := storage.NewBatchWriter(b.db) td := new(big.Int).SetUint64(h0[0].Difficulty) - bh.PutCanonicalHeader(h0[0], td) + batchWriter.PutCanonicalHeader(h0[0], td) - assert.NoError(t, b.writeBatchAndUpdate(bh, h0[0], td, true)) + assert.NoError(t, b.writeBatchAndUpdate(batchWriter, h0[0], td, true)) // Write 10 headers assert.NoError(t, b.WriteHeaders(h0[1:])) @@ -596,13 +596,13 @@ func TestBlockchainWriteBody(t *testing.T) { chain := newChain(t, txFromByTxHash, "t1") defer chain.db.Close() - batchHelper := storage.NewBatchHelper(chain.db) + batchWriter := storage.NewBatchWriter(chain.db) assert.NoError( t, - chain.writeBody(batchHelper, block), + chain.writeBody(batchWriter, block), ) - assert.NoError(t, batchHelper.WriteBatch()) + assert.NoError(t, batchWriter.WriteBatch()) }) t.Run("should return error if tx doesn't have from and recovering address fails", func(t *testing.T) { @@ -627,14 +627,14 @@ func TestBlockchainWriteBody(t *testing.T) { chain := newChain(t, 
txFromByTxHash, "t2") defer chain.db.Close() - batchHelper := storage.NewBatchHelper(chain.db) + batchWriter := storage.NewBatchWriter(chain.db) assert.ErrorIs( t, errRecoveryAddressFailed, - chain.writeBody(batchHelper, block), + chain.writeBody(batchWriter, block), ) - assert.NoError(t, batchHelper.WriteBatch()) + assert.NoError(t, batchWriter.WriteBatch()) }) t.Run("should recover from address and store to storage", func(t *testing.T) { @@ -661,10 +661,10 @@ func TestBlockchainWriteBody(t *testing.T) { chain := newChain(t, txFromByTxHash, "t3") defer chain.db.Close() - batchHelper := storage.NewBatchHelper(chain.db) + batchWriter := storage.NewBatchWriter(chain.db) - assert.NoError(t, chain.writeBody(batchHelper, block)) - assert.NoError(t, batchHelper.WriteBatch()) + assert.NoError(t, chain.writeBody(batchWriter, block)) + assert.NoError(t, batchWriter.WriteBatch()) readBody, ok := chain.readBody(block.Hash()) assert.True(t, ok) @@ -881,7 +881,7 @@ func TestBlockchainReadBody(t *testing.T) { }, } - batchHelper := storage.NewBatchHelper(b.db) + batchWriter := storage.NewBatchWriter(b.db) tx := &types.Transaction{ Value: big.NewInt(10), @@ -901,11 +901,11 @@ func TestBlockchainReadBody(t *testing.T) { txFromByTxHash[tx.Hash] = types.ZeroAddress - if err := b.writeBody(batchHelper, block); err != nil { + if err := b.writeBody(batchWriter, block); err != nil { t.Fatal(err) } - assert.NoError(t, batchHelper.WriteBatch()) + assert.NoError(t, batchWriter.WriteBatch()) txFromByTxHash[tx.Hash] = addr diff --git a/blockchain/storage/batch_helper.go b/blockchain/storage/batch_writer.go similarity index 60% rename from blockchain/storage/batch_helper.go rename to blockchain/storage/batch_writer.go index ec4a952d81..e2032601a7 100644 --- a/blockchain/storage/batch_helper.go +++ b/blockchain/storage/batch_writer.go @@ -14,44 +14,44 @@ type Batch interface { Put(k []byte, v []byte) } -type BatchHelper struct { +type BatchWriter struct { batch Batch } -func 
NewBatchHelper(storage Storage) *BatchHelper { - return &BatchHelper{batch: storage.NewBatch()} +func NewBatchWriter(storage Storage) *BatchWriter { + return &BatchWriter{batch: storage.NewBatch()} } -func (b *BatchHelper) PutHeader(h *types.Header) { +func (b *BatchWriter) PutHeader(h *types.Header) { b.putRlp(HEADER, h.Hash.Bytes(), h) } -func (b *BatchHelper) PutBody(hash types.Hash, body *types.Body) { +func (b *BatchWriter) PutBody(hash types.Hash, body *types.Body) { b.putRlp(BODY, hash.Bytes(), body) } -func (b *BatchHelper) PutHeadHash(h types.Hash) { +func (b *BatchWriter) PutHeadHash(h types.Hash) { b.putWithPrefix(HEAD, HASH, h.Bytes()) } -func (b *BatchHelper) PutTxLookup(hash types.Hash, blockHash types.Hash) { +func (b *BatchWriter) PutTxLookup(hash types.Hash, blockHash types.Hash) { ar := &fastrlp.Arena{} vr := ar.NewBytes(blockHash.Bytes()).MarshalTo(nil) b.putWithPrefix(TX_LOOKUP_PREFIX, hash.Bytes(), vr) } -func (b *BatchHelper) PutHeadNumber(n uint64) { +func (b *BatchWriter) PutHeadNumber(n uint64) { b.putWithPrefix(HEAD, NUMBER, common.EncodeUint64ToBytes(n)) } -func (b *BatchHelper) PutReceipts(hash types.Hash, receipts []*types.Receipt) { +func (b *BatchWriter) PutReceipts(hash types.Hash, receipts []*types.Receipt) { rr := types.Receipts(receipts) b.putRlp(RECEIPTS, hash.Bytes(), &rr) } -func (b *BatchHelper) PutCanonicalHeader(h *types.Header, diff *big.Int) { +func (b *BatchWriter) PutCanonicalHeader(h *types.Header, diff *big.Int) { b.PutHeader(h) b.PutHeadHash(h.Hash) b.PutHeadNumber(h.Number) @@ -59,21 +59,21 @@ func (b *BatchHelper) PutCanonicalHeader(h *types.Header, diff *big.Int) { b.PutTotalDifficulty(h.Hash, diff) } -func (b *BatchHelper) PutCanonicalHash(n uint64, hash types.Hash) { +func (b *BatchWriter) PutCanonicalHash(n uint64, hash types.Hash) { b.putWithPrefix(CANONICAL, common.EncodeUint64ToBytes(n), hash.Bytes()) } -func (b *BatchHelper) PutTotalDifficulty(hash types.Hash, diff *big.Int) { +func (b *BatchWriter) 
PutTotalDifficulty(hash types.Hash, diff *big.Int) { b.putWithPrefix(DIFFICULTY, hash.Bytes(), diff.Bytes()) } -func (b *BatchHelper) PutForks(forks []types.Hash) { +func (b *BatchWriter) PutForks(forks []types.Hash) { ff := Forks(forks) b.putRlp(FORK, EMPTY, &ff) } -func (b *BatchHelper) putRlp(p, k []byte, raw types.RLPMarshaler) { +func (b *BatchWriter) putRlp(p, k []byte, raw types.RLPMarshaler) { var data []byte if obj, ok := raw.(types.RLPStoreMarshaler); ok { @@ -85,12 +85,12 @@ func (b *BatchHelper) putRlp(p, k []byte, raw types.RLPMarshaler) { b.putWithPrefix(p, k, data) } -func (b *BatchHelper) putWithPrefix(p, k, data []byte) { +func (b *BatchWriter) putWithPrefix(p, k, data []byte) { fullKey := append(append(make([]byte, 0, len(p)+len(k)), p...), k...) b.batch.Put(fullKey, data) } -func (b *BatchHelper) WriteBatch() error { +func (b *BatchWriter) WriteBatch() error { return b.batch.Write() } diff --git a/blockchain/storage/leveldb/leveldb_test.go b/blockchain/storage/leveldb/leveldb_test.go index b129ff392f..1a1a39ca3e 100644 --- a/blockchain/storage/leveldb/leveldb_test.go +++ b/blockchain/storage/leveldb/leveldb_test.go @@ -247,21 +247,21 @@ insertloop: case <-ctx.Done(): break insertloop case b := <-blockchain: - batchHelper := storage.NewBatchHelper(s) + batchWriter := storage.NewBatchWriter(s) - batchHelper.PutBody(b.Block.Hash(), b.Block.Body()) + batchWriter.PutBody(b.Block.Hash(), b.Block.Body()) for _, tx := range b.Block.Transactions { - batchHelper.PutTxLookup(tx.Hash, b.Block.Hash()) + batchWriter.PutTxLookup(tx.Hash, b.Block.Hash()) } - batchHelper.PutHeader(b.Block.Header) - batchHelper.PutHeadNumber(uint64(i)) - batchHelper.PutHeadHash(b.Block.Header.Hash) - batchHelper.PutReceipts(b.Block.Hash(), b.Receipts) - batchHelper.PutCanonicalHash(uint64(i), b.Block.Hash()) + batchWriter.PutHeader(b.Block.Header) + batchWriter.PutHeadNumber(uint64(i)) + batchWriter.PutHeadHash(b.Block.Header.Hash) + batchWriter.PutReceipts(b.Block.Hash(), 
b.Receipts) + batchWriter.PutCanonicalHash(uint64(i), b.Block.Hash()) - if err := batchHelper.WriteBatch(); err != nil { + if err := batchWriter.WriteBatch(); err != nil { require.NoError(t, err) } diff --git a/blockchain/storage/testing.go b/blockchain/storage/testing.go index b61a2b192a..1afb015e32 100644 --- a/blockchain/storage/testing.go +++ b/blockchain/storage/testing.go @@ -77,7 +77,7 @@ func testCanonicalChain(t *testing.T, m PlaceholderStorage) { } for _, cc := range cases { - batch := NewBatchHelper(s) + batch := NewBatchWriter(s) h := &types.Header{ Number: cc.Number, @@ -124,7 +124,7 @@ func testDifficulty(t *testing.T, m PlaceholderStorage) { } for indx, cc := range cases { - batch := NewBatchHelper(s) + batch := NewBatchWriter(s) h := &types.Header{ Number: uint64(indx), @@ -156,7 +156,7 @@ func testHead(t *testing.T, m PlaceholderStorage) { defer closeFn() for i := uint64(0); i < 5; i++ { - batch := NewBatchHelper(s) + batch := NewBatchWriter(s) h := &types.Header{ Number: i, @@ -204,7 +204,7 @@ func testForks(t *testing.T, m PlaceholderStorage) { } for _, cc := range cases { - batch := NewBatchHelper(s) + batch := NewBatchWriter(s) batch.PutForks(cc.Forks) @@ -235,7 +235,7 @@ func testHeader(t *testing.T, m PlaceholderStorage) { } header.ComputeHash() - batch := NewBatchHelper(s) + batch := NewBatchWriter(s) batch.PutHeader(header) @@ -263,7 +263,7 @@ func testBody(t *testing.T, m PlaceholderStorage) { ExtraData: []byte{}, // if not set it will fail } - batch := NewBatchHelper(s) + batch := NewBatchWriter(s) batch.PutHeader(header) @@ -298,7 +298,7 @@ func testBody(t *testing.T, m PlaceholderStorage) { Transactions: []*types.Transaction{t0, t1}, } - batch2 := NewBatchHelper(s) + batch2 := NewBatchWriter(s) body0 := block.Body() batch2.PutBody(header.Hash, body0) @@ -327,7 +327,7 @@ func testReceipts(t *testing.T, m PlaceholderStorage) { s, closeFn := m(t) defer closeFn() - batch := NewBatchHelper(s) + batch := NewBatchWriter(s) h := &types.Header{ 
Difficulty: 133, @@ -407,7 +407,7 @@ func testWriteCanonicalHeader(t *testing.T, m PlaceholderStorage) { h.ComputeHash() diff := new(big.Int).SetUint64(100) - batch := NewBatchHelper(s) + batch := NewBatchWriter(s) batch.PutCanonicalHeader(h, diff) diff --git a/blockchain/testing.go b/blockchain/testing.go index a9c4467376..9188360330 100644 --- a/blockchain/testing.go +++ b/blockchain/testing.go @@ -117,12 +117,12 @@ func NewTestBlockchain(t *testing.T, headers []*types.Header) *Blockchain { } if len(headers) > 0 { - bh := storage.NewBatchHelper(b.db) + batchWriter := storage.NewBatchWriter(b.db) td := new(big.Int).SetUint64(headers[0].Difficulty) - bh.PutCanonicalHeader(headers[0], td) + batchWriter.PutCanonicalHeader(headers[0], td) - if err := b.writeBatchAndUpdate(bh, headers[0], td, true); err != nil { + if err := b.writeBatchAndUpdate(batchWriter, headers[0], td, true); err != nil { t.Fatal(err) } From deaaad20409a65167b0024bc49564d08b25f474d Mon Sep 17 00:00:00 2001 From: Igor Crevar Date: Tue, 4 Jul 2023 09:25:35 +0200 Subject: [PATCH 15/16] use some common things --- blockchain/storage/keyvalue.go | 21 +---- blockchain/storage/leveldb/leveldb_test.go | 91 +++++++++------------- 2 files changed, 40 insertions(+), 72 deletions(-) diff --git a/blockchain/storage/keyvalue.go b/blockchain/storage/keyvalue.go index f1e0ea7bd5..a11e969487 100644 --- a/blockchain/storage/keyvalue.go +++ b/blockchain/storage/keyvalue.go @@ -2,10 +2,10 @@ package storage import ( - "encoding/binary" "fmt" "math/big" + "github.com/0xPolygon/polygon-edge/helper/common" "github.com/0xPolygon/polygon-edge/types" "github.com/hashicorp/go-hclog" "github.com/umbracle/fastrlp" @@ -53,7 +53,6 @@ var ( // KV = Key-Value type KV interface { Close() error - Set(p []byte, v []byte) error Get(p []byte) ([]byte, bool, error) NewBatch() Batch } @@ -69,22 +68,11 @@ func NewKeyValueStorage(logger hclog.Logger, db KV) Storage { return &KeyValueStorage{logger: logger, db: db} } -func (s 
*KeyValueStorage) encodeUint(n uint64) []byte { - b := make([]byte, 8) - binary.BigEndian.PutUint64(b[:], n) - - return b[:] -} - -func (s *KeyValueStorage) decodeUint(b []byte) uint64 { - return binary.BigEndian.Uint64(b[:]) -} - // -- canonical hash -- // ReadCanonicalHash gets the hash from the number of the canonical chain func (s *KeyValueStorage) ReadCanonicalHash(n uint64) (types.Hash, bool) { - data, ok := s.get(CANONICAL, s.encodeUint(n)) + data, ok := s.get(CANONICAL, common.EncodeUint64ToBytes(n)) if !ok { return types.Hash{}, false } @@ -115,7 +103,7 @@ func (s *KeyValueStorage) ReadHeadNumber() (uint64, bool) { return 0, false } - return s.decodeUint(data), true + return common.EncodeBytesToUint64(data), true } // FORK // @@ -191,8 +179,6 @@ func (s *KeyValueStorage) ReadTxLookup(hash types.Hash) (types.Hash, bool) { return types.BytesToHash(blockHash), true } -// WRITE OPERATIONS // - var ErrNotFound = fmt.Errorf("not found") func (s *KeyValueStorage) readRLP(p, k []byte, raw types.RLPUnmarshaler) error { @@ -252,6 +238,7 @@ func (s *KeyValueStorage) Close() error { return s.db.Close() } +// NewBatch creates batch used for write/update/delete operations func (s *KeyValueStorage) NewBatch() Batch { return s.db.NewBatch() } diff --git a/blockchain/storage/leveldb/leveldb_test.go b/blockchain/storage/leveldb/leveldb_test.go index 1a1a39ca3e..95f24c48ae 100644 --- a/blockchain/storage/leveldb/leveldb_test.go +++ b/blockchain/storage/leveldb/leveldb_test.go @@ -2,8 +2,8 @@ package leveldb import ( "context" + "crypto/rand" "math/big" - "math/rand" "os" "os/signal" "path/filepath" @@ -48,81 +48,62 @@ func TestStorage(t *testing.T) { storage.TestStorage(t, newStorage) } -func generateRandomByteSlice(count int) []byte { - s := make([]byte, count) - for i := 0; i < count; i++ { - s[i] = byte(rand.Int()) - } +func generateTxs(t *testing.T, startNonce, count int, from types.Address, to *types.Address) []*types.Transaction { + t.Helper() - return s -} + txs := 
make([]*types.Transaction, count) -func generateFullTx(nonce uint64, from types.Address, to *types.Address, value *big.Int, dynamic bool, v *big.Int) *types.Transaction { - tx := &types.Transaction{} - - tx.Gas = types.StateTransactionGasLimit - tx.Nonce = nonce - tx.From = from - tx.To = to - tx.Value = value - tx.V = v - tx.Input = generateRandomByteSlice(1000) - tx.Hash = types.BytesToHash(generateRandomByteSlice(32)) - - if dynamic { - tx.Type = types.DynamicFeeTx - tx.GasFeeCap = v - tx.GasTipCap = v - } else { - tx.Type = types.LegacyTx - tx.GasPrice = v - } + for i := range txs { + tx := &types.Transaction{ + Gas: types.StateTransactionGasLimit, + Nonce: uint64(startNonce + i), + From: from, + To: to, + Value: big.NewInt(2000), + Type: types.DynamicFeeTx, + GasFeeCap: big.NewInt(100), + GasTipCap: big.NewInt(10), + } - return tx -} + input := make([]byte, 1000) + _, err := rand.Read(input) -func generateFullTxs(t *testing.T, startNonce, count int, from types.Address, to *types.Address) []*types.Transaction { - t.Helper() + require.NoError(t, err) - v := big.NewInt(1) - txs := make([]*types.Transaction, count) + tx.ComputeHash() - for i := 0; i < count; i++ { - txs[i] = generateFullTx(uint64(startNonce+i), from, to, big.NewInt(1), false, v) + txs[i] = tx } return txs } -var ( - addr1 = types.StringToAddress("1") - addr2 = types.StringToAddress("2") -) - func generateBlock(t *testing.T, num uint64) *types.FullBlock { t.Helper() - b := &types.FullBlock{} - - b.Block = &types.Block{} - b.Block.Header = &types.Header{ - Number: num, - ExtraData: generateRandomByteSlice(32), - Hash: types.BytesToHash(generateRandomByteSlice(32)), + transactionsCount := 2500 + status := types.ReceiptSuccess + addr1 := types.StringToAddress("17878aa") + addr2 := types.StringToAddress("2bf5653") + b := &types.FullBlock{ + Block: &types.Block{ + Header: &types.Header{ + Number: num, + ExtraData: make([]byte, 32), + Hash: types.ZeroHash, + }, + Transactions: generateTxs(t, 0, 
transactionsCount, addr1, &addr2), + Uncles: blockchain.NewTestHeaders(10), + }, + Receipts: make([]*types.Receipt, transactionsCount), } - b.Block.Transactions = generateFullTxs(t, 0, 2500, addr1, &addr2) - b.Receipts = make([]*types.Receipt, len(b.Block.Transactions)) - b.Block.Uncles = blockchain.NewTestHeaders(10) - - var status types.ReceiptStatus = types.ReceiptSuccess - logs := make([]*types.Log, 10) for i := 0; i < 10; i++ { logs[i] = &types.Log{ Address: addr1, - Topics: []types.Hash{types.StringToHash("topic1"), types.StringToHash("topic2"), types.StringToHash("topic3")}, + Topics: []types.Hash{types.StringToHash("t1"), types.StringToHash("t2"), types.StringToHash("t3")}, Data: []byte{0xaa, 0xbb, 0xcc, 0xdd, 0xbb, 0xaa, 0x01, 0x012}, } } From 82aa7842da87c7d74154252dcd46320ffe564b7e Mon Sep 17 00:00:00 2001 From: Igor Crevar Date: Wed, 5 Jul 2023 17:59:57 +0200 Subject: [PATCH 16/16] TestBlockchain_WriteFullBlock test --- blockchain/blockchain.go | 5 -- blockchain/blockchain_test.go | 113 +++++++++++++++++++++++++++++++++- blockchain/storage/testing.go | 10 +++ blockchain/testing.go | 2 +- 4 files changed, 122 insertions(+), 8 deletions(-) diff --git a/blockchain/blockchain.go b/blockchain/blockchain.go index 588f4dbede..67673dceeb 100644 --- a/blockchain/blockchain.go +++ b/blockchain/blockchain.go @@ -546,11 +546,6 @@ func (b *Blockchain) GetHeaderByNumber(n uint64) (*types.Header, bool) { return h, true } -// WriteHeaders writes an array of headers -func (b *Blockchain) WriteHeaders(headers []*types.Header) error { - return b.WriteHeadersWithBodies(headers) -} - // WriteHeadersWithBodies writes a batch of headers func (b *Blockchain) WriteHeadersWithBodies(headers []*types.Header) error { // Check the size diff --git a/blockchain/blockchain_test.go b/blockchain/blockchain_test.go index 7b6c842487..9d9c16a282 100644 --- a/blockchain/blockchain_test.go +++ b/blockchain/blockchain_test.go @@ -7,11 +7,15 @@ import ( "reflect" "testing" + 
"github.com/0xPolygon/polygon-edge/helper/common" + "github.com/0xPolygon/polygon-edge/helper/hex" "github.com/0xPolygon/polygon-edge/state" "github.com/hashicorp/go-hclog" + lru "github.com/hashicorp/golang-lru" "github.com/0xPolygon/polygon-edge/chain" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" "github.com/0xPolygon/polygon-edge/blockchain/storage" "github.com/0xPolygon/polygon-edge/blockchain/storage/memory" @@ -463,7 +467,8 @@ func TestInsertHeaders(t *testing.T) { // run the history for i := 1; i < len(cc.History); i++ { - if err := b.WriteHeaders([]*types.Header{chain.headers[cc.History[i].header.hash]}); err != nil { + headers := []*types.Header{chain.headers[cc.History[i].header.hash]} + if err := b.WriteHeadersWithBodies(headers); err != nil { t.Fatal(err) } @@ -540,7 +545,7 @@ func TestForkUnknownParents(t *testing.T) { assert.NoError(t, b.writeBatchAndUpdate(batchWriter, h0[0], td, true)) // Write 10 headers - assert.NoError(t, b.WriteHeaders(h0[1:])) + assert.NoError(t, b.WriteHeadersWithBodies(h0[1:])) // Cannot write this header because the father h1[11] is not known assert.Error(t, b.WriteHeadersWithBodies([]*types.Header{h1[12]})) @@ -1407,3 +1412,107 @@ func TestBlockchain_CalculateBaseFee(t *testing.T) { }) } } + +func TestBlockchain_WriteFullBlock(t *testing.T) { + t.Parallel() + + getKey := func(p []byte, k []byte) []byte { + return append(append(make([]byte, 0, len(p)+len(k)), p...), k...) 
+ }
+ db := map[string][]byte{}
+ consensusMock := &MockVerifier{
+ processHeadersFn: func(hs []*types.Header) error {
+ assert.Len(t, hs, 1)
+
+ return nil
+ },
+ }
+
+ storageMock := storage.NewMockStorage()
+ storageMock.HookNewBatch(func() storage.Batch {
+ return memory.NewBatchMemory(db)
+ })
+
+ bc := &Blockchain{
+ gpAverage: &gasPriceAverage{
+ count: new(big.Int),
+ },
+ logger: hclog.NewNullLogger(),
+ db: storageMock,
+ consensus: consensusMock,
+ config: &chain.Chain{
+ Params: &chain.Params{
+ Forks: &chain.Forks{
+ chain.London: chain.NewFork(5),
+ },
+ },
+ Genesis: &chain.Genesis{
+ BaseFeeEM: 4,
+ },
+ },
+ stream: &eventStream{},
+ }
+
+ bc.headersCache, _ = lru.New(10)
+ bc.difficultyCache, _ = lru.New(10)
+
+ existingTD := big.NewInt(1)
+ existingHeader := &types.Header{Number: 1}
+ header := &types.Header{
+ Number: 2,
+ }
+ receipts := []*types.Receipt{
+ {GasUsed: 100},
+ {GasUsed: 200},
+ }
+ tx := &types.Transaction{
+ Value: big.NewInt(1),
+ }
+
+ tx.ComputeHash()
+ header.ComputeHash()
+ existingHeader.ComputeHash()
+ bc.currentHeader.Store(existingHeader)
+ bc.currentDifficulty.Store(existingTD)
+
+ header.ParentHash = existingHeader.Hash
+ bc.txSigner = &mockSigner{
+ txFromByTxHash: map[types.Hash]types.Address{
+ tx.Hash: {1, 2},
+ },
+ }
+
+ // already existing block write
+ err := bc.WriteFullBlock(&types.FullBlock{
+ Block: &types.Block{
+ Header: existingHeader,
+ Transactions: []*types.Transaction{tx},
+ },
+ Receipts: receipts,
+ }, "polybft")
+
+ require.NoError(t, err)
+ require.Equal(t, 0, len(db))
+ require.Equal(t, uint64(1), bc.currentHeader.Load().Number)
+
+ // new block write
+ err = bc.WriteFullBlock(&types.FullBlock{
+ Block: &types.Block{
+ Header: header,
+ Transactions: []*types.Transaction{tx},
+ },
+ Receipts: receipts,
+ }, "polybft")
+
+ require.NoError(t, err)
+ require.Equal(t, 8, len(db))
+ require.Equal(t, uint64(2), bc.currentHeader.Load().Number)
+ require.NotNil(t, 
db[hex.EncodeToHex(getKey(storage.BODY, header.Hash.Bytes()))])
+ require.NotNil(t, db[hex.EncodeToHex(getKey(storage.TX_LOOKUP_PREFIX, tx.Hash.Bytes()))])
+ require.NotNil(t, db[hex.EncodeToHex(getKey(storage.HEADER, header.Hash.Bytes()))])
+ require.NotNil(t, db[hex.EncodeToHex(getKey(storage.HEAD, storage.HASH))])
+ require.NotNil(t, db[hex.EncodeToHex(getKey(storage.CANONICAL, common.EncodeUint64ToBytes(header.Number)))])
+ require.NotNil(t, db[hex.EncodeToHex(getKey(storage.DIFFICULTY, header.Hash.Bytes()))])
+ require.NotNil(t, db[hex.EncodeToHex(getKey(storage.HEAD, storage.NUMBER))])
+ require.NotNil(t, db[hex.EncodeToHex(getKey(storage.RECEIPTS, header.Hash.Bytes()))])
+}
diff --git a/blockchain/storage/testing.go b/blockchain/storage/testing.go
index 1afb015e32..45615363ad 100644
--- a/blockchain/storage/testing.go
+++ b/blockchain/storage/testing.go
@@ -461,6 +461,7 @@ type readSnapshotDelegate func(types.Hash) ([]byte, bool)
 type readReceiptsDelegate func(types.Hash) ([]*types.Receipt, error)
 type readTxLookupDelegate func(types.Hash) (types.Hash, bool)
 type closeDelegate func() error
+type newBatchDelegate func() Batch
 
 type MockStorage struct {
 readCanonicalHashFn readCanonicalHashDelegate
@@ -473,6 +474,7 @@ type MockStorage struct {
 readReceiptsFn readReceiptsDelegate
 readTxLookupFn readTxLookupDelegate
 closeFn closeDelegate
+ newBatchFn newBatchDelegate
 }
 
 func NewMockStorage() *MockStorage {
@@ -599,6 +601,14 @@ func (m *MockStorage) HookClose(fn closeDelegate) {
 m.closeFn = fn
 }
 
+func (m *MockStorage) HookNewBatch(fn newBatchDelegate) {
+ m.newBatchFn = fn
+}
+
 func (m *MockStorage) NewBatch() Batch {
+ if m.newBatchFn != nil {
+ return m.newBatchFn()
+ }
+
 return nil
 }
diff --git a/blockchain/testing.go b/blockchain/testing.go
index 9188360330..cac29f43bf 100644
--- a/blockchain/testing.go
+++ b/blockchain/testing.go
@@ -126,7 +126,7 @@ func NewTestBlockchain(t *testing.T, headers []*types.Header) *Blockchain {
t.Fatal(err) } - if err := b.WriteHeaders(headers[1:]); err != nil { + if err := b.WriteHeadersWithBodies(headers[1:]); err != nil { t.Fatal(err) } }