From deb2cf51a9178b182c96e47d128cca935d675bbd Mon Sep 17 00:00:00 2001 From: bianyuanop Date: Fri, 19 Jan 2024 12:14:12 -0500 Subject: [PATCH 01/48] root of transactions by stateless merkledb --- chain/block.go | 46 +++++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 45 insertions(+), 1 deletion(-) diff --git a/chain/block.go b/chain/block.go index 4aab647fe4..d334c3df1e 100644 --- a/chain/block.go +++ b/chain/block.go @@ -9,12 +9,15 @@ import ( "fmt" "time" + "github.com/ava-labs/avalanchego/database" + "github.com/ava-labs/avalanchego/database/memdb" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow/choices" "github.com/ava-labs/avalanchego/snow/consensus/snowman" "github.com/ava-labs/avalanchego/snow/engine/snowman/block" "github.com/ava-labs/avalanchego/snow/validators" "github.com/ava-labs/avalanchego/utils/set" + "github.com/ava-labs/avalanchego/utils/units" "github.com/ava-labs/avalanchego/vms/platformvm/warp" "github.com/ava-labs/avalanchego/x/merkledb" "go.opentelemetry.io/otel/attribute" @@ -40,7 +43,8 @@ type StatefulBlock struct { Tmstmp int64 `json:"timestamp"` Hght uint64 `json:"height"` - Txs []*Transaction `json:"txs"` + Txs []*Transaction `json:"txs"` + TxsRoot []byte `json:"txsRoot"` // StateRoot is the root of the post-execution state // of [Prnt]. @@ -290,6 +294,46 @@ func (b *StatelessBlock) initializeBuilt( b.containsWarp = true } } + + // transaction hash generation + db, err := merkledb.New(ctx, memdb.New(), merkledb.Config{ + BranchFactor: merkledb.BranchFactor16, + HistoryLength: 100, + EvictionBatchSize: units.MiB, + IntermediateNodeCacheSize: units.MiB, + ValueNodeCacheSize: units.MiB, + Tracer: b.vm.Tracer(), + }) + if err != nil { + return err + } + // collect keys, values from transactions/results + var ops []database.BatchOp + for _, tx := range b.Txs { + key := utils.ToID(tx.Bytes()) + ops = append(ops, database.BatchOp{ + Key: key[:], + Value: tx.Bytes(), + }) + } + for _, result := range b.results { + key := utils.ToID(result.Output) + ops = append(ops, database.BatchOp{ + Key: key[:], + Value: result.Output, + }) + } + view, err = db.NewView(ctx, merkledb.ViewChanges{BatchOps: ops}) + if err != nil { + return err + } + view.CommitToDB(ctx) + txsRoot, err := db.GetMerkleRoot(ctx) + if err != nil { + return err + } + b.TxsRoot = txsRoot[:] + return nil } From 0804930f27112107e5f74b4168b9db127a2bc475 Mon Sep 17 00:00:00 2001 From: bianyuanop Date: Fri, 19 Jan 2024 16:36:26 -0500 Subject: [PATCH 02/48] make root generation a function --- chain/block.go | 39 ++++++--------------------------------- utils/utils.go | 47 +++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 53 insertions(+), 33 deletions(-) diff --git a/chain/block.go b/chain/block.go index d334c3df1e..6eca2967cd 100644 --- a/chain/block.go +++ b/chain/block.go @@ -9,15 +9,12 @@ import ( "fmt" "time" - "github.com/ava-labs/avalanchego/database" - "github.com/ava-labs/avalanchego/database/memdb" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow/choices" "github.com/ava-labs/avalanchego/snow/consensus/snowman" "github.com/ava-labs/avalanchego/snow/engine/snowman/block" "github.com/ava-labs/avalanchego/snow/validators" "github.com/ava-labs/avalanchego/utils/set" - "github.com/ava-labs/avalanchego/utils/units" "github.com/ava-labs/avalanchego/vms/platformvm/warp" "github.com/ava-labs/avalanchego/x/merkledb" "go.opentelemetry.io/otel/attribute" @@ -296,43 +293,19 @@ func (b *StatelessBlock) initializeBuilt( } // transaction hash 
generation - db, err := merkledb.New(ctx, memdb.New(), merkledb.Config{ - BranchFactor: merkledb.BranchFactor16, - HistoryLength: 100, - EvictionBatchSize: units.MiB, - IntermediateNodeCacheSize: units.MiB, - ValueNodeCacheSize: units.MiB, - Tracer: b.vm.Tracer(), - }) - if err != nil { - return err - } - // collect keys, values from transactions/results - var ops []database.BatchOp + var merkleItems [][]byte for _, tx := range b.Txs { - key := utils.ToID(tx.Bytes()) - ops = append(ops, database.BatchOp{ - Key: key[:], - Value: tx.Bytes(), - }) + merkleItems = append(merkleItems, tx.Bytes()) } for _, result := range b.results { - key := utils.ToID(result.Output) - ops = append(ops, database.BatchOp{ - Key: key[:], - Value: result.Output, - }) + merkleItems = append(merkleItems, result.Output) } - view, err = db.NewView(ctx, merkledb.ViewChanges{BatchOps: ops}) - if err != nil { - return err - } - view.CommitToDB(ctx) - txsRoot, err := db.GetMerkleRoot(ctx) + + root, _, err := utils.GenerateMerkleRoot(ctx, b.vm.Tracer(), merkleItems) if err != nil { return err } - b.TxsRoot = txsRoot[:] + b.TxsRoot = root return nil } diff --git a/utils/utils.go b/utils/utils.go index 2635e1aa4b..86384e1034 100644 --- a/utils/utils.go +++ b/utils/utils.go @@ -4,6 +4,7 @@ package utils import ( + "context" "fmt" "math" "net" @@ -13,9 +14,14 @@ import ( "strconv" "time" + "github.com/ava-labs/avalanchego/database" + "github.com/ava-labs/avalanchego/database/memdb" "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/trace" "github.com/ava-labs/avalanchego/utils/hashing" "github.com/ava-labs/avalanchego/utils/perms" + "github.com/ava-labs/avalanchego/utils/units" + "github.com/ava-labs/avalanchego/x/merkledb" "github.com/ava-labs/hypersdk/consts" formatter "github.com/onsi/ginkgo/v2/formatter" ) @@ -116,3 +122,44 @@ func LoadBytes(filename string, expectedSize int) ([]byte, error) { } return bytes, nil } + +// Generate merkle root for a set of items +// this function does not take ownership of given bytes array +func GenerateMerkleRoot(ctx context.Context, tracer trace.Tracer, merkleItems [][]byte) ([]byte, merkledb.MerkleDB, error) { + var batchOps []database.BatchOp + + for _, item := range merkleItems { + key := ToID(item) + batchOps = append(batchOps, database.BatchOp{ + Key: key[:], + Value: item, + }) + } + + db, err := merkledb.New(ctx, memdb.New(), merkledb.Config{ + BranchFactor: merkledb.BranchFactor16, + HistoryLength: 100, + EvictionBatchSize: units.MiB, + IntermediateNodeCacheSize: units.MiB, + ValueNodeCacheSize: units.MiB, + Tracer: tracer, + }) + if err != nil { + return nil, nil, err + } + + view, err := db.NewView(ctx, merkledb.ViewChanges{BatchOps: batchOps}) + if err != nil { + return nil, nil, err + } + if err := view.CommitToDB(ctx); err != nil { + return nil, nil, err + } + + root, err := db.GetMerkleRoot(ctx) + if err != nil { + return nil, nil, err + } + + return root[:], db, nil +} From eb41d9a7a9ea9902fbaf0531642a166c4b505cfd Mon Sep 17 00:00:00 2001 From: bianyuanop Date: Fri, 19 Jan 2024 17:05:46 -0500 Subject: [PATCH 03/48] preallocate memory for merkle array and consumebytes flag --- chain/block.go | 5 +++-- hypersdk.code-workspace | 8 ++++++++ utils/utils.go | 6 +++--- 3 files changed, 14 insertions(+), 5 deletions(-) create mode 100644 hypersdk.code-workspace diff --git a/chain/block.go b/chain/block.go index 6eca2967cd..03eedd7e09 100644 --- a/chain/block.go +++ b/chain/block.go @@ -293,7 +293,7 @@ func (b *StatelessBlock) initializeBuilt( } // transaction 
hash generation - var merkleItems [][]byte + merkleItems := make([][]byte, 0, len(b.Txs)+len(b.results)) for _, tx := range b.Txs { merkleItems = append(merkleItems, tx.Bytes()) } @@ -301,7 +301,8 @@ func (b *StatelessBlock) initializeBuilt( merkleItems = append(merkleItems, result.Output) } - root, _, err := utils.GenerateMerkleRoot(ctx, b.vm.Tracer(), merkleItems) + // consume bytes to avoid extra copying + root, _, err := utils.GenerateMerkleRoot(ctx, b.vm.Tracer(), merkleItems, true) if err != nil { return err } diff --git a/hypersdk.code-workspace b/hypersdk.code-workspace new file mode 100644 index 0000000000..876a1499c0 --- /dev/null +++ b/hypersdk.code-workspace @@ -0,0 +1,8 @@ +{ + "folders": [ + { + "path": "." + } + ], + "settings": {} +} \ No newline at end of file diff --git a/utils/utils.go b/utils/utils.go index 86384e1034..fac2f0bacc 100644 --- a/utils/utils.go +++ b/utils/utils.go @@ -125,8 +125,8 @@ func LoadBytes(filename string, expectedSize int) ([]byte, error) { // Generate merkle root for a set of items // this function does not take ownership of given bytes array -func GenerateMerkleRoot(ctx context.Context, tracer trace.Tracer, merkleItems [][]byte) ([]byte, merkledb.MerkleDB, error) { - var batchOps []database.BatchOp +func GenerateMerkleRoot(ctx context.Context, tracer trace.Tracer, merkleItems [][]byte, consumeBytes bool) ([]byte, merkledb.MerkleDB, error) { + batchOps := make([]database.BatchOp, 0, len(merkleItems)) for _, item := range merkleItems { key := ToID(item) @@ -148,7 +148,7 @@ func GenerateMerkleRoot(ctx context.Context, tracer trace.Tracer, merkleItems [] return nil, nil, err } - view, err := db.NewView(ctx, merkledb.ViewChanges{BatchOps: batchOps}) + view, err := db.NewView(ctx, merkledb.ViewChanges{BatchOps: batchOps, ConsumeBytes: consumeBytes}) if err != nil { return nil, nil, err } From 77c79524cbf6b9706060e91434782637c019d08f Mon Sep 17 00:00:00 2001 From: bianyuanop Date: Fri, 19 Jan 2024 17:28:24 -0500 Subject: [PATCH 04/48] remove wrong merkleroot docstring --- utils/utils.go | 1 - 1 file changed, 1 deletion(-) diff --git a/utils/utils.go b/utils/utils.go index fac2f0bacc..b7f5f3feca 100644 --- a/utils/utils.go +++ b/utils/utils.go @@ -124,7 +124,6 @@ func LoadBytes(filename string, expectedSize int) ([]byte, error) { } // Generate merkle root for a set of items -// this function does not take ownership of given bytes array func GenerateMerkleRoot(ctx context.Context, tracer trace.Tracer, merkleItems [][]byte, consumeBytes bool) ([]byte, merkledb.MerkleDB, error) { batchOps := make([]database.BatchOp, 0, len(merkleItems)) From 6dc40e9a44197a7dbf99da1a11e71584bd87c4a5 Mon Sep 17 00:00:00 2001 From: bianyuanop Date: Fri, 19 Jan 2024 21:53:33 -0500 Subject: [PATCH 05/48] add <*.code-workspace> to .gitignore and remove it from git commit --- .gitignore | 2 ++ hypersdk.code-workspace | 8 -------- 2 files changed, 2 insertions(+), 8 deletions(-) delete mode 100644 hypersdk.code-workspace diff --git a/.gitignore b/.gitignore index c556d1e590..410da12571 100644 --- a/.gitignore +++ b/.gitignore @@ -68,3 +68,5 @@ osxcross/ target/ Cargo.lock **/*.rs.bk + +*.code-workspace \ No newline at end of file diff --git a/hypersdk.code-workspace b/hypersdk.code-workspace deleted file mode 100644 index 876a1499c0..0000000000 --- a/hypersdk.code-workspace +++ /dev/null @@ -1,8 +0,0 @@ -{ - "folders": [ - { - "path": "." 
- } - ], - "settings": {} -} \ No newline at end of file From 09b0b6baf2d37620ca91b2931731292bcee22e94 Mon Sep 17 00:00:00 2001 From: bianyuanop Date: Fri, 19 Jan 2024 22:11:51 -0500 Subject: [PATCH 06/48] move root generation func to merkle package, tx root by items of [txID + result] --- chain/block.go | 16 ++++++++------- merkle/merkle.go | 52 ++++++++++++++++++++++++++++++++++++++++++++++++ utils/utils.go | 46 ------------------------------------------ 3 files changed, 61 insertions(+), 53 deletions(-) create mode 100644 merkle/merkle.go diff --git a/chain/block.go b/chain/block.go index 03eedd7e09..ff731ef28b 100644 --- a/chain/block.go +++ b/chain/block.go @@ -23,6 +23,7 @@ import ( "github.com/ava-labs/hypersdk/codec" "github.com/ava-labs/hypersdk/consts" + "github.com/ava-labs/hypersdk/merkle" "github.com/ava-labs/hypersdk/state" "github.com/ava-labs/hypersdk/utils" "github.com/ava-labs/hypersdk/window" @@ -293,16 +294,17 @@ func (b *StatelessBlock) initializeBuilt( } // transaction hash generation - merkleItems := make([][]byte, 0, len(b.Txs)+len(b.results)) - for _, tx := range b.Txs { - merkleItems = append(merkleItems, tx.Bytes()) - } - for _, result := range b.results { - merkleItems = append(merkleItems, result.Output) + // [len(b.Txs)] should be equal to [b.results] + merkleItems := make([][]byte, 0, len(b.Txs)) + for i := 0; i < len(b.Txs); i++ { + txID := b.Txs[i].ID() + resultOutput := b.results[i].Output + // [txID + resultOutput] + merkleItems = append(merkleItems, append(txID[:], resultOutput...)) } // consume bytes to avoid extra copying - root, _, err := utils.GenerateMerkleRoot(ctx, b.vm.Tracer(), merkleItems, true) + root, _, err := merkle.GenerateMerkleRoot(ctx, b.vm.Tracer(), merkleItems, true) if err != nil { return err } diff --git a/merkle/merkle.go b/merkle/merkle.go new file mode 100644 index 0000000000..53333b7692 --- /dev/null +++ b/merkle/merkle.go @@ -0,0 +1,52 @@ +package merkle + +import ( + "context" + + "github.com/ava-labs/avalanchego/database" + "github.com/ava-labs/avalanchego/database/memdb" + "github.com/ava-labs/avalanchego/trace" + "github.com/ava-labs/avalanchego/utils/units" + "github.com/ava-labs/avalanchego/x/merkledb" + "github.com/ava-labs/hypersdk/utils" +) + +// Generate merkle root for a set of items +func GenerateMerkleRoot(ctx context.Context, tracer trace.Tracer, merkleItems [][]byte, consumeBytes bool) ([]byte, merkledb.MerkleDB, error) { + batchOps := make([]database.BatchOp, 0, len(merkleItems)) + + for _, item := range merkleItems { + key := utils.ToID(item) + batchOps = append(batchOps, database.BatchOp{ + Key: key[:], + Value: item, + }) + } + + db, err := merkledb.New(ctx, memdb.New(), merkledb.Config{ + BranchFactor: merkledb.BranchFactor16, + HistoryLength: 100, + EvictionBatchSize: units.MiB, + IntermediateNodeCacheSize: units.MiB, + ValueNodeCacheSize: units.MiB, + Tracer: tracer, + }) + if err != nil { + return nil, nil, err + } + + view, err := db.NewView(ctx, merkledb.ViewChanges{BatchOps: batchOps, ConsumeBytes: consumeBytes}) + if err != nil { + return nil, nil, err + } + if err := view.CommitToDB(ctx); err != nil { + return nil, nil, err + } + + root, err := db.GetMerkleRoot(ctx) + if err != nil { + return nil, nil, err + } + + return root[:], db, nil +} diff --git a/utils/utils.go b/utils/utils.go index b7f5f3feca..2635e1aa4b 100644 --- a/utils/utils.go +++ b/utils/utils.go @@ -4,7 +4,6 @@ package utils import ( - "context" "fmt" "math" "net" @@ -14,14 +13,9 @@ import ( "strconv" "time" - 
"github.com/ava-labs/avalanchego/database" - "github.com/ava-labs/avalanchego/database/memdb" "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/trace" "github.com/ava-labs/avalanchego/utils/hashing" "github.com/ava-labs/avalanchego/utils/perms" - "github.com/ava-labs/avalanchego/utils/units" - "github.com/ava-labs/avalanchego/x/merkledb" "github.com/ava-labs/hypersdk/consts" formatter "github.com/onsi/ginkgo/v2/formatter" ) @@ -122,43 +116,3 @@ func LoadBytes(filename string, expectedSize int) ([]byte, error) { } return bytes, nil } - -// Generate merkle root for a set of items -func GenerateMerkleRoot(ctx context.Context, tracer trace.Tracer, merkleItems [][]byte, consumeBytes bool) ([]byte, merkledb.MerkleDB, error) { - batchOps := make([]database.BatchOp, 0, len(merkleItems)) - - for _, item := range merkleItems { - key := ToID(item) - batchOps = append(batchOps, database.BatchOp{ - Key: key[:], - Value: item, - }) - } - - db, err := merkledb.New(ctx, memdb.New(), merkledb.Config{ - BranchFactor: merkledb.BranchFactor16, - HistoryLength: 100, - EvictionBatchSize: units.MiB, - IntermediateNodeCacheSize: units.MiB, - ValueNodeCacheSize: units.MiB, - Tracer: tracer, - }) - if err != nil { - return nil, nil, err - } - - view, err := db.NewView(ctx, merkledb.ViewChanges{BatchOps: batchOps, ConsumeBytes: consumeBytes}) - if err != nil { - return nil, nil, err - } - if err := view.CommitToDB(ctx); err != nil { - return nil, nil, err - } - - root, err := db.GetMerkleRoot(ctx) - if err != nil { - return nil, nil, err - } - - return root[:], db, nil -} From 7586f403750b9232520506758ee6e850a3b78f17 Mon Sep 17 00:00:00 2001 From: bianyuanop Date: Sat, 20 Jan 2024 09:03:31 -0500 Subject: [PATCH 07/48] comments about appending outputs to slice of txID --- chain/block.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/chain/block.go b/chain/block.go index ff731ef28b..af58104280 100644 --- a/chain/block.go +++ b/chain/block.go @@ -300,6 +300,9 @@ func (b *StatelessBlock) initializeBuilt( txID := b.Txs[i].ID() resultOutput := b.results[i].Output // [txID + resultOutput] + // txID is a fixed length array, hence [append] will always allocate new memory and copy + // so slice with new address will be returned and no reflect on txID, then later + // we consume those bytes merkleItems = append(merkleItems, append(txID[:], resultOutput...)) } From f009424f24e9a01b4d1fb83492ad8fdfd533d071 Mon Sep 17 00:00:00 2001 From: bianyuanop Date: Sun, 28 Jan 2024 09:30:49 -0500 Subject: [PATCH 08/48] rebase & blk marshal/unmarshal & merkleroot to ids.ID --- chain/block.go | 5 ++++- merkle/merkle.go | 14 +++++++------- 2 files changed, 11 insertions(+), 8 deletions(-) diff --git a/chain/block.go b/chain/block.go index af58104280..3a42c1a1c5 100644 --- a/chain/block.go +++ b/chain/block.go @@ -42,7 +42,7 @@ type StatefulBlock struct { Hght uint64 `json:"height"` Txs []*Transaction `json:"txs"` - TxsRoot []byte `json:"txsRoot"` + TxsRoot ids.ID `json:"txsRoot"` // StateRoot is the root of the post-execution state // of [Prnt]. 
@@ -1017,6 +1017,8 @@ func (b *StatefulBlock) Marshal() ([]byte, error) { p.PackID(b.StateRoot) p.PackUint64(uint64(b.WarpResults)) + p.PackID(b.TxsRoot) + bytes := p.Bytes() if err := p.Err(); err != nil { return nil, err @@ -1052,6 +1054,7 @@ func UnmarshalBlock(raw []byte, parser Parser) (*StatefulBlock, error) { p.UnpackID(false, &b.StateRoot) b.WarpResults = set.Bits64(p.UnpackUint64(false)) + p.UnpackID(false, &b.TxsRoot) // Ensure no leftover bytes if !p.Empty() { diff --git a/merkle/merkle.go b/merkle/merkle.go index 53333b7692..e76e7efbad 100644 --- a/merkle/merkle.go +++ b/merkle/merkle.go @@ -5,6 +5,7 @@ import ( "github.com/ava-labs/avalanchego/database" "github.com/ava-labs/avalanchego/database/memdb" + "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/trace" "github.com/ava-labs/avalanchego/utils/units" "github.com/ava-labs/avalanchego/x/merkledb" @@ -12,7 +13,7 @@ import ( ) // Generate merkle root for a set of items -func GenerateMerkleRoot(ctx context.Context, tracer trace.Tracer, merkleItems [][]byte, consumeBytes bool) ([]byte, merkledb.MerkleDB, error) { +func GenerateMerkleRoot(ctx context.Context, tracer trace.Tracer, merkleItems [][]byte, consumeBytes bool) (ids.ID, merkledb.MerkleDB, error) { batchOps := make([]database.BatchOp, 0, len(merkleItems)) for _, item := range merkleItems { @@ -26,27 +27,26 @@ func GenerateMerkleRoot(ctx context.Context, tracer trace.Tracer, merkleItems [] db, err := merkledb.New(ctx, memdb.New(), merkledb.Config{ BranchFactor: merkledb.BranchFactor16, HistoryLength: 100, - EvictionBatchSize: units.MiB, IntermediateNodeCacheSize: units.MiB, ValueNodeCacheSize: units.MiB, Tracer: tracer, }) if err != nil { - return nil, nil, err + return ids.Empty, nil, err } view, err := db.NewView(ctx, merkledb.ViewChanges{BatchOps: batchOps, ConsumeBytes: consumeBytes}) if err != nil { - return nil, nil, err + return ids.Empty, nil, err } if err := view.CommitToDB(ctx); err != nil { - return nil, nil, err + return ids.Empty, nil, err } root, err := db.GetMerkleRoot(ctx) if err != nil { - return nil, nil, err + return ids.Empty, nil, err } - return root[:], db, nil + return root, db, nil } From 2eae8dab6ef8646ba3de602ae18698b477a8ea6a Mon Sep 17 00:00:00 2001 From: francois Date: Mon, 18 Mar 2024 17:31:30 +0100 Subject: [PATCH 09/48] write benches for the merkle package --- merkle/merkle_test.go | 44 +++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 44 insertions(+) create mode 100644 merkle/merkle_test.go diff --git a/merkle/merkle_test.go b/merkle/merkle_test.go new file mode 100644 index 0000000000..76e4c8b841 --- /dev/null +++ b/merkle/merkle_test.go @@ -0,0 +1,44 @@ +package merkle + +import ( + "encoding/binary" + "testing" + + "context" + "strconv" + + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/trace" + "github.com/ava-labs/avalanchego/x/merkledb" +) + +var res_root ids.ID +var res_db merkledb.MerkleDB +var res_err error + +func BenchmarkMerkleTxRoot(b *testing.B) { + for _, size := range []int{10, 100, 1000, 10000} { + ctx := context.TODO() + tracer := trace.Noop + merkleItems := make([][]byte, 0, size) + for i := 0; i < size; i++ { + as_bytes := make([]byte, 32) + binary.BigEndian.PutUint32(as_bytes, uint32(i)) + merkleItems = append(merkleItems, as_bytes) + } + var root ids.ID + var db merkledb.MerkleDB + var err error + b.Run(strconv.Itoa(size), func(b *testing.B) { + for n := 0; n < b.N; n++ { + for i := 0; i < size; i++ { + root, db, err = GenerateMerkleRoot(ctx, tracer, 
merkleItems, false) + } + } + }) + // avoid compiler optimizations to cancel out the bench + res_root = root + res_db = db + res_err = err + } +} From cd108423d752c9cf919ca964a29446bf8c73711c Mon Sep 17 00:00:00 2001 From: francois Date: Fri, 22 Mar 2024 19:53:06 +0100 Subject: [PATCH 10/48] use crypto/rand, fix var name, report allocs --- merkle/merkle_test.go | 30 +++++++++++++++++++----------- 1 file changed, 19 insertions(+), 11 deletions(-) diff --git a/merkle/merkle_test.go b/merkle/merkle_test.go index 76e4c8b841..a90db3cc3e 100644 --- a/merkle/merkle_test.go +++ b/merkle/merkle_test.go @@ -1,9 +1,9 @@ package merkle import ( - "encoding/binary" "testing" + "crypto/rand" "context" "strconv" @@ -12,23 +12,30 @@ import ( "github.com/ava-labs/avalanchego/x/merkledb" ) -var res_root ids.ID -var res_db merkledb.MerkleDB -var res_err error +var resRoot ids.ID +var resDb merkledb.MerkleDB +var resErr error func BenchmarkMerkleTxRoot(b *testing.B) { - for _, size := range []int{10, 100, 1000, 10000} { + b.ReportAllocs() + + for _, size := range []int{10, 100, 1000} { ctx := context.TODO() tracer := trace.Noop merkleItems := make([][]byte, 0, size) for i := 0; i < size; i++ { - as_bytes := make([]byte, 32) - binary.BigEndian.PutUint32(as_bytes, uint32(i)) - merkleItems = append(merkleItems, as_bytes) + item := make([]byte, 32) + _, err := rand.Read(item) + if err != nil { + b.Fatal(err) + } + merkleItems = append(merkleItems, item) } + var root ids.ID var db merkledb.MerkleDB var err error + b.Run(strconv.Itoa(size), func(b *testing.B) { for n := 0; n < b.N; n++ { for i := 0; i < size; i++ { @@ -36,9 +43,10 @@ func BenchmarkMerkleTxRoot(b *testing.B) { } } }) + // avoid compiler optimizations to cancel out the bench - res_root = root - res_db = db - res_err = err + resRoot = root + resDb = db + resErr = err } } From df2344f90cce9a601ef59378ae38d1f53f4ddd09 Mon Sep 17 00:00:00 2001 From: francois Date: Fri, 22 Mar 2024 19:56:44 +0100 Subject: [PATCH 11/48] put the 10k bench back --- merkle/merkle_test.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/merkle/merkle_test.go b/merkle/merkle_test.go index a90db3cc3e..e5c391f833 100644 --- a/merkle/merkle_test.go +++ b/merkle/merkle_test.go @@ -17,9 +17,7 @@ var resDb merkledb.MerkleDB var resErr error func BenchmarkMerkleTxRoot(b *testing.B) { - b.ReportAllocs() - - for _, size := range []int{10, 100, 1000} { + for _, size := range []int{10, 100, 1000, 10000} { ctx := context.TODO() tracer := trace.Noop merkleItems := make([][]byte, 0, size) @@ -49,4 +47,6 @@ func BenchmarkMerkleTxRoot(b *testing.B) { resDb = db resErr = err } + + b.ReportAllocs() } From 8167c704652b90a37f895266fb930c1a91745bb1 Mon Sep 17 00:00:00 2001 From: francois Date: Sun, 31 Mar 2024 13:11:15 +0200 Subject: [PATCH 12/48] pass config by parameter --- merkle/merkle.go | 11 ++--------- merkle/merkle_test.go | 13 +++++++++++-- 2 files changed, 13 insertions(+), 11 deletions(-) diff --git a/merkle/merkle.go b/merkle/merkle.go index e76e7efbad..c2a493f74d 100644 --- a/merkle/merkle.go +++ b/merkle/merkle.go @@ -7,13 +7,12 @@ import ( "github.com/ava-labs/avalanchego/database/memdb" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/trace" - "github.com/ava-labs/avalanchego/utils/units" "github.com/ava-labs/avalanchego/x/merkledb" "github.com/ava-labs/hypersdk/utils" ) // Generate merkle root for a set of items -func GenerateMerkleRoot(ctx context.Context, tracer trace.Tracer, merkleItems [][]byte, consumeBytes bool) (ids.ID, 
merkledb.MerkleDB, error) { +func GenerateMerkleRoot(ctx context.Context, config merkledb.Config, tracer trace.Tracer, merkleItems [][]byte, consumeBytes bool) (ids.ID, merkledb.MerkleDB, error) { batchOps := make([]database.BatchOp, 0, len(merkleItems)) for _, item := range merkleItems { @@ -24,13 +23,7 @@ func GenerateMerkleRoot(ctx context.Context, tracer trace.Tracer, merkleItems [] }) } - db, err := merkledb.New(ctx, memdb.New(), merkledb.Config{ - BranchFactor: merkledb.BranchFactor16, - HistoryLength: 100, - IntermediateNodeCacheSize: units.MiB, - ValueNodeCacheSize: units.MiB, - Tracer: tracer, - }) + db, err := merkledb.New(ctx, memdb.New(), config) if err != nil { return ids.Empty, nil, err } diff --git a/merkle/merkle_test.go b/merkle/merkle_test.go index e5c391f833..176411209c 100644 --- a/merkle/merkle_test.go +++ b/merkle/merkle_test.go @@ -3,12 +3,13 @@ package merkle import ( "testing" - "crypto/rand" "context" + "crypto/rand" "strconv" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/trace" + "github.com/ava-labs/avalanchego/utils/units" "github.com/ava-labs/avalanchego/x/merkledb" ) @@ -34,10 +35,18 @@ func BenchmarkMerkleTxRoot(b *testing.B) { var db merkledb.MerkleDB var err error + defaultConfig := merkledb.Config{ + BranchFactor: merkledb.BranchFactor16, + HistoryLength: 100, + IntermediateNodeCacheSize: units.MiB, + ValueNodeCacheSize: units.MiB, + Tracer: tracer, + } + b.Run(strconv.Itoa(size), func(b *testing.B) { for n := 0; n < b.N; n++ { for i := 0; i < size; i++ { - root, db, err = GenerateMerkleRoot(ctx, tracer, merkleItems, false) + root, db, err = GenerateMerkleRoot(ctx, defaultConfig, tracer, merkleItems, false) } } }) From 5800fb531de92099e6f1236fb709961402c3c99b Mon Sep 17 00:00:00 2001 From: francois Date: Mon, 15 Apr 2024 19:44:36 +0200 Subject: [PATCH 13/48] Revert "rebase & blk marshal/unmarshal & merkleroot to ids.ID" This reverts commit f009424f24e9a01b4d1fb83492ad8fdfd533d071. focus changes on the bench --- .gitignore | 3 +- chain/block.go | 1091 ---------------------------------------------- merkle/merkle.go | 2 +- 3 files changed, 2 insertions(+), 1094 deletions(-) delete mode 100644 chain/block.go diff --git a/.gitignore b/.gitignore index 664b72cc77..fca0663d0c 100644 --- a/.gitignore +++ b/.gitignore @@ -69,5 +69,4 @@ target/ Cargo.lock **/*.rs.bk -*.code-workspace -x/programs/cmd/simulator/simulator +x/programs/cmd/simulator/simulator \ No newline at end of file diff --git a/chain/block.go b/chain/block.go deleted file mode 100644 index c4ce0f7f6c..0000000000 --- a/chain/block.go +++ /dev/null @@ -1,1091 +0,0 @@ -// Copyright (C) 2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. 
- -package chain - -import ( - "context" - "encoding/binary" - "fmt" - "time" - - "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/snow/choices" - "github.com/ava-labs/avalanchego/snow/consensus/snowman" - "github.com/ava-labs/avalanchego/snow/engine/snowman/block" - "github.com/ava-labs/avalanchego/snow/validators" - "github.com/ava-labs/avalanchego/utils/set" - "github.com/ava-labs/avalanchego/vms/platformvm/warp" - "github.com/ava-labs/avalanchego/x/merkledb" - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/trace" - "go.uber.org/zap" - - "github.com/ava-labs/hypersdk/codec" - "github.com/ava-labs/hypersdk/consts" - "github.com/ava-labs/hypersdk/merkle" - "github.com/ava-labs/hypersdk/fees" - "github.com/ava-labs/hypersdk/state" - "github.com/ava-labs/hypersdk/utils" - "github.com/ava-labs/hypersdk/window" - "github.com/ava-labs/hypersdk/workers" -) - -var ( - _ snowman.Block = &StatelessBlock{} - _ block.WithVerifyContext = &StatelessBlock{} - _ block.StateSummary = &SyncableBlock{} -) - -type StatefulBlock struct { - Prnt ids.ID `json:"parent"` - Tmstmp int64 `json:"timestamp"` - Hght uint64 `json:"height"` - - Txs []*Transaction `json:"txs"` - TxsRoot ids.ID `json:"txsRoot"` - - // StateRoot is the root of the post-execution state - // of [Prnt]. - // - // This "deferred root" design allows for merklization - // to be done asynchronously instead of during [Build] - // or [Verify], which reduces the amount of time we are - // blocking the consensus engine from voting on the block, - // starting the verification of another block, etc. - StateRoot ids.ID `json:"stateRoot"` - WarpResults set.Bits64 `json:"warpResults"` - - size int - - // authCounts can be used by batch signature verification - // to preallocate memory - authCounts map[uint8]int -} - -func (b *StatefulBlock) Size() int { - return b.size -} - -func (b *StatefulBlock) ID() (ids.ID, error) { - blk, err := b.Marshal() - if err != nil { - return ids.ID{}, err - } - return utils.ToID(blk), nil -} - -// warpJob is used to signal to a listner that a *warp.Message has been -// verified. -type warpJob struct { - msg *warp.Message - signers int - verifiedChan chan bool - verified bool - warpNum int -} - -func NewGenesisBlock(root ids.ID) *StatefulBlock { - return &StatefulBlock{ - // We set the genesis block timestamp to be after the ProposerVM fork activation. - // - // This prevents an issue (when using millisecond timestamps) during ProposerVM activation - // where the child timestamp is rounded down to the nearest second (which may be before - // the timestamp of its parent, which is denoted in milliseconds). 
- // - // Link: https://github.com/ava-labs/avalanchego/blob/0ec52a9c6e5b879e367688db01bb10174d70b212 - // .../vms/proposervm/pre_fork_block.go#L201 - Tmstmp: time.Date(2023, time.January, 1, 0, 0, 0, 0, time.UTC).UnixMilli(), - - // StateRoot should include all allocates made when loading the genesis file - StateRoot: root, - } -} - -// Stateless is defined separately from "Block" -// in case external packages needs use the stateful block -// without mocking VM or parent block -type StatelessBlock struct { - *StatefulBlock `json:"block"` - - id ids.ID - st choices.Status - t time.Time - bytes []byte - txsSet set.Set[ids.ID] - - warpMessages map[ids.ID]*warpJob - containsWarp bool // this allows us to avoid allocating a map when we build - bctx *block.Context - vdrState validators.State - - results []*Result - feeManager *fees.Manager - - vm VM - view merkledb.View - - sigJob workers.Job -} - -func NewBlock(vm VM, parent snowman.Block, tmstp int64) *StatelessBlock { - return &StatelessBlock{ - StatefulBlock: &StatefulBlock{ - Prnt: parent.ID(), - Tmstmp: tmstp, - Hght: parent.Height() + 1, - }, - vm: vm, - st: choices.Processing, - } -} - -func ParseBlock( - ctx context.Context, - source []byte, - status choices.Status, - vm VM, -) (*StatelessBlock, error) { - ctx, span := vm.Tracer().Start(ctx, "chain.ParseBlock") - defer span.End() - - blk, err := UnmarshalBlock(source, vm) - if err != nil { - return nil, err - } - // Not guaranteed that a parsed block is verified - return ParseStatefulBlock(ctx, blk, source, status, vm) -} - -// populateTxs is only called on blocks we did not build -func (b *StatelessBlock) populateTxs(ctx context.Context) error { - ctx, span := b.vm.Tracer().Start(ctx, "StatelessBlock.populateTxs") - defer span.End() - - // Setup signature verification job - _, sigVerifySpan := b.vm.Tracer().Start(ctx, "StatelessBlock.verifySignatures") //nolint:spancheck - job, err := b.vm.AuthVerifiers().NewJob(len(b.Txs)) - if err != nil { - return err //nolint:spancheck - } - b.sigJob = job - batchVerifier := NewAuthBatch(b.vm, b.sigJob, b.authCounts) - - // Make sure to always call [Done], otherwise we will block all future [Workers] - defer func() { - // BatchVerifier is given the responsibility to call [b.sigJob.Done()] because it may add things - // to the work queue async and that may not have completed by this point. - go batchVerifier.Done(func() { sigVerifySpan.End() }) - }() - - // Confirm no transaction duplicates and setup - // AWM processing - b.txsSet = set.NewSet[ids.ID](len(b.Txs)) - b.warpMessages = map[ids.ID]*warpJob{} - for _, tx := range b.Txs { - // Ensure there are no duplicate transactions - if b.txsSet.Contains(tx.ID()) { - return ErrDuplicateTx - } - b.txsSet.Add(tx.ID()) - - // Verify signature async - if b.vm.GetVerifyAuth() { - txDigest, err := tx.Digest() - if err != nil { - return err - } - batchVerifier.Add(txDigest, tx.Auth) - } - - // Check if we need the block context to verify the block (which contains - // an Avalanche Warp Message) - // - // Instead of erroring out if a warp message is invalid, we mark the - // verification as skipped and include it in the verification result so - // that a fee can still be deducted. 
- if tx.WarpMessage != nil { - if len(b.warpMessages) == MaxWarpMessages { - return ErrTooManyWarpMessages - } - signers, err := tx.WarpMessage.Signature.NumSigners() - if err != nil { - return err - } - b.warpMessages[tx.ID()] = &warpJob{ - msg: tx.WarpMessage, - signers: signers, - verifiedChan: make(chan bool, 1), - warpNum: len(b.warpMessages), - } - b.containsWarp = true - } - } - return nil -} - -func ParseStatefulBlock( - ctx context.Context, - blk *StatefulBlock, - source []byte, - status choices.Status, - vm VM, -) (*StatelessBlock, error) { - ctx, span := vm.Tracer().Start(ctx, "chain.ParseStatefulBlock") - defer span.End() - - // Perform basic correctness checks before doing any expensive work - if blk.Tmstmp > time.Now().Add(FutureBound).UnixMilli() { - return nil, ErrTimestampTooLate - } - - if len(source) == 0 { - nsource, err := blk.Marshal() - if err != nil { - return nil, err - } - source = nsource - } - b := &StatelessBlock{ - StatefulBlock: blk, - t: time.UnixMilli(blk.Tmstmp), - bytes: source, - st: status, - vm: vm, - id: utils.ToID(source), - } - - // If we are parsing an older block, it will not be re-executed and should - // not be tracked as a parsed block - lastAccepted := b.vm.LastAcceptedBlock() - if lastAccepted == nil || b.Hght <= lastAccepted.Hght { // nil when parsing genesis - return b, nil - } - - // Populate hashes and tx set - return b, b.populateTxs(ctx) -} - -// [initializeBuilt] is invoked after a block is built -func (b *StatelessBlock) initializeBuilt( - ctx context.Context, - view merkledb.View, - results []*Result, - feeManager *fees.Manager, -) error { - _, span := b.vm.Tracer().Start(ctx, "StatelessBlock.initializeBuilt") - defer span.End() - - blk, err := b.StatefulBlock.Marshal() - if err != nil { - return err - } - b.bytes = blk - b.id = utils.ToID(b.bytes) - b.view = view - b.t = time.UnixMilli(b.StatefulBlock.Tmstmp) - b.results = results - b.feeManager = feeManager - b.txsSet = set.NewSet[ids.ID](len(b.Txs)) - for _, tx := range b.Txs { - b.txsSet.Add(tx.ID()) - if tx.WarpMessage != nil { - b.containsWarp = true - } - } - - // transaction hash generation - // [len(b.Txs)] should be equal to [b.results] - merkleItems := make([][]byte, 0, len(b.Txs)) - for i := 0; i < len(b.Txs); i++ { - txID := b.Txs[i].ID() - resultOutput := b.results[i].Output - // [txID + resultOutput] - // txID is a fixed length array, hence [append] will always allocate new memory and copy - // so slice with new address will be returned and no reflect on txID, then later - // we consume those bytes - merkleItems = append(merkleItems, append(txID[:], resultOutput...)) - } - - // consume bytes to avoid extra copying - root, _, err := merkle.GenerateMerkleRoot(ctx, b.vm.Tracer(), merkleItems, true) - if err != nil { - return err - } - b.TxsRoot = root - - return nil -} - -// implements "snowman.Block.choices.Decidable" -func (b *StatelessBlock) ID() ids.ID { return b.id } - -// implements "block.WithVerifyContext" -func (b *StatelessBlock) ShouldVerifyWithContext(context.Context) (bool, error) { - return b.containsWarp, nil -} - -// implements "block.WithVerifyContext" -func (b *StatelessBlock) VerifyWithContext(ctx context.Context, bctx *block.Context) error { - start := time.Now() - defer func() { - b.vm.RecordBlockVerify(time.Since(start)) - }() - - stateReady := b.vm.StateReady() - ctx, span := b.vm.Tracer().Start( - ctx, "StatelessBlock.VerifyWithContext", - trace.WithAttributes( - attribute.Int("txs", len(b.Txs)), - attribute.Int64("height", int64(b.Hght)), - 
attribute.Bool("stateReady", stateReady), - attribute.Int64("pchainHeight", int64(bctx.PChainHeight)), - attribute.Bool("built", b.Processed()), - ), - ) - defer span.End() - - // Persist the context in case we need it during Accept - b.bctx = bctx - - // Proceed with normal verification - return b.verify(ctx, stateReady) -} - -// implements "snowman.Block" -func (b *StatelessBlock) Verify(ctx context.Context) error { - start := time.Now() - defer func() { - b.vm.RecordBlockVerify(time.Since(start)) - }() - - stateReady := b.vm.StateReady() - ctx, span := b.vm.Tracer().Start( - ctx, "StatelessBlock.Verify", - trace.WithAttributes( - attribute.Int("txs", len(b.Txs)), - attribute.Int64("height", int64(b.Hght)), - attribute.Bool("stateReady", stateReady), - attribute.Bool("built", b.Processed()), - ), - ) - defer span.End() - - return b.verify(ctx, stateReady) -} - -func (b *StatelessBlock) verify(ctx context.Context, stateReady bool) error { - log := b.vm.Logger() - switch { - case !stateReady: - // If the state of the accepted tip has not been fully fetched, it is not safe to - // verify any block. - log.Info( - "skipping verification, state not ready", - zap.Uint64("height", b.Hght), - zap.Stringer("blkID", b.ID()), - ) - case b.Processed(): - // If we built the block, the state will already be populated and we don't - // need to compute it (we assume that we built a correct block and it isn't - // necessary to re-verify anything). - log.Info( - "skipping verification, already processed", - zap.Uint64("height", b.Hght), - zap.Stringer("blkID", b.ID()), - ) - default: - // Get the [VerifyContext] needed to process this block. - // - // If the parent block's height is less than or equal to the last accepted height (and - // the last accepted height is processed), the accepted state will be used as the execution - // context. Otherwise, the parent block will be used as the execution context. - vctx, err := b.vm.GetVerifyContext(ctx, b.Hght, b.Prnt) - if err != nil { - b.vm.Logger().Warn("unable to get verify context", - zap.Uint64("height", b.Hght), - zap.Stringer("blkID", b.ID()), - zap.Error(err), - ) - return fmt.Errorf("%w: unable to load verify context", err) - } - - // Parent block may not be processed when we verify this block, so [innerVerify] may - // recursively verify ancestry. - if err := b.innerVerify(ctx, vctx); err != nil { - b.vm.Logger().Warn("verification failed", - zap.Uint64("height", b.Hght), - zap.Stringer("blkID", b.ID()), - zap.Error(err), - ) - return err - } - } - - // At any point after this, we may attempt to verify the block. We should be - // sure we are prepared to do so. - // - // NOTE: mempool is modified by VM handler - b.vm.Verified(ctx, b) - return nil -} - -// verifyWarpMessage will attempt to verify a given warp message provided by an -// Action. -func (b *StatelessBlock) verifyWarpMessage(ctx context.Context, r Rules, msg *warp.Message) bool { - // We do not check the validity of [SourceChainID] because a VM could send - // itself a message to trigger a chain upgrade. - allowed, num, denom := r.GetWarpConfig(msg.SourceChainID) - if !allowed { - b.vm.Logger(). - Warn("unable to verify warp message", zap.Stringer("warpID", msg.ID()), zap.Error(ErrDisabledChainID)) - return false - } - if err := msg.Signature.Verify( - ctx, - &msg.UnsignedMessage, - r.NetworkID(), - b.vdrState, - b.bctx.PChainHeight, - num, - denom, - ); err != nil { - b.vm.Logger(). 
- Warn("unable to verify warp message", zap.Stringer("warpID", msg.ID()), zap.Error(err)) - return false - } - return true -} - -// innerVerify executes the block on top of the provided [VerifyContext]. -// -// Invariants: -// Accepted / Rejected blocks should never have Verify called on them. -// Blocks that were verified (and returned nil) with Verify will not have verify called again. -// Blocks that were verified with VerifyWithContext may have verify called multiple times. -// -// When this may be called: -// 1. [Verify|VerifyWithContext] -// 2. If the parent view is missing when verifying (dynamic state sync) -// 3. If the view of a block we are accepting is missing (finishing dynamic -// state sync) -func (b *StatelessBlock) innerVerify(ctx context.Context, vctx VerifyContext) error { - var ( - log = b.vm.Logger() - r = b.vm.Rules(b.Tmstmp) - ) - - // Perform basic correctness checks before doing any expensive work - if b.Timestamp().UnixMilli() > time.Now().Add(FutureBound).UnixMilli() { - return ErrTimestampTooLate - } - - // Fetch view where we will apply block state transitions - // - // This call may result in our ancestry being verified. - parentView, err := vctx.View(ctx, true) - if err != nil { - return fmt.Errorf("%w: unable to load parent view", err) - } - - // Fetch parent height key and ensure block height is valid - heightKey := HeightKey(b.vm.StateManager().HeightKey()) - parentHeightRaw, err := parentView.GetValue(ctx, heightKey) - if err != nil { - return err - } - parentHeight := binary.BigEndian.Uint64(parentHeightRaw) - if b.Hght != parentHeight+1 { - return ErrInvalidBlockHeight - } - - // Fetch parent timestamp and confirm block timestamp is valid - // - // Parent may not be available (if we preformed state sync), so we - // can't rely on being able to fetch it during verification. - timestampKey := TimestampKey(b.vm.StateManager().TimestampKey()) - parentTimestampRaw, err := parentView.GetValue(ctx, timestampKey) - if err != nil { - return err - } - parentTimestamp := int64(binary.BigEndian.Uint64(parentTimestampRaw)) - if b.Tmstmp < parentTimestamp+r.GetMinBlockGap() { - return ErrTimestampTooEarly - } - if len(b.Txs) == 0 && b.Tmstmp < parentTimestamp+r.GetMinEmptyBlockGap() { - return ErrTimestampTooEarly - } - - // Ensure tx cannot be replayed - // - // Before node is considered ready (emap is fully populated), this may return - // false when other validators think it is true. - // - // If a block is already accepted, its transactions have already been added - // to the VM's seen emap and calling [IsRepeat] will return a non-zero value. 
- if b.st != choices.Accepted { - oldestAllowed := b.Tmstmp - r.GetValidityWindow() - if oldestAllowed < 0 { - // Can occur if verifying genesis - oldestAllowed = 0 - } - dup, err := vctx.IsRepeat(ctx, oldestAllowed, b.Txs, set.NewBits(), true) - if err != nil { - return err - } - if dup.Len() > 0 { - return fmt.Errorf("%w: duplicate in ancestry", ErrDuplicateTx) - } - } - - // Start validating warp messages, if they exist - var invalidWarpResult bool - if b.containsWarp { - if b.bctx == nil { - log.Error( - "missing verify block context", - zap.Uint64("height", b.Hght), - zap.Stringer("id", b.ID()), - ) - return ErrMissingBlockContext - } - _, warpVerifySpan := b.vm.Tracer().Start(ctx, "StatelessBlock.verifyWarpMessages") //nolint:spancheck - b.vdrState = b.vm.ValidatorState() - go func() { - defer warpVerifySpan.End() - // We don't use [b.vm.Workers] here because we need the warp verification - // results during normal execution. If we added a job to the workers queue, - // it would get executed after all signatures. Additionally, BLS - // Multi-Signature verification is already parallelized so we should just - // do one at a time to avoid overwhelming the CPU. - for txID, msg := range b.warpMessages { - if ctx.Err() != nil { - return - } - blockVerified := b.WarpResults.Contains(uint(msg.warpNum)) - if b.vm.IsBootstrapped() && !invalidWarpResult { - start := time.Now() - verified := b.verifyWarpMessage(ctx, r, msg.msg) - msg.verifiedChan <- verified - msg.verified = verified - log.Info( - "processed warp message", - zap.Stringer("txID", txID), - zap.Bool("verified", verified), - zap.Int("signers", msg.signers), - zap.Duration("t", time.Since(start)), - ) - if blockVerified != verified { - invalidWarpResult = true - } - } else { - // When we are bootstrapping, we just use the result in the block. - // - // We also use the result in the block when we have found - // a verification mismatch (our verify result is different than the - // block) to avoid doing extra work. - msg.verifiedChan <- blockVerified - msg.verified = blockVerified - } - } - }() - } - - // Compute next unit prices to use - feeKey := FeeKey(b.vm.StateManager().FeeKey()) - feeRaw, err := parentView.GetValue(ctx, feeKey) - if err != nil { - return err //nolint:spancheck - } - parentFeeManager := fees.NewManager(feeRaw) - feeManager, err := parentFeeManager.ComputeNext(parentTimestamp, b.Tmstmp, r) - if err != nil { - return err - } - - // Process transactions - results, ts, err := b.Execute(ctx, b.vm.Tracer(), parentView, feeManager, r) - if err != nil { - log.Error("failed to execute block", zap.Error(err)) - return err - } - b.results = results - b.feeManager = feeManager - - // Ensure warp results are correct - if invalidWarpResult { - return ErrWarpResultMismatch - } - numWarp := len(b.warpMessages) - if numWarp > MaxWarpMessages { - return ErrTooManyWarpMessages - } - var warpResultsLimit set.Bits64 - warpResultsLimit.Add(uint(numWarp)) - if b.WarpResults >= warpResultsLimit { - // If the value of [WarpResults] is greater than the value of uint64 with - // a 1-bit shifted [numWarp] times, then there are unused bits set to - // 1 (which should is not allowed). 
- return ErrWarpResultMismatch - } - - // Update chain metadata - heightKeyStr := string(heightKey) - timestampKeyStr := string(timestampKey) - feeKeyStr := string(feeKey) - - keys := make(state.Keys) - keys.Add(heightKeyStr, state.Write) - keys.Add(timestampKeyStr, state.Write) - keys.Add(feeKeyStr, state.Write) - tsv := ts.NewView(keys, map[string][]byte{ - heightKeyStr: parentHeightRaw, - timestampKeyStr: parentTimestampRaw, - feeKeyStr: parentFeeManager.Bytes(), - }) - if err := tsv.Insert(ctx, heightKey, binary.BigEndian.AppendUint64(nil, b.Hght)); err != nil { - return err - } - if err := tsv.Insert(ctx, timestampKey, binary.BigEndian.AppendUint64(nil, uint64(b.Tmstmp))); err != nil { - return err - } - if err := tsv.Insert(ctx, feeKey, feeManager.Bytes()); err != nil { - return err - } - tsv.Commit() - - // Compare state root - // - // Because fee bytes are not recorded in state, it is sufficient to check the state root - // to verify all fee calcuations were correct. - _, rspan := b.vm.Tracer().Start(ctx, "StatelessBlock.Verify.WaitRoot") - start := time.Now() - computedRoot, err := parentView.GetMerkleRoot(ctx) - rspan.End() - if err != nil { - return err - } - b.vm.RecordWaitRoot(time.Since(start)) - if b.StateRoot != computedRoot { - return fmt.Errorf( - "%w: expected=%s found=%s", - ErrStateRootMismatch, - computedRoot, - b.StateRoot, - ) - } - - // Ensure signatures are verified - _, sspan := b.vm.Tracer().Start(ctx, "StatelessBlock.Verify.WaitSignatures") - start = time.Now() - err = b.sigJob.Wait() - sspan.End() - if err != nil { - return err - } - b.vm.RecordWaitSignatures(time.Since(start)) - - // Get view from [tstate] after processing all state transitions - b.vm.RecordStateChanges(ts.PendingChanges()) - b.vm.RecordStateOperations(ts.OpIndex()) - view, err := ts.ExportMerkleDBView(ctx, b.vm.Tracer(), parentView) - if err != nil { - return err - } - b.view = view - - // Kickoff root generation - go func() { - start := time.Now() - root, err := view.GetMerkleRoot(ctx) - if err != nil { - log.Error("merkle root generation failed", zap.Error(err)) - return - } - log.Info("merkle root generated", - zap.Uint64("height", b.Hght), - zap.Stringer("blkID", b.ID()), - zap.Stringer("root", root), - ) - b.vm.RecordRootCalculated(time.Since(start)) - }() - return nil -} - -// implements "snowman.Block.choices.Decidable" -func (b *StatelessBlock) Accept(ctx context.Context) error { - start := time.Now() - defer func() { - b.vm.RecordBlockAccept(time.Since(start)) - }() - - ctx, span := b.vm.Tracer().Start(ctx, "StatelessBlock.Accept") - defer span.End() - - // Consider verifying the a block if it is not processed and we are no longer - // syncing. - if !b.Processed() { - // The state of this block was not calculated during the call to - // [StatelessBlock.Verify]. This is because the VM was state syncing - // and did not have the state necessary to verify the block. - updated, err := b.vm.UpdateSyncTarget(b) - if err != nil { - return err - } - if updated { - b.vm.Logger().Info("updated state sync target", - zap.Stringer("id", b.ID()), - zap.Stringer("root", b.StateRoot), - ) - return nil // the sync is still ongoing - } - - // This code handles the case where this block was not - // verified during state sync (stopped syncing with a - // processing block). - // - // If state sync completes before accept is called - // then we need to process it here. 
- b.vm.Logger().Info("verifying unprocessed block in accept", - zap.Stringer("id", b.ID()), - zap.Stringer("root", b.StateRoot), - ) - vctx, err := b.vm.GetVerifyContext(ctx, b.Hght, b.Prnt) - if err != nil { - return fmt.Errorf("%w: unable to get verify context", err) - } - if err := b.innerVerify(ctx, vctx); err != nil { - return fmt.Errorf("%w: unable to verify block", err) - } - } - - // Commit view if we don't return before here (would happen if we are still - // syncing) - if err := b.view.CommitToDB(ctx); err != nil { - return fmt.Errorf("%w: unable to commit block", err) - } - - // Mark block as accepted and update last accepted in storage - b.MarkAccepted(ctx) - return nil -} - -func (b *StatelessBlock) MarkAccepted(ctx context.Context) { - // Accept block and free unnecessary memory - b.st = choices.Accepted - b.txsSet = nil // only used for replay protection when processing - - // [Accepted] will persist the block to disk and set in-memory variables - // needed to ensure we don't resync all blocks when state sync finishes. - // - // Note: We will not call [b.vm.Verified] before accepting during state sync - b.vm.Accepted(ctx, b) -} - -// implements "snowman.Block.choices.Decidable" -func (b *StatelessBlock) Reject(ctx context.Context) error { - ctx, span := b.vm.Tracer().Start(ctx, "StatelessBlock.Reject") - defer span.End() - - b.st = choices.Rejected - b.vm.Rejected(ctx, b) - return nil -} - -// implements "snowman.Block.choices.Decidable" -func (b *StatelessBlock) Status() choices.Status { return b.st } - -// implements "snowman.Block" -func (b *StatelessBlock) Parent() ids.ID { return b.StatefulBlock.Prnt } - -// implements "snowman.Block" -func (b *StatelessBlock) Bytes() []byte { return b.bytes } - -// implements "snowman.Block" -func (b *StatelessBlock) Height() uint64 { return b.StatefulBlock.Hght } - -// implements "snowman.Block" -func (b *StatelessBlock) Timestamp() time.Time { return b.t } - -// Used to determine if should notify listeners and/or pass to controller -func (b *StatelessBlock) Processed() bool { - return b.view != nil -} - -// View returns the [merkledb.TrieView] of the block (representing the state -// post-execution) or returns the accepted state if the block is accepted or -// is height 0 (genesis). -// -// If [b.view] is nil (not processed), this function will either return an error or will -// run verification (depending on whether the height is in [acceptedState]). -// -// We still need to handle returning the accepted state here because -// the [VM] will call [View] on the preferred tip of the chain (whether or -// not it is accepted). -// -// Invariant: [View] with [verify] == true should not be called concurrently, otherwise, -// it will result in undefined behavior. -func (b *StatelessBlock) View(ctx context.Context, verify bool) (state.View, error) { - ctx, span := b.vm.Tracer().Start(ctx, "StatelessBlock.View", - trace.WithAttributes( - attribute.Bool("processed", b.Processed()), - attribute.Bool("verify", verify), - ), - ) - defer span.End() - - // If this is the genesis block, return the base state. - if b.Hght == 0 { - return b.vm.State() - } - - // If block is processed, we can return either the accepted state - // or its pending view. - if b.Processed() { - if b.st == choices.Accepted { - // We assume that base state was properly updated if this - // block was accepted (this is not obvious because - // the accepted state may be that of the parent of the last - // accepted block right after state sync finishes). 
- return b.vm.State() - } - return b.view, nil - } - - // If the block is not processed but [acceptedState] equals the height - // of the block, we should return the accepted state. - // - // This can happen when we are building a child block immediately after - // restart (latest block will not be considered [Processed] because there - // will be no attached view from execution). - // - // We cannot use the merkle root to check against the accepted state - // because the block only contains the root of the parent block's post-execution. - if b.st == choices.Accepted { - acceptedState, err := b.vm.State() - if err != nil { - return nil, err - } - acceptedHeightRaw, err := acceptedState.Get(HeightKey(b.vm.StateManager().HeightKey())) - if err != nil { - return nil, err - } - acceptedHeight := binary.BigEndian.Uint64(acceptedHeightRaw) - if acceptedHeight == b.Hght { - b.vm.Logger().Info("accepted block not processed but found post-execution state on-disk", - zap.Uint64("height", b.Hght), - zap.Stringer("blkID", b.ID()), - zap.Bool("verify", verify), - ) - return acceptedState, nil - } - b.vm.Logger().Info("accepted block not processed and does not match state on-disk", - zap.Uint64("height", b.Hght), - zap.Stringer("blkID", b.ID()), - zap.Bool("verify", verify), - ) - } else { - b.vm.Logger().Info("block not processed", - zap.Uint64("height", b.Hght), - zap.Stringer("blkID", b.ID()), - zap.Bool("verify", verify), - ) - } - if !verify { - return nil, ErrBlockNotProcessed - } - - // If there are no processing blocks when state sync finishes, - // the first block we attempt to verify will reach this execution - // path. - // - // In this scenario, the last accepted block will not be processed - // and [acceptedState] will correspond to the post-execution state - // of the new block's grandparent (our parent). To remedy this, - // we need to process this block to return a valid view. - b.vm.Logger().Info("verifying block when view requested", - zap.Uint64("height", b.Hght), - zap.Stringer("blkID", b.ID()), - zap.Bool("accepted", b.st == choices.Accepted), - ) - vctx, err := b.vm.GetVerifyContext(ctx, b.Hght, b.Prnt) - if err != nil { - b.vm.Logger().Error("unable to get verify context", zap.Error(err)) - return nil, err - } - if err := b.innerVerify(ctx, vctx); err != nil { - b.vm.Logger().Error("unable to verify block", zap.Error(err)) - return nil, err - } - if b.st != choices.Accepted { - return b.view, nil - } - - // If the block is already accepted, we should update - // the accepted state to ensure future calls to [View] - // return the correct state (now that the block is considered - // processed). - // - // It is not possible to reach this function if this block - // is not the child of the block whose post-execution state - // is currently stored on disk, so it is safe to call [CommitToDB]. - if err := b.view.CommitToDB(ctx); err != nil { - b.vm.Logger().Error("unable to commit to DB", zap.Error(err)) - return nil, err - } - return b.vm.State() -} - -// IsRepeat returns a bitset of all transactions that are considered repeats in -// the range that spans back to [oldestAllowed]. -// -// If [stop] is set to true, IsRepeat will return as soon as the first repeat -// is found (useful for block verification). 
-func (b *StatelessBlock) IsRepeat( - ctx context.Context, - oldestAllowed int64, - txs []*Transaction, - marker set.Bits, - stop bool, -) (set.Bits, error) { - ctx, span := b.vm.Tracer().Start(ctx, "StatelessBlock.IsRepeat") - defer span.End() - - // Early exit if we are already back at least [ValidityWindow] - // - // It is critical to ensure this logic is equivalent to [emap] to avoid - // non-deterministic verification. - if b.Tmstmp < oldestAllowed { - return marker, nil - } - - // If we are at an accepted block or genesis, we can use the emap on the VM - // instead of checking each block - if b.st == choices.Accepted || b.Hght == 0 /* genesis */ { - return b.vm.IsRepeat(ctx, txs, marker, stop), nil - } - - // Check if block contains any overlapping txs - for i, tx := range txs { - if marker.Contains(i) { - continue - } - if b.txsSet.Contains(tx.ID()) { - marker.Add(i) - if stop { - return marker, nil - } - } - } - prnt, err := b.vm.GetStatelessBlock(ctx, b.Prnt) - if err != nil { - return marker, err - } - return prnt.IsRepeat(ctx, oldestAllowed, txs, marker, stop) -} - -func (b *StatelessBlock) GetTxs() []*Transaction { - return b.Txs -} - -func (b *StatelessBlock) GetTimestamp() int64 { - return b.Tmstmp -} - -func (b *StatelessBlock) Results() []*Result { - return b.results -} - -func (b *StatelessBlock) FeeManager() *fees.Manager { - return b.feeManager -} - -func (b *StatefulBlock) Marshal() ([]byte, error) { - size := consts.IDLen + consts.Uint64Len + consts.Uint64Len + - consts.Uint64Len + window.WindowSliceSize + - consts.IntLen + codec.CummSize(b.Txs) + - consts.IDLen + consts.Uint64Len + consts.Uint64Len - - p := codec.NewWriter(size, consts.NetworkSizeLimit) - - p.PackID(b.Prnt) - p.PackInt64(b.Tmstmp) - p.PackUint64(b.Hght) - - p.PackInt(len(b.Txs)) - b.authCounts = map[uint8]int{} - for _, tx := range b.Txs { - if err := tx.Marshal(p); err != nil { - return nil, err - } - b.authCounts[tx.Auth.GetTypeID()]++ - } - - p.PackID(b.StateRoot) - p.PackUint64(uint64(b.WarpResults)) - p.PackID(b.TxsRoot) - - bytes := p.Bytes() - if err := p.Err(); err != nil { - return nil, err - } - b.size = len(bytes) - return bytes, nil -} - -func UnmarshalBlock(raw []byte, parser Parser) (*StatefulBlock, error) { - var ( - p = codec.NewReader(raw, consts.NetworkSizeLimit) - b StatefulBlock - ) - b.size = len(raw) - - p.UnpackID(false, &b.Prnt) - b.Tmstmp = p.UnpackInt64(false) - b.Hght = p.UnpackUint64(false) - - // Parse transactions - txCount := p.UnpackInt(false) // can produce empty blocks - actionRegistry, authRegistry := parser.Registry() - b.Txs = []*Transaction{} // don't preallocate all to avoid DoS - b.authCounts = map[uint8]int{} - for i := 0; i < txCount; i++ { - tx, err := UnmarshalTx(p, actionRegistry, authRegistry) - if err != nil { - return nil, err - } - b.Txs = append(b.Txs, tx) - b.authCounts[tx.Auth.GetTypeID()]++ - } - - p.UnpackID(false, &b.StateRoot) - b.WarpResults = set.Bits64(p.UnpackUint64(false)) - p.UnpackID(false, &b.TxsRoot) - - // Ensure no leftover bytes - if !p.Empty() { - return nil, fmt.Errorf("%w: remaining=%d", ErrInvalidObject, len(raw)-p.Offset()) - } - return &b, p.Err() -} - -type SyncableBlock struct { - *StatelessBlock -} - -func (sb *SyncableBlock) Accept(ctx context.Context) (block.StateSyncMode, error) { - return sb.vm.AcceptedSyncableBlock(ctx, sb) -} - -func NewSyncableBlock(sb *StatelessBlock) *SyncableBlock { - return &SyncableBlock{sb} -} - -func (sb *SyncableBlock) String() string { - return fmt.Sprintf("%d:%s root=%s", sb.Height(), 
sb.ID(), sb.StateRoot) -} - -// Testing -func (b *StatelessBlock) MarkUnprocessed() { - b.view = nil -} diff --git a/merkle/merkle.go b/merkle/merkle.go index c2a493f74d..c2a7088fea 100644 --- a/merkle/merkle.go +++ b/merkle/merkle.go @@ -5,10 +5,10 @@ import ( "github.com/ava-labs/avalanchego/database" "github.com/ava-labs/avalanchego/database/memdb" - "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/trace" "github.com/ava-labs/avalanchego/x/merkledb" "github.com/ava-labs/hypersdk/utils" + "github.com/ava-labs/avalanchego/ids" ) // Generate merkle root for a set of items From df976037e00b51570eeb7d4a633a0c816d734703 Mon Sep 17 00:00:00 2001 From: francois Date: Mon, 15 Apr 2024 19:45:30 +0200 Subject: [PATCH 14/48] resurrect block.go --- chain/block.go | 1065 ++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 1065 insertions(+) create mode 100644 chain/block.go diff --git a/chain/block.go b/chain/block.go new file mode 100644 index 0000000000..7be9c7ec6f --- /dev/null +++ b/chain/block.go @@ -0,0 +1,1065 @@ +// Copyright (C) 2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package chain + +import ( + "context" + "encoding/binary" + "fmt" + "time" + + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/snow/choices" + "github.com/ava-labs/avalanchego/snow/consensus/snowman" + "github.com/ava-labs/avalanchego/snow/engine/snowman/block" + "github.com/ava-labs/avalanchego/snow/validators" + "github.com/ava-labs/avalanchego/utils/set" + "github.com/ava-labs/avalanchego/vms/platformvm/warp" + "github.com/ava-labs/avalanchego/x/merkledb" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/trace" + "go.uber.org/zap" + + "github.com/ava-labs/hypersdk/codec" + "github.com/ava-labs/hypersdk/consts" + "github.com/ava-labs/hypersdk/fees" + "github.com/ava-labs/hypersdk/state" + "github.com/ava-labs/hypersdk/utils" + "github.com/ava-labs/hypersdk/window" + "github.com/ava-labs/hypersdk/workers" +) + +var ( + _ snowman.Block = &StatelessBlock{} + _ block.WithVerifyContext = &StatelessBlock{} + _ block.StateSummary = &SyncableBlock{} +) + +type StatefulBlock struct { + Prnt ids.ID `json:"parent"` + Tmstmp int64 `json:"timestamp"` + Hght uint64 `json:"height"` + + Txs []*Transaction `json:"txs"` + + // StateRoot is the root of the post-execution state + // of [Prnt]. + // + // This "deferred root" design allows for merklization + // to be done asynchronously instead of during [Build] + // or [Verify], which reduces the amount of time we are + // blocking the consensus engine from voting on the block, + // starting the verification of another block, etc. + StateRoot ids.ID `json:"stateRoot"` + WarpResults set.Bits64 `json:"warpResults"` + + size int + + // authCounts can be used by batch signature verification + // to preallocate memory + authCounts map[uint8]int +} + +func (b *StatefulBlock) Size() int { + return b.size +} + +func (b *StatefulBlock) ID() (ids.ID, error) { + blk, err := b.Marshal() + if err != nil { + return ids.ID{}, err + } + return utils.ToID(blk), nil +} + +// warpJob is used to signal to a listner that a *warp.Message has been +// verified. +type warpJob struct { + msg *warp.Message + signers int + verifiedChan chan bool + verified bool + warpNum int +} + +func NewGenesisBlock(root ids.ID) *StatefulBlock { + return &StatefulBlock{ + // We set the genesis block timestamp to be after the ProposerVM fork activation. 
+ // + // This prevents an issue (when using millisecond timestamps) during ProposerVM activation + // where the child timestamp is rounded down to the nearest second (which may be before + // the timestamp of its parent, which is denoted in milliseconds). + // + // Link: https://github.com/ava-labs/avalanchego/blob/0ec52a9c6e5b879e367688db01bb10174d70b212 + // .../vms/proposervm/pre_fork_block.go#L201 + Tmstmp: time.Date(2023, time.January, 1, 0, 0, 0, 0, time.UTC).UnixMilli(), + + // StateRoot should include all allocates made when loading the genesis file + StateRoot: root, + } +} + +// Stateless is defined separately from "Block" +// in case external packages needs use the stateful block +// without mocking VM or parent block +type StatelessBlock struct { + *StatefulBlock `json:"block"` + + id ids.ID + st choices.Status + t time.Time + bytes []byte + txsSet set.Set[ids.ID] + + warpMessages map[ids.ID]*warpJob + containsWarp bool // this allows us to avoid allocating a map when we build + bctx *block.Context + vdrState validators.State + + results []*Result + feeManager *fees.Manager + + vm VM + view merkledb.View + + sigJob workers.Job +} + +func NewBlock(vm VM, parent snowman.Block, tmstp int64) *StatelessBlock { + return &StatelessBlock{ + StatefulBlock: &StatefulBlock{ + Prnt: parent.ID(), + Tmstmp: tmstp, + Hght: parent.Height() + 1, + }, + vm: vm, + st: choices.Processing, + } +} + +func ParseBlock( + ctx context.Context, + source []byte, + status choices.Status, + vm VM, +) (*StatelessBlock, error) { + ctx, span := vm.Tracer().Start(ctx, "chain.ParseBlock") + defer span.End() + + blk, err := UnmarshalBlock(source, vm) + if err != nil { + return nil, err + } + // Not guaranteed that a parsed block is verified + return ParseStatefulBlock(ctx, blk, source, status, vm) +} + +// populateTxs is only called on blocks we did not build +func (b *StatelessBlock) populateTxs(ctx context.Context) error { + ctx, span := b.vm.Tracer().Start(ctx, "StatelessBlock.populateTxs") + defer span.End() + + // Setup signature verification job + _, sigVerifySpan := b.vm.Tracer().Start(ctx, "StatelessBlock.verifySignatures") //nolint:spancheck + job, err := b.vm.AuthVerifiers().NewJob(len(b.Txs)) + if err != nil { + return err //nolint:spancheck + } + b.sigJob = job + batchVerifier := NewAuthBatch(b.vm, b.sigJob, b.authCounts) + + // Make sure to always call [Done], otherwise we will block all future [Workers] + defer func() { + // BatchVerifier is given the responsibility to call [b.sigJob.Done()] because it may add things + // to the work queue async and that may not have completed by this point. + go batchVerifier.Done(func() { sigVerifySpan.End() }) + }() + + // Confirm no transaction duplicates and setup + // AWM processing + b.txsSet = set.NewSet[ids.ID](len(b.Txs)) + b.warpMessages = map[ids.ID]*warpJob{} + for _, tx := range b.Txs { + // Ensure there are no duplicate transactions + if b.txsSet.Contains(tx.ID()) { + return ErrDuplicateTx + } + b.txsSet.Add(tx.ID()) + + // Verify signature async + if b.vm.GetVerifyAuth() { + txDigest, err := tx.Digest() + if err != nil { + return err + } + batchVerifier.Add(txDigest, tx.Auth) + } + + // Check if we need the block context to verify the block (which contains + // an Avalanche Warp Message) + // + // Instead of erroring out if a warp message is invalid, we mark the + // verification as skipped and include it in the verification result so + // that a fee can still be deducted. 
+ if tx.WarpMessage != nil { + if len(b.warpMessages) == MaxWarpMessages { + return ErrTooManyWarpMessages + } + signers, err := tx.WarpMessage.Signature.NumSigners() + if err != nil { + return err + } + b.warpMessages[tx.ID()] = &warpJob{ + msg: tx.WarpMessage, + signers: signers, + verifiedChan: make(chan bool, 1), + warpNum: len(b.warpMessages), + } + b.containsWarp = true + } + } + return nil +} + +func ParseStatefulBlock( + ctx context.Context, + blk *StatefulBlock, + source []byte, + status choices.Status, + vm VM, +) (*StatelessBlock, error) { + ctx, span := vm.Tracer().Start(ctx, "chain.ParseStatefulBlock") + defer span.End() + + // Perform basic correctness checks before doing any expensive work + if blk.Tmstmp > time.Now().Add(FutureBound).UnixMilli() { + return nil, ErrTimestampTooLate + } + + if len(source) == 0 { + nsource, err := blk.Marshal() + if err != nil { + return nil, err + } + source = nsource + } + b := &StatelessBlock{ + StatefulBlock: blk, + t: time.UnixMilli(blk.Tmstmp), + bytes: source, + st: status, + vm: vm, + id: utils.ToID(source), + } + + // If we are parsing an older block, it will not be re-executed and should + // not be tracked as a parsed block + lastAccepted := b.vm.LastAcceptedBlock() + if lastAccepted == nil || b.Hght <= lastAccepted.Hght { // nil when parsing genesis + return b, nil + } + + // Populate hashes and tx set + return b, b.populateTxs(ctx) +} + +// [initializeBuilt] is invoked after a block is built +func (b *StatelessBlock) initializeBuilt( + ctx context.Context, + view merkledb.View, + results []*Result, + feeManager *fees.Manager, +) error { + _, span := b.vm.Tracer().Start(ctx, "StatelessBlock.initializeBuilt") + defer span.End() + + blk, err := b.StatefulBlock.Marshal() + if err != nil { + return err + } + b.bytes = blk + b.id = utils.ToID(b.bytes) + b.view = view + b.t = time.UnixMilli(b.StatefulBlock.Tmstmp) + b.results = results + b.feeManager = feeManager + b.txsSet = set.NewSet[ids.ID](len(b.Txs)) + for _, tx := range b.Txs { + b.txsSet.Add(tx.ID()) + if tx.WarpMessage != nil { + b.containsWarp = true + } + } + return nil +} + +// implements "snowman.Block.choices.Decidable" +func (b *StatelessBlock) ID() ids.ID { return b.id } + +// implements "block.WithVerifyContext" +func (b *StatelessBlock) ShouldVerifyWithContext(context.Context) (bool, error) { + return b.containsWarp, nil +} + +// implements "block.WithVerifyContext" +func (b *StatelessBlock) VerifyWithContext(ctx context.Context, bctx *block.Context) error { + start := time.Now() + defer func() { + b.vm.RecordBlockVerify(time.Since(start)) + }() + + stateReady := b.vm.StateReady() + ctx, span := b.vm.Tracer().Start( + ctx, "StatelessBlock.VerifyWithContext", + trace.WithAttributes( + attribute.Int("txs", len(b.Txs)), + attribute.Int64("height", int64(b.Hght)), + attribute.Bool("stateReady", stateReady), + attribute.Int64("pchainHeight", int64(bctx.PChainHeight)), + attribute.Bool("built", b.Processed()), + ), + ) + defer span.End() + + // Persist the context in case we need it during Accept + b.bctx = bctx + + // Proceed with normal verification + return b.verify(ctx, stateReady) +} + +// implements "snowman.Block" +func (b *StatelessBlock) Verify(ctx context.Context) error { + start := time.Now() + defer func() { + b.vm.RecordBlockVerify(time.Since(start)) + }() + + stateReady := b.vm.StateReady() + ctx, span := b.vm.Tracer().Start( + ctx, "StatelessBlock.Verify", + trace.WithAttributes( + attribute.Int("txs", len(b.Txs)), + attribute.Int64("height", int64(b.Hght)), + 
attribute.Bool("stateReady", stateReady), + attribute.Bool("built", b.Processed()), + ), + ) + defer span.End() + + return b.verify(ctx, stateReady) +} + +func (b *StatelessBlock) verify(ctx context.Context, stateReady bool) error { + log := b.vm.Logger() + switch { + case !stateReady: + // If the state of the accepted tip has not been fully fetched, it is not safe to + // verify any block. + log.Info( + "skipping verification, state not ready", + zap.Uint64("height", b.Hght), + zap.Stringer("blkID", b.ID()), + ) + case b.Processed(): + // If we built the block, the state will already be populated and we don't + // need to compute it (we assume that we built a correct block and it isn't + // necessary to re-verify anything). + log.Info( + "skipping verification, already processed", + zap.Uint64("height", b.Hght), + zap.Stringer("blkID", b.ID()), + ) + default: + // Get the [VerifyContext] needed to process this block. + // + // If the parent block's height is less than or equal to the last accepted height (and + // the last accepted height is processed), the accepted state will be used as the execution + // context. Otherwise, the parent block will be used as the execution context. + vctx, err := b.vm.GetVerifyContext(ctx, b.Hght, b.Prnt) + if err != nil { + b.vm.Logger().Warn("unable to get verify context", + zap.Uint64("height", b.Hght), + zap.Stringer("blkID", b.ID()), + zap.Error(err), + ) + return fmt.Errorf("%w: unable to load verify context", err) + } + + // Parent block may not be processed when we verify this block, so [innerVerify] may + // recursively verify ancestry. + if err := b.innerVerify(ctx, vctx); err != nil { + b.vm.Logger().Warn("verification failed", + zap.Uint64("height", b.Hght), + zap.Stringer("blkID", b.ID()), + zap.Error(err), + ) + return err + } + } + + // At any point after this, we may attempt to verify the block. We should be + // sure we are prepared to do so. + // + // NOTE: mempool is modified by VM handler + b.vm.Verified(ctx, b) + return nil +} + +// verifyWarpMessage will attempt to verify a given warp message provided by an +// Action. +func (b *StatelessBlock) verifyWarpMessage(ctx context.Context, r Rules, msg *warp.Message) bool { + // We do not check the validity of [SourceChainID] because a VM could send + // itself a message to trigger a chain upgrade. + allowed, num, denom := r.GetWarpConfig(msg.SourceChainID) + if !allowed { + b.vm.Logger(). + Warn("unable to verify warp message", zap.Stringer("warpID", msg.ID()), zap.Error(ErrDisabledChainID)) + return false + } + if err := msg.Signature.Verify( + ctx, + &msg.UnsignedMessage, + r.NetworkID(), + b.vdrState, + b.bctx.PChainHeight, + num, + denom, + ); err != nil { + b.vm.Logger(). + Warn("unable to verify warp message", zap.Stringer("warpID", msg.ID()), zap.Error(err)) + return false + } + return true +} + +// innerVerify executes the block on top of the provided [VerifyContext]. +// +// Invariants: +// Accepted / Rejected blocks should never have Verify called on them. +// Blocks that were verified (and returned nil) with Verify will not have verify called again. +// Blocks that were verified with VerifyWithContext may have verify called multiple times. +// +// When this may be called: +// 1. [Verify|VerifyWithContext] +// 2. If the parent view is missing when verifying (dynamic state sync) +// 3. 
If the view of a block we are accepting is missing (finishing dynamic +// state sync) +func (b *StatelessBlock) innerVerify(ctx context.Context, vctx VerifyContext) error { + var ( + log = b.vm.Logger() + r = b.vm.Rules(b.Tmstmp) + ) + + // Perform basic correctness checks before doing any expensive work + if b.Timestamp().UnixMilli() > time.Now().Add(FutureBound).UnixMilli() { + return ErrTimestampTooLate + } + + // Fetch view where we will apply block state transitions + // + // This call may result in our ancestry being verified. + parentView, err := vctx.View(ctx, true) + if err != nil { + return fmt.Errorf("%w: unable to load parent view", err) + } + + // Fetch parent height key and ensure block height is valid + heightKey := HeightKey(b.vm.StateManager().HeightKey()) + parentHeightRaw, err := parentView.GetValue(ctx, heightKey) + if err != nil { + return err + } + parentHeight := binary.BigEndian.Uint64(parentHeightRaw) + if b.Hght != parentHeight+1 { + return ErrInvalidBlockHeight + } + + // Fetch parent timestamp and confirm block timestamp is valid + // + // Parent may not be available (if we preformed state sync), so we + // can't rely on being able to fetch it during verification. + timestampKey := TimestampKey(b.vm.StateManager().TimestampKey()) + parentTimestampRaw, err := parentView.GetValue(ctx, timestampKey) + if err != nil { + return err + } + parentTimestamp := int64(binary.BigEndian.Uint64(parentTimestampRaw)) + if b.Tmstmp < parentTimestamp+r.GetMinBlockGap() { + return ErrTimestampTooEarly + } + if len(b.Txs) == 0 && b.Tmstmp < parentTimestamp+r.GetMinEmptyBlockGap() { + return ErrTimestampTooEarly + } + + // Ensure tx cannot be replayed + // + // Before node is considered ready (emap is fully populated), this may return + // false when other validators think it is true. + // + // If a block is already accepted, its transactions have already been added + // to the VM's seen emap and calling [IsRepeat] will return a non-zero value. + if b.st != choices.Accepted { + oldestAllowed := b.Tmstmp - r.GetValidityWindow() + if oldestAllowed < 0 { + // Can occur if verifying genesis + oldestAllowed = 0 + } + dup, err := vctx.IsRepeat(ctx, oldestAllowed, b.Txs, set.NewBits(), true) + if err != nil { + return err + } + if dup.Len() > 0 { + return fmt.Errorf("%w: duplicate in ancestry", ErrDuplicateTx) + } + } + + // Start validating warp messages, if they exist + var invalidWarpResult bool + if b.containsWarp { + if b.bctx == nil { + log.Error( + "missing verify block context", + zap.Uint64("height", b.Hght), + zap.Stringer("id", b.ID()), + ) + return ErrMissingBlockContext + } + _, warpVerifySpan := b.vm.Tracer().Start(ctx, "StatelessBlock.verifyWarpMessages") //nolint:spancheck + b.vdrState = b.vm.ValidatorState() + go func() { + defer warpVerifySpan.End() + // We don't use [b.vm.Workers] here because we need the warp verification + // results during normal execution. If we added a job to the workers queue, + // it would get executed after all signatures. Additionally, BLS + // Multi-Signature verification is already parallelized so we should just + // do one at a time to avoid overwhelming the CPU. 
+ for txID, msg := range b.warpMessages {
+ if ctx.Err() != nil {
+ return
+ }
+ blockVerified := b.WarpResults.Contains(uint(msg.warpNum))
+ if b.vm.IsBootstrapped() && !invalidWarpResult {
+ start := time.Now()
+ verified := b.verifyWarpMessage(ctx, r, msg.msg)
+ msg.verifiedChan <- verified
+ msg.verified = verified
+ log.Info(
+ "processed warp message",
+ zap.Stringer("txID", txID),
+ zap.Bool("verified", verified),
+ zap.Int("signers", msg.signers),
+ zap.Duration("t", time.Since(start)),
+ )
+ if blockVerified != verified {
+ invalidWarpResult = true
+ }
+ } else {
+ // When we are bootstrapping, we just use the result in the block.
+ //
+ // We also use the result in the block when we have found
+ // a verification mismatch (our verify result is different than the
+ // block) to avoid doing extra work.
+ msg.verifiedChan <- blockVerified
+ msg.verified = blockVerified
+ }
+ }
+ }()
+ }
+
+ // Compute next unit prices to use
+ feeKey := FeeKey(b.vm.StateManager().FeeKey())
+ feeRaw, err := parentView.GetValue(ctx, feeKey)
+ if err != nil {
+ return err //nolint:spancheck
+ }
+ parentFeeManager := fees.NewManager(feeRaw)
+ feeManager, err := parentFeeManager.ComputeNext(parentTimestamp, b.Tmstmp, r)
+ if err != nil {
+ return err
+ }
+
+ // Process transactions
+ results, ts, err := b.Execute(ctx, b.vm.Tracer(), parentView, feeManager, r)
+ if err != nil {
+ log.Error("failed to execute block", zap.Error(err))
+ return err
+ }
+ b.results = results
+ b.feeManager = feeManager
+
+ // Ensure warp results are correct
+ if invalidWarpResult {
+ return ErrWarpResultMismatch
+ }
+ numWarp := len(b.warpMessages)
+ if numWarp > MaxWarpMessages {
+ return ErrTooManyWarpMessages
+ }
+ var warpResultsLimit set.Bits64
+ warpResultsLimit.Add(uint(numWarp))
+ if b.WarpResults >= warpResultsLimit {
+ // If the value of [WarpResults] is greater than the value of uint64 with
+ // a 1-bit shifted [numWarp] times, then there are unused bits set to
+ // 1 (which is not allowed).
+ return ErrWarpResultMismatch
+ }
+
+ // Update chain metadata
+ heightKeyStr := string(heightKey)
+ timestampKeyStr := string(timestampKey)
+ feeKeyStr := string(feeKey)
+
+ keys := make(state.Keys)
+ keys.Add(heightKeyStr, state.Write)
+ keys.Add(timestampKeyStr, state.Write)
+ keys.Add(feeKeyStr, state.Write)
+ tsv := ts.NewView(keys, map[string][]byte{
+ heightKeyStr: parentHeightRaw,
+ timestampKeyStr: parentTimestampRaw,
+ feeKeyStr: parentFeeManager.Bytes(),
+ })
+ if err := tsv.Insert(ctx, heightKey, binary.BigEndian.AppendUint64(nil, b.Hght)); err != nil {
+ return err
+ }
+ if err := tsv.Insert(ctx, timestampKey, binary.BigEndian.AppendUint64(nil, uint64(b.Tmstmp))); err != nil {
+ return err
+ }
+ if err := tsv.Insert(ctx, feeKey, feeManager.Bytes()); err != nil {
+ return err
+ }
+ tsv.Commit()
+
+ // Compare state root
+ //
+ // Because fee bytes are not recorded in state, it is sufficient to check the state root
+ // to verify all fee calculations were correct.
+ _, rspan := b.vm.Tracer().Start(ctx, "StatelessBlock.Verify.WaitRoot") + start := time.Now() + computedRoot, err := parentView.GetMerkleRoot(ctx) + rspan.End() + if err != nil { + return err + } + b.vm.RecordWaitRoot(time.Since(start)) + if b.StateRoot != computedRoot { + return fmt.Errorf( + "%w: expected=%s found=%s", + ErrStateRootMismatch, + computedRoot, + b.StateRoot, + ) + } + + // Ensure signatures are verified + _, sspan := b.vm.Tracer().Start(ctx, "StatelessBlock.Verify.WaitSignatures") + start = time.Now() + err = b.sigJob.Wait() + sspan.End() + if err != nil { + return err + } + b.vm.RecordWaitSignatures(time.Since(start)) + + // Get view from [tstate] after processing all state transitions + b.vm.RecordStateChanges(ts.PendingChanges()) + b.vm.RecordStateOperations(ts.OpIndex()) + view, err := ts.ExportMerkleDBView(ctx, b.vm.Tracer(), parentView) + if err != nil { + return err + } + b.view = view + + // Kickoff root generation + go func() { + start := time.Now() + root, err := view.GetMerkleRoot(ctx) + if err != nil { + log.Error("merkle root generation failed", zap.Error(err)) + return + } + log.Info("merkle root generated", + zap.Uint64("height", b.Hght), + zap.Stringer("blkID", b.ID()), + zap.Stringer("root", root), + ) + b.vm.RecordRootCalculated(time.Since(start)) + }() + return nil +} + +// implements "snowman.Block.choices.Decidable" +func (b *StatelessBlock) Accept(ctx context.Context) error { + start := time.Now() + defer func() { + b.vm.RecordBlockAccept(time.Since(start)) + }() + + ctx, span := b.vm.Tracer().Start(ctx, "StatelessBlock.Accept") + defer span.End() + + // Consider verifying the a block if it is not processed and we are no longer + // syncing. + if !b.Processed() { + // The state of this block was not calculated during the call to + // [StatelessBlock.Verify]. This is because the VM was state syncing + // and did not have the state necessary to verify the block. + updated, err := b.vm.UpdateSyncTarget(b) + if err != nil { + return err + } + if updated { + b.vm.Logger().Info("updated state sync target", + zap.Stringer("id", b.ID()), + zap.Stringer("root", b.StateRoot), + ) + return nil // the sync is still ongoing + } + + // This code handles the case where this block was not + // verified during state sync (stopped syncing with a + // processing block). + // + // If state sync completes before accept is called + // then we need to process it here. + b.vm.Logger().Info("verifying unprocessed block in accept", + zap.Stringer("id", b.ID()), + zap.Stringer("root", b.StateRoot), + ) + vctx, err := b.vm.GetVerifyContext(ctx, b.Hght, b.Prnt) + if err != nil { + return fmt.Errorf("%w: unable to get verify context", err) + } + if err := b.innerVerify(ctx, vctx); err != nil { + return fmt.Errorf("%w: unable to verify block", err) + } + } + + // Commit view if we don't return before here (would happen if we are still + // syncing) + if err := b.view.CommitToDB(ctx); err != nil { + return fmt.Errorf("%w: unable to commit block", err) + } + + // Mark block as accepted and update last accepted in storage + b.MarkAccepted(ctx) + return nil +} + +func (b *StatelessBlock) MarkAccepted(ctx context.Context) { + // Accept block and free unnecessary memory + b.st = choices.Accepted + b.txsSet = nil // only used for replay protection when processing + + // [Accepted] will persist the block to disk and set in-memory variables + // needed to ensure we don't resync all blocks when state sync finishes. 
+ // + // Note: We will not call [b.vm.Verified] before accepting during state sync + b.vm.Accepted(ctx, b) +} + +// implements "snowman.Block.choices.Decidable" +func (b *StatelessBlock) Reject(ctx context.Context) error { + ctx, span := b.vm.Tracer().Start(ctx, "StatelessBlock.Reject") + defer span.End() + + b.st = choices.Rejected + b.vm.Rejected(ctx, b) + return nil +} + +// implements "snowman.Block.choices.Decidable" +func (b *StatelessBlock) Status() choices.Status { return b.st } + +// implements "snowman.Block" +func (b *StatelessBlock) Parent() ids.ID { return b.StatefulBlock.Prnt } + +// implements "snowman.Block" +func (b *StatelessBlock) Bytes() []byte { return b.bytes } + +// implements "snowman.Block" +func (b *StatelessBlock) Height() uint64 { return b.StatefulBlock.Hght } + +// implements "snowman.Block" +func (b *StatelessBlock) Timestamp() time.Time { return b.t } + +// Used to determine if should notify listeners and/or pass to controller +func (b *StatelessBlock) Processed() bool { + return b.view != nil +} + +// View returns the [merkledb.TrieView] of the block (representing the state +// post-execution) or returns the accepted state if the block is accepted or +// is height 0 (genesis). +// +// If [b.view] is nil (not processed), this function will either return an error or will +// run verification (depending on whether the height is in [acceptedState]). +// +// We still need to handle returning the accepted state here because +// the [VM] will call [View] on the preferred tip of the chain (whether or +// not it is accepted). +// +// Invariant: [View] with [verify] == true should not be called concurrently, otherwise, +// it will result in undefined behavior. +func (b *StatelessBlock) View(ctx context.Context, verify bool) (state.View, error) { + ctx, span := b.vm.Tracer().Start(ctx, "StatelessBlock.View", + trace.WithAttributes( + attribute.Bool("processed", b.Processed()), + attribute.Bool("verify", verify), + ), + ) + defer span.End() + + // If this is the genesis block, return the base state. + if b.Hght == 0 { + return b.vm.State() + } + + // If block is processed, we can return either the accepted state + // or its pending view. + if b.Processed() { + if b.st == choices.Accepted { + // We assume that base state was properly updated if this + // block was accepted (this is not obvious because + // the accepted state may be that of the parent of the last + // accepted block right after state sync finishes). + return b.vm.State() + } + return b.view, nil + } + + // If the block is not processed but [acceptedState] equals the height + // of the block, we should return the accepted state. + // + // This can happen when we are building a child block immediately after + // restart (latest block will not be considered [Processed] because there + // will be no attached view from execution). + // + // We cannot use the merkle root to check against the accepted state + // because the block only contains the root of the parent block's post-execution. 
+ if b.st == choices.Accepted { + acceptedState, err := b.vm.State() + if err != nil { + return nil, err + } + acceptedHeightRaw, err := acceptedState.Get(HeightKey(b.vm.StateManager().HeightKey())) + if err != nil { + return nil, err + } + acceptedHeight := binary.BigEndian.Uint64(acceptedHeightRaw) + if acceptedHeight == b.Hght { + b.vm.Logger().Info("accepted block not processed but found post-execution state on-disk", + zap.Uint64("height", b.Hght), + zap.Stringer("blkID", b.ID()), + zap.Bool("verify", verify), + ) + return acceptedState, nil + } + b.vm.Logger().Info("accepted block not processed and does not match state on-disk", + zap.Uint64("height", b.Hght), + zap.Stringer("blkID", b.ID()), + zap.Bool("verify", verify), + ) + } else { + b.vm.Logger().Info("block not processed", + zap.Uint64("height", b.Hght), + zap.Stringer("blkID", b.ID()), + zap.Bool("verify", verify), + ) + } + if !verify { + return nil, ErrBlockNotProcessed + } + + // If there are no processing blocks when state sync finishes, + // the first block we attempt to verify will reach this execution + // path. + // + // In this scenario, the last accepted block will not be processed + // and [acceptedState] will correspond to the post-execution state + // of the new block's grandparent (our parent). To remedy this, + // we need to process this block to return a valid view. + b.vm.Logger().Info("verifying block when view requested", + zap.Uint64("height", b.Hght), + zap.Stringer("blkID", b.ID()), + zap.Bool("accepted", b.st == choices.Accepted), + ) + vctx, err := b.vm.GetVerifyContext(ctx, b.Hght, b.Prnt) + if err != nil { + b.vm.Logger().Error("unable to get verify context", zap.Error(err)) + return nil, err + } + if err := b.innerVerify(ctx, vctx); err != nil { + b.vm.Logger().Error("unable to verify block", zap.Error(err)) + return nil, err + } + if b.st != choices.Accepted { + return b.view, nil + } + + // If the block is already accepted, we should update + // the accepted state to ensure future calls to [View] + // return the correct state (now that the block is considered + // processed). + // + // It is not possible to reach this function if this block + // is not the child of the block whose post-execution state + // is currently stored on disk, so it is safe to call [CommitToDB]. + if err := b.view.CommitToDB(ctx); err != nil { + b.vm.Logger().Error("unable to commit to DB", zap.Error(err)) + return nil, err + } + return b.vm.State() +} + +// IsRepeat returns a bitset of all transactions that are considered repeats in +// the range that spans back to [oldestAllowed]. +// +// If [stop] is set to true, IsRepeat will return as soon as the first repeat +// is found (useful for block verification). +func (b *StatelessBlock) IsRepeat( + ctx context.Context, + oldestAllowed int64, + txs []*Transaction, + marker set.Bits, + stop bool, +) (set.Bits, error) { + ctx, span := b.vm.Tracer().Start(ctx, "StatelessBlock.IsRepeat") + defer span.End() + + // Early exit if we are already back at least [ValidityWindow] + // + // It is critical to ensure this logic is equivalent to [emap] to avoid + // non-deterministic verification. 
+ if b.Tmstmp < oldestAllowed { + return marker, nil + } + + // If we are at an accepted block or genesis, we can use the emap on the VM + // instead of checking each block + if b.st == choices.Accepted || b.Hght == 0 /* genesis */ { + return b.vm.IsRepeat(ctx, txs, marker, stop), nil + } + + // Check if block contains any overlapping txs + for i, tx := range txs { + if marker.Contains(i) { + continue + } + if b.txsSet.Contains(tx.ID()) { + marker.Add(i) + if stop { + return marker, nil + } + } + } + prnt, err := b.vm.GetStatelessBlock(ctx, b.Prnt) + if err != nil { + return marker, err + } + return prnt.IsRepeat(ctx, oldestAllowed, txs, marker, stop) +} + +func (b *StatelessBlock) GetTxs() []*Transaction { + return b.Txs +} + +func (b *StatelessBlock) GetTimestamp() int64 { + return b.Tmstmp +} + +func (b *StatelessBlock) Results() []*Result { + return b.results +} + +func (b *StatelessBlock) FeeManager() *fees.Manager { + return b.feeManager +} + +func (b *StatefulBlock) Marshal() ([]byte, error) { + size := consts.IDLen + consts.Uint64Len + consts.Uint64Len + + consts.Uint64Len + window.WindowSliceSize + + consts.IntLen + codec.CummSize(b.Txs) + + consts.IDLen + consts.Uint64Len + consts.Uint64Len + + p := codec.NewWriter(size, consts.NetworkSizeLimit) + + p.PackID(b.Prnt) + p.PackInt64(b.Tmstmp) + p.PackUint64(b.Hght) + + p.PackInt(len(b.Txs)) + b.authCounts = map[uint8]int{} + for _, tx := range b.Txs { + if err := tx.Marshal(p); err != nil { + return nil, err + } + b.authCounts[tx.Auth.GetTypeID()]++ + } + + p.PackID(b.StateRoot) + p.PackUint64(uint64(b.WarpResults)) + bytes := p.Bytes() + if err := p.Err(); err != nil { + return nil, err + } + b.size = len(bytes) + return bytes, nil +} + +func UnmarshalBlock(raw []byte, parser Parser) (*StatefulBlock, error) { + var ( + p = codec.NewReader(raw, consts.NetworkSizeLimit) + b StatefulBlock + ) + b.size = len(raw) + + p.UnpackID(false, &b.Prnt) + b.Tmstmp = p.UnpackInt64(false) + b.Hght = p.UnpackUint64(false) + + // Parse transactions + txCount := p.UnpackInt(false) // can produce empty blocks + actionRegistry, authRegistry := parser.Registry() + b.Txs = []*Transaction{} // don't preallocate all to avoid DoS + b.authCounts = map[uint8]int{} + for i := 0; i < txCount; i++ { + tx, err := UnmarshalTx(p, actionRegistry, authRegistry) + if err != nil { + return nil, err + } + b.Txs = append(b.Txs, tx) + b.authCounts[tx.Auth.GetTypeID()]++ + } + + p.UnpackID(false, &b.StateRoot) + b.WarpResults = set.Bits64(p.UnpackUint64(false)) + + // Ensure no leftover bytes + if !p.Empty() { + return nil, fmt.Errorf("%w: remaining=%d", ErrInvalidObject, len(raw)-p.Offset()) + } + return &b, p.Err() +} + +type SyncableBlock struct { + *StatelessBlock +} + +func (sb *SyncableBlock) Accept(ctx context.Context) (block.StateSyncMode, error) { + return sb.vm.AcceptedSyncableBlock(ctx, sb) +} + +func NewSyncableBlock(sb *StatelessBlock) *SyncableBlock { + return &SyncableBlock{sb} +} + +func (sb *SyncableBlock) String() string { + return fmt.Sprintf("%d:%s root=%s", sb.Height(), sb.ID(), sb.StateRoot) +} + +// Testing +func (b *StatelessBlock) MarkUnprocessed() { + b.view = nil +} From f604320f4321d0c5fc4b8268129e67ef572bd0b5 Mon Sep 17 00:00:00 2001 From: francois Date: Mon, 22 Apr 2024 18:02:05 +0200 Subject: [PATCH 15/48] apply review changes: call root on view, nits --- merkle/merkle.go | 8 ++---- merkle/merkle_test.go | 57 +++++++++++++++++++++---------------------- 2 files changed, 30 insertions(+), 35 deletions(-) diff --git a/merkle/merkle.go 
b/merkle/merkle.go index c2a7088fea..b5df63a5f1 100644 --- a/merkle/merkle.go +++ b/merkle/merkle.go @@ -5,14 +5,13 @@ import ( "github.com/ava-labs/avalanchego/database" "github.com/ava-labs/avalanchego/database/memdb" - "github.com/ava-labs/avalanchego/trace" "github.com/ava-labs/avalanchego/x/merkledb" "github.com/ava-labs/hypersdk/utils" "github.com/ava-labs/avalanchego/ids" ) // Generate merkle root for a set of items -func GenerateMerkleRoot(ctx context.Context, config merkledb.Config, tracer trace.Tracer, merkleItems [][]byte, consumeBytes bool) (ids.ID, merkledb.MerkleDB, error) { +func GenerateMerkleRoot(ctx context.Context, config merkledb.Config, merkleItems [][]byte, consumeBytes bool) (ids.ID, merkledb.MerkleDB, error) { batchOps := make([]database.BatchOp, 0, len(merkleItems)) for _, item := range merkleItems { @@ -32,11 +31,8 @@ func GenerateMerkleRoot(ctx context.Context, config merkledb.Config, tracer trac if err != nil { return ids.Empty, nil, err } - if err := view.CommitToDB(ctx); err != nil { - return ids.Empty, nil, err - } - root, err := db.GetMerkleRoot(ctx) + root, err := view.GetMerkleRoot(ctx) if err != nil { return ids.Empty, nil, err } diff --git a/merkle/merkle_test.go b/merkle/merkle_test.go index 176411209c..99853f0b33 100644 --- a/merkle/merkle_test.go +++ b/merkle/merkle_test.go @@ -11,6 +11,7 @@ import ( "github.com/ava-labs/avalanchego/trace" "github.com/ava-labs/avalanchego/utils/units" "github.com/ava-labs/avalanchego/x/merkledb" + "github.com/stretchr/testify/require" ) var resRoot ids.ID @@ -19,43 +20,41 @@ var resErr error func BenchmarkMerkleTxRoot(b *testing.B) { for _, size := range []int{10, 100, 1000, 10000} { - ctx := context.TODO() - tracer := trace.Noop - merkleItems := make([][]byte, 0, size) - for i := 0; i < size; i++ { - item := make([]byte, 32) - _, err := rand.Read(item) - if err != nil { - b.Fatal(err) - } - merkleItems = append(merkleItems, item) - } - - var root ids.ID - var db merkledb.MerkleDB - var err error - - defaultConfig := merkledb.Config{ - BranchFactor: merkledb.BranchFactor16, - HistoryLength: 100, - IntermediateNodeCacheSize: units.MiB, - ValueNodeCacheSize: units.MiB, - Tracer: tracer, - } + ctx := context.TODO() + tracer := trace.Noop + merkleItems := make([][]byte, 0, size) + for i := 0; i < size; i++ { + item := make([]byte, 32) + _, err := rand.Read(item) + require.NoError(b, err) + merkleItems = append(merkleItems, item) + } + + var root ids.ID + var db merkledb.MerkleDB + var err error + + defaultConfig := merkledb.Config{ + BranchFactor: merkledb.BranchFactor16, + HistoryLength: 100, + IntermediateNodeCacheSize: units.MiB, + ValueNodeCacheSize: units.MiB, + Tracer: tracer, + } b.Run(strconv.Itoa(size), func(b *testing.B) { for n := 0; n < b.N; n++ { for i := 0; i < size; i++ { - root, db, err = GenerateMerkleRoot(ctx, defaultConfig, tracer, merkleItems, false) + root, db, err = GenerateMerkleRoot(ctx, defaultConfig, merkleItems, false) } } }) - // avoid compiler optimizations to cancel out the bench - resRoot = root - resDb = db - resErr = err + // avoid compiler optimizations to cancel out the bench + resRoot = root + resDb = db + resErr = err } - b.ReportAllocs() + b.ReportAllocs() } From 40774f30187e91510c21d506f312e03e300206e5 Mon Sep 17 00:00:00 2001 From: Francois Hardrouyere Date: Fri, 26 Apr 2024 20:18:42 +0200 Subject: [PATCH 16/48] lint --- merkle/merkle_test.go | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/merkle/merkle_test.go b/merkle/merkle_test.go index 
99853f0b33..63fa641a88 100644 --- a/merkle/merkle_test.go +++ b/merkle/merkle_test.go @@ -1,11 +1,10 @@ package merkle import ( - "testing" - "context" "crypto/rand" "strconv" + "testing" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/trace" @@ -14,9 +13,11 @@ import ( "github.com/stretchr/testify/require" ) -var resRoot ids.ID -var resDb merkledb.MerkleDB -var resErr error +var ( + resRoot ids.ID + resDB merkledb.MerkleDB + resErr error +) func BenchmarkMerkleTxRoot(b *testing.B) { for _, size := range []int{10, 100, 1000, 10000} { @@ -52,7 +53,7 @@ func BenchmarkMerkleTxRoot(b *testing.B) { // avoid compiler optimizations to cancel out the bench resRoot = root - resDb = db + resDB = db resErr = err } From c5565dd0fc1d2dcd6857e048d69af23463f784cc Mon Sep 17 00:00:00 2001 From: Francois Hardrouyere Date: Fri, 26 Apr 2024 20:37:12 +0200 Subject: [PATCH 17/48] fix lints --- merkle/merkle.go | 6 +++++- merkle/merkle_test.go | 3 +++ x/programs/cmd/simulator/cmd/program.go | 1 - 3 files changed, 8 insertions(+), 2 deletions(-) diff --git a/merkle/merkle.go b/merkle/merkle.go index b5df63a5f1..6a33cc490e 100644 --- a/merkle/merkle.go +++ b/merkle/merkle.go @@ -1,3 +1,6 @@ +// Copyright (C) 2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + package merkle import ( @@ -5,9 +8,10 @@ import ( "github.com/ava-labs/avalanchego/database" "github.com/ava-labs/avalanchego/database/memdb" + "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/x/merkledb" + "github.com/ava-labs/hypersdk/utils" - "github.com/ava-labs/avalanchego/ids" ) // Generate merkle root for a set of items diff --git a/merkle/merkle_test.go b/merkle/merkle_test.go index 63fa641a88..9c3fc7d630 100644 --- a/merkle/merkle_test.go +++ b/merkle/merkle_test.go @@ -1,3 +1,6 @@ +// Copyright (C) 2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + package merkle import ( diff --git a/x/programs/cmd/simulator/cmd/program.go b/x/programs/cmd/simulator/cmd/program.go index d0c9a98307..7a2b33012b 100644 --- a/x/programs/cmd/simulator/cmd/program.go +++ b/x/programs/cmd/simulator/cmd/program.go @@ -144,7 +144,6 @@ func programExecuteFunc( ) (ids.ID, []int64, uint64, error) { // simulate create program transaction programTxID, err := generateRandomID() - if err != nil { return ids.Empty, nil, 0, err } From b86e5beddd6921022f991aaf9da6057e5585fbaa Mon Sep 17 00:00:00 2001 From: Franfran <51274081+iFrostizz@users.noreply.github.com> Date: Mon, 29 Apr 2024 14:30:13 +0200 Subject: [PATCH 18/48] Cached state values in the program state (#840) * add cache * implement cache reads * actually cache reads * implement cache delete and handle borsch errors * use only one cache map * remove flushed bool * root of transactions by stateless merkledb * make root generation a function * preallocate memory for merkle array and consumebytes flag * add <*.code-workspace> to .gitignore and remove it from git commit * move root generation func to merkle package, tx root by items of [txID + result] * rebase & blk marshal/unmarshal & merkleroot to ids.ID * write benches for the merkle package * use crypto/rand, fix var name, report allocs * put the 10k bench back * pass config by parameter * happy clippy * borrow V * add TODO * Revert "pass config by parameter" This reverts commit 4aec58931b749fd9d4865c779d9ea43fead5694e. * Revert "put the 10k bench back" This reverts commit 058d7e70bb9007e407d8bbb578cae19c33f31cd1. 
* Revert "use crypto/rand, fix var name, report allocs" This reverts commit 214005b440230fa9f40ce353cb341d4b065e1f3c. * Revert "write benches for the merkle package" This reverts commit 07993bf41a7e206f8041dfce6f08aba05c81f33d. * Revert "rebase & blk marshal/unmarshal & merkleroot to ids.ID" This reverts commit 74428360e2bad4a6689688d4d70e9251105b1274. * Revert "move root generation func to merkle package, tx root by items of [txID + result]" This reverts commit e5519601c14267b5ea13f49cf7ac81e4d9d77415. * Revert "add <*.code-workspace> to .gitignore and remove it from git commit" This reverts commit ce0028973ea65c323238533972a53af4d75facc6. * Revert "preallocate memory for merkle array and consumebytes flag" This reverts commit 68e49b643185013da13b2528e7d0dea9d835858d. * Revert "make root generation a function" This reverts commit aa44f975003e03a628c5cbe0cdb25d7d33a4cb73. * Revert "pass config by parameter" This reverts commit 4aec58931b749fd9d4865c779d9ea43fead5694e. * Revert "move root generation func to merkle package, tx root by items of [txID + result]" This reverts commit e5519601c14267b5ea13f49cf7ac81e4d9d77415. * Revert "preallocate memory for merkle array and consumebytes flag" This reverts commit 68e49b643185013da13b2528e7d0dea9d835858d. * Revert "make root generation a function" This reverts commit aa44f975003e03a628c5cbe0cdb25d7d33a4cb73. * merge main! * merge imports --------- Co-authored-by: bianyuanop Co-authored-by: Richard Pringle --- x/programs/rust/examples/counter/src/lib.rs | 2 +- x/programs/rust/examples/token/src/lib.rs | 6 +- x/programs/rust/sdk_macros/src/lib.rs | 2 +- x/programs/rust/wasmlanche-sdk/src/program.rs | 8 +- x/programs/rust/wasmlanche-sdk/src/state.rs | 86 ++++++++++++++----- x/programs/rust/wasmlanche-sdk/src/types.rs | 2 +- 6 files changed, 76 insertions(+), 30 deletions(-) diff --git a/x/programs/rust/examples/counter/src/lib.rs b/x/programs/rust/examples/counter/src/lib.rs index 1ee7745922..fa366541cd 100644 --- a/x/programs/rust/examples/counter/src/lib.rs +++ b/x/programs/rust/examples/counter/src/lib.rs @@ -13,7 +13,7 @@ pub fn initialize_address(context: Context, address: Address) -> bool { if program .state() - .get::(StateKeys::Counter(address)) + .get::(StateKeys::Counter(address)) .is_ok() { panic!("counter already initialized for address") diff --git a/x/programs/rust/examples/token/src/lib.rs b/x/programs/rust/examples/token/src/lib.rs index 23251cabad..e523308554 100644 --- a/x/programs/rust/examples/token/src/lib.rs +++ b/x/programs/rust/examples/token/src/lib.rs @@ -59,7 +59,7 @@ pub fn mint_to(context: Context, recipient: Address, amount: i64) -> bool { let Context { program } = context; let balance = program .state() - .get::(StateKey::Balance(recipient)) + .get::(StateKey::Balance(recipient)) .unwrap_or_default(); program @@ -90,14 +90,14 @@ pub fn transfer(context: Context, sender: Address, recipient: Address, amount: i // ensure the sender has adequate balance let sender_balance = program .state() - .get::(StateKey::Balance(sender)) + .get::(StateKey::Balance(sender)) .expect("failed to update balance"); assert!(amount >= 0 && sender_balance >= amount, "invalid input"); let recipient_balance = program .state() - .get::(StateKey::Balance(recipient)) + .get::(StateKey::Balance(recipient)) .unwrap_or_default(); // update balances diff --git a/x/programs/rust/sdk_macros/src/lib.rs b/x/programs/rust/sdk_macros/src/lib.rs index ff8ac84299..c6c90441a3 100644 --- a/x/programs/rust/sdk_macros/src/lib.rs +++ 
b/x/programs/rust/sdk_macros/src/lib.rs @@ -155,7 +155,7 @@ pub fn state_keys(_attr: TokenStream, item: TokenStream) -> TokenStream { let mut item_enum = parse_macro_input!(item as ItemEnum); // add default attributes item_enum.attrs.push(syn::parse_quote! { - #[derive(Clone, Copy, Debug)] + #[derive(Clone, Copy, Debug, Eq, PartialEq, Hash)] }); item_enum.attrs.push(syn::parse_quote! { #[repr(u8)] diff --git a/x/programs/rust/wasmlanche-sdk/src/program.rs b/x/programs/rust/wasmlanche-sdk/src/program.rs index c4598dddbe..8462f9ae4d 100644 --- a/x/programs/rust/wasmlanche-sdk/src/program.rs +++ b/x/programs/rust/wasmlanche-sdk/src/program.rs @@ -1,5 +1,8 @@ +use std::hash::Hash; + use borsh::{BorshDeserialize, BorshSerialize}; +use crate::state::Key; use crate::{memory::to_host_ptr, state::Error as StateError, state::State, Params}; /// Represents the current Program in the context of the caller. Or an external @@ -25,7 +28,10 @@ impl Program { /// Returns a State object that can be used to interact with persistent /// storage exposed by the host. #[must_use] - pub fn state(&self) -> State { + pub fn state(&self) -> State + where + K: Into + Hash + PartialEq + Eq + Clone, + { State::new(Program::new(*self.id())) } diff --git a/x/programs/rust/wasmlanche-sdk/src/state.rs b/x/programs/rust/wasmlanche-sdk/src/state.rs index eab090ec9e..0d6332c77b 100644 --- a/x/programs/rust/wasmlanche-sdk/src/state.rs +++ b/x/programs/rust/wasmlanche-sdk/src/state.rs @@ -1,6 +1,6 @@ -use crate::{memory::from_host_ptr, program::Program}; -use borsh::{BorshDeserialize, BorshSerialize}; -use std::ops::Deref; +use crate::{from_host_ptr, program::Program, state::Error as StateError}; +use borsh::{from_slice, to_vec, BorshDeserialize, BorshSerialize}; +use std::{collections::HashMap, hash::Hash, ops::Deref}; #[derive(Clone, thiserror::Error, Debug)] pub enum Error { @@ -38,14 +38,36 @@ pub enum Error { Delete, } -pub struct State { +pub struct State +where + K: Into + Hash + PartialEq + Eq + Clone, +{ program: Program, + cache: HashMap>, } -impl State { +impl Drop for State +where + K: Into + Hash + PartialEq + Eq + Clone, +{ + fn drop(&mut self) { + if !self.cache.is_empty() { + // force flush + self.flush().unwrap(); + } + } +} + +impl State +where + K: Into + Hash + PartialEq + Eq + Clone, +{ #[must_use] pub fn new(program: Program) -> Self { - Self { program } + Self { + program, + cache: HashMap::new(), + } } /// Store a key and value to the host storage. If the key already exists, @@ -53,12 +75,14 @@ impl State { /// # Errors /// Returns an [Error] if the key or value cannot be /// serialized or if the host fails to handle the operation. - pub fn store(&self, key: K, value: &V) -> Result<(), Error> + pub fn store(&mut self, key: K, value: &V) -> Result<(), Error> where V: BorshSerialize, - K: Into, { - unsafe { host::put_bytes(&self.program, &key.into(), value) } + let serialized = to_vec(&value).map_err(|_| StateError::Deserialization)?; + self.cache.insert(key, serialized); + + Ok(()) } /// Get a value from the host's storage. @@ -71,30 +95,46 @@ impl State { /// the host fails to read the key and value. /// # Panics /// Panics if the value cannot be converted from i32 to usize. - pub fn get(&self, key: K) -> Result + pub fn get(&mut self, key: K) -> Result where - K: Into, - T: BorshDeserialize, + V: BorshDeserialize, { - let val_ptr = unsafe { host::get_bytes(&self.program, &key.into())? 
}; - if val_ptr < 0 { - return Err(Error::Read); - } - - // Wrap in OK for now, change from_raw_ptr to return Result - from_host_ptr(val_ptr) + let val_bytes = if let Some(val) = self.cache.get(&key) { + val + } else { + let val_ptr = unsafe { host::get_bytes(&self.program, &key.clone().into())? }; + if val_ptr < 0 { + return Err(Error::Read); + } + + // TODO Wrap in OK for now, change from_raw_ptr to return Result + let bytes = from_host_ptr(val_ptr)?; + self.cache.entry(key).or_insert(bytes) + }; + + from_slice::(val_bytes).map_err(|_| StateError::Deserialization) } /// Delete a value from the hosts's storage. /// # Errors /// Returns an [Error] if the key cannot be serialized /// or if the host fails to delete the key and the associated value - pub fn delete(&self, key: K) -> Result<(), Error> - where - K: Into, - { + pub fn delete(&mut self, key: K) -> Result<(), Error> { + self.cache.remove(&key); + unsafe { host::delete_bytes(&self.program, &key.into()) } } + + /// Apply all pending operations to storage and mark the cache as flushed + fn flush(&mut self) -> Result<(), Error> { + for (key, value) in self.cache.drain() { + unsafe { + host::put_bytes(&self.program, &key.into(), &value)?; + } + } + + Ok(()) + } } /// Key is a wrapper around a `Vec` that represents a key in the host storage. diff --git a/x/programs/rust/wasmlanche-sdk/src/types.rs b/x/programs/rust/wasmlanche-sdk/src/types.rs index 0c69f8cd51..9edc9539d6 100644 --- a/x/programs/rust/wasmlanche-sdk/src/types.rs +++ b/x/programs/rust/wasmlanche-sdk/src/types.rs @@ -2,7 +2,7 @@ use borsh::{BorshDeserialize, BorshSerialize}; /// A struct that enforces a fixed length of 32 bytes which represents an address. -#[derive(Clone, Copy, PartialEq, Eq, Debug, BorshSerialize, BorshDeserialize)] +#[derive(Clone, Copy, PartialEq, Eq, Debug, BorshSerialize, BorshDeserialize, Hash)] pub struct Address([u8; Self::LEN]); impl Address { From a781c032038da431ff3ab17b7a87f532c5eab07f Mon Sep 17 00:00:00 2001 From: Francois Hardrouyere Date: Mon, 29 Apr 2024 21:28:05 +0200 Subject: [PATCH 19/48] root of transactions by stateless merkledb --- chain/block.go | 46 +++++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 45 insertions(+), 1 deletion(-) diff --git a/chain/block.go b/chain/block.go index fd97d86f1d..250354ae89 100644 --- a/chain/block.go +++ b/chain/block.go @@ -9,11 +9,14 @@ import ( "fmt" "time" + "github.com/ava-labs/avalanchego/database" + "github.com/ava-labs/avalanchego/database/memdb" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow/choices" "github.com/ava-labs/avalanchego/snow/consensus/snowman" "github.com/ava-labs/avalanchego/snow/engine/snowman/block" "github.com/ava-labs/avalanchego/utils/set" + "github.com/ava-labs/avalanchego/utils/units" "github.com/ava-labs/avalanchego/x/merkledb" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/trace" @@ -38,7 +41,8 @@ type StatefulBlock struct { Tmstmp int64 `json:"timestamp"` Hght uint64 `json:"height"` - Txs []*Transaction `json:"txs"` + Txs []*Transaction `json:"txs"` + TxsRoot []byte `json:"txsRoot"` // StateRoot is the root of the post-execution state // of [Prnt]. 
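The next hunk populates this new field in [initializeBuilt], so only the block builder computes the root at this point in the series; nothing here re-checks TxsRoot on the verification path. A minimal sketch of what such a check could look like, reusing the same leaf construction (leaves keyed by the hash of their contents) and assuming a hypothetical verifyTxsRoot helper, is:

import (
    "bytes"
    "context"
    "fmt"

    "github.com/ava-labs/avalanchego/database"
    "github.com/ava-labs/avalanchego/database/memdb"
    "github.com/ava-labs/avalanchego/trace"
    "github.com/ava-labs/avalanchego/utils/units"
    "github.com/ava-labs/avalanchego/x/merkledb"

    "github.com/ava-labs/hypersdk/utils"
)

// verifyTxsRoot is an illustrative helper (not part of this patch series): it
// rebuilds an in-memory trie from the given leaves the same way the builder
// does and compares the resulting root against the TxsRoot in the header.
func verifyTxsRoot(ctx context.Context, leaves [][]byte, expected []byte) error {
    db, err := merkledb.New(ctx, memdb.New(), merkledb.Config{
        BranchFactor:              merkledb.BranchFactor16,
        HistoryLength:             100,
        EvictionBatchSize:         units.MiB,
        IntermediateNodeCacheSize: units.MiB,
        ValueNodeCacheSize:        units.MiB,
        Tracer:                    trace.Noop,
    })
    if err != nil {
        return err
    }
    defer db.Close()

    ops := make([]database.BatchOp, 0, len(leaves))
    for _, leaf := range leaves {
        key := utils.ToID(leaf) // leaves are keyed by the hash of their contents
        ops = append(ops, database.BatchOp{Key: key[:], Value: leaf})
    }
    view, err := db.NewView(ctx, merkledb.ViewChanges{BatchOps: ops})
    if err != nil {
        return err
    }
    root, err := view.GetMerkleRoot(ctx)
    if err != nil {
        return err
    }
    if !bytes.Equal(root[:], expected) {
        return fmt.Errorf("txs root mismatch: expected %x, computed %x", expected, root[:])
    }
    return nil
}

Because the keys are content hashes, the root depends only on the set of leaves and not on the order in which they are inserted.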
@@ -245,6 +249,46 @@ func (b *StatelessBlock) initializeBuilt( for _, tx := range b.Txs { b.txsSet.Add(tx.ID()) } + + // transaction hash generation + db, err := merkledb.New(ctx, memdb.New(), merkledb.Config{ + BranchFactor: merkledb.BranchFactor16, + HistoryLength: 100, + EvictionBatchSize: units.MiB, + IntermediateNodeCacheSize: units.MiB, + ValueNodeCacheSize: units.MiB, + Tracer: b.vm.Tracer(), + }) + if err != nil { + return err + } + // collect keys, values from transactions/results + var ops []database.BatchOp + for _, tx := range b.Txs { + key := utils.ToID(tx.Bytes()) + ops = append(ops, database.BatchOp{ + Key: key[:], + Value: tx.Bytes(), + }) + } + for _, result := range b.results { + key := utils.ToID(result.Output) + ops = append(ops, database.BatchOp{ + Key: key[:], + Value: result.Output, + }) + } + view, err = db.NewView(ctx, merkledb.ViewChanges{BatchOps: ops}) + if err != nil { + return err + } + view.CommitToDB(ctx) + txsRoot, err := db.GetMerkleRoot(ctx) + if err != nil { + return err + } + b.TxsRoot = txsRoot[:] + return nil } From 76be5ddff56e73572575e67bdaec853b55866276 Mon Sep 17 00:00:00 2001 From: Francois Hardrouyere Date: Mon, 29 Apr 2024 21:28:47 +0200 Subject: [PATCH 20/48] make root generation a function --- chain/block.go | 42 ++++++++++-------------------------------- utils/utils.go | 47 +++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 57 insertions(+), 32 deletions(-) diff --git a/chain/block.go b/chain/block.go index 250354ae89..56aea0ce17 100644 --- a/chain/block.go +++ b/chain/block.go @@ -9,14 +9,16 @@ import ( "fmt" "time" - "github.com/ava-labs/avalanchego/database" - "github.com/ava-labs/avalanchego/database/memdb" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow/choices" "github.com/ava-labs/avalanchego/snow/consensus/snowman" "github.com/ava-labs/avalanchego/snow/engine/snowman/block" "github.com/ava-labs/avalanchego/utils/set" +<<<<<<< HEAD "github.com/ava-labs/avalanchego/utils/units" +======= + "github.com/ava-labs/avalanchego/vms/platformvm/warp" +>>>>>>> 0804930f (make root generation a function) "github.com/ava-labs/avalanchego/x/merkledb" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/trace" @@ -251,43 +253,19 @@ func (b *StatelessBlock) initializeBuilt( } // transaction hash generation - db, err := merkledb.New(ctx, memdb.New(), merkledb.Config{ - BranchFactor: merkledb.BranchFactor16, - HistoryLength: 100, - EvictionBatchSize: units.MiB, - IntermediateNodeCacheSize: units.MiB, - ValueNodeCacheSize: units.MiB, - Tracer: b.vm.Tracer(), - }) - if err != nil { - return err - } - // collect keys, values from transactions/results - var ops []database.BatchOp + var merkleItems [][]byte for _, tx := range b.Txs { - key := utils.ToID(tx.Bytes()) - ops = append(ops, database.BatchOp{ - Key: key[:], - Value: tx.Bytes(), - }) + merkleItems = append(merkleItems, tx.Bytes()) } for _, result := range b.results { - key := utils.ToID(result.Output) - ops = append(ops, database.BatchOp{ - Key: key[:], - Value: result.Output, - }) + merkleItems = append(merkleItems, result.Output) } - view, err = db.NewView(ctx, merkledb.ViewChanges{BatchOps: ops}) - if err != nil { - return err - } - view.CommitToDB(ctx) - txsRoot, err := db.GetMerkleRoot(ctx) + + root, _, err := utils.GenerateMerkleRoot(ctx, b.vm.Tracer(), merkleItems) if err != nil { return err } - b.TxsRoot = txsRoot[:] + b.TxsRoot = root return nil } diff --git a/utils/utils.go b/utils/utils.go index c19c780abb..7c9851eced 100644 --- 
a/utils/utils.go +++ b/utils/utils.go @@ -4,6 +4,7 @@ package utils import ( + "context" "fmt" "math" "net" @@ -13,11 +14,16 @@ import ( "strconv" "time" + "github.com/ava-labs/avalanchego/database" + "github.com/ava-labs/avalanchego/database/memdb" "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/trace" "github.com/ava-labs/avalanchego/utils/hashing" "github.com/ava-labs/avalanchego/utils/perms" "github.com/onsi/ginkgo/v2/formatter" + "github.com/ava-labs/avalanchego/utils/units" + "github.com/ava-labs/avalanchego/x/merkledb" "github.com/ava-labs/hypersdk/consts" ) @@ -117,3 +123,44 @@ func LoadBytes(filename string, expectedSize int) ([]byte, error) { } return bytes, nil } + +// Generate merkle root for a set of items +// this function does not take ownership of given bytes array +func GenerateMerkleRoot(ctx context.Context, tracer trace.Tracer, merkleItems [][]byte) ([]byte, merkledb.MerkleDB, error) { + var batchOps []database.BatchOp + + for _, item := range merkleItems { + key := ToID(item) + batchOps = append(batchOps, database.BatchOp{ + Key: key[:], + Value: item, + }) + } + + db, err := merkledb.New(ctx, memdb.New(), merkledb.Config{ + BranchFactor: merkledb.BranchFactor16, + HistoryLength: 100, + EvictionBatchSize: units.MiB, + IntermediateNodeCacheSize: units.MiB, + ValueNodeCacheSize: units.MiB, + Tracer: tracer, + }) + if err != nil { + return nil, nil, err + } + + view, err := db.NewView(ctx, merkledb.ViewChanges{BatchOps: batchOps}) + if err != nil { + return nil, nil, err + } + if err := view.CommitToDB(ctx); err != nil { + return nil, nil, err + } + + root, err := db.GetMerkleRoot(ctx) + if err != nil { + return nil, nil, err + } + + return root[:], db, nil +} From b87d00247297f65ff9d79a7983834a40439edced Mon Sep 17 00:00:00 2001 From: Francois Hardrouyere Date: Mon, 29 Apr 2024 21:29:15 +0200 Subject: [PATCH 21/48] add <*.code-workspace> to .gitignore and remove it from git commit --- .gitignore | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/.gitignore b/.gitignore index fca0663d0c..d3aab7c4a3 100644 --- a/.gitignore +++ b/.gitignore @@ -69,4 +69,5 @@ target/ Cargo.lock **/*.rs.bk -x/programs/cmd/simulator/simulator \ No newline at end of file +x/programs/cmd/simulator/simulator +*.code-workspace From 987761aef2b7ba56b1bf596b167af4f08b895325 Mon Sep 17 00:00:00 2001 From: Francois Hardrouyere Date: Mon, 29 Apr 2024 21:38:42 +0200 Subject: [PATCH 22/48] move root generation func to merkle package, tx root by items of [txID + result] --- chain/block.go | 24 +----------------------- utils/utils.go | 9 +++------ 2 files changed, 4 insertions(+), 29 deletions(-) diff --git a/chain/block.go b/chain/block.go index 56aea0ce17..fd97d86f1d 100644 --- a/chain/block.go +++ b/chain/block.go @@ -14,11 +14,6 @@ import ( "github.com/ava-labs/avalanchego/snow/consensus/snowman" "github.com/ava-labs/avalanchego/snow/engine/snowman/block" "github.com/ava-labs/avalanchego/utils/set" -<<<<<<< HEAD - "github.com/ava-labs/avalanchego/utils/units" -======= - "github.com/ava-labs/avalanchego/vms/platformvm/warp" ->>>>>>> 0804930f (make root generation a function) "github.com/ava-labs/avalanchego/x/merkledb" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/trace" @@ -43,8 +38,7 @@ type StatefulBlock struct { Tmstmp int64 `json:"timestamp"` Hght uint64 `json:"height"` - Txs []*Transaction `json:"txs"` - TxsRoot []byte `json:"txsRoot"` + Txs []*Transaction `json:"txs"` // StateRoot is the root of the post-execution state // of [Prnt]. 
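The commit message above describes the transaction root as being built from items of [txID + result], but the hunk that assembles those items is not included here. Under the assumption that each leaf simply pairs a transaction ID with the output of its result (buildTxRootLeaves is a hypothetical name, and the sketch assumes len(txs) == len(results)), the construction could look like:

// buildTxRootLeaves concatenates each transaction ID with its result output so
// that the merkle root commits to both what was executed and what it produced.
func buildTxRootLeaves(txs []*Transaction, results []*Result) [][]byte {
    leaves := make([][]byte, 0, len(txs))
    for i, tx := range txs {
        txID := tx.ID()
        leaf := make([]byte, 0, len(txID)+len(results[i].Output))
        leaf = append(leaf, txID[:]...)
        leaf = append(leaf, results[i].Output...)
        leaves = append(leaves, leaf)
    }
    return leaves
}

The resulting slice can then be handed to the merkle package's GenerateMerkleRoot introduced earlier in the series.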
@@ -251,22 +245,6 @@ func (b *StatelessBlock) initializeBuilt( for _, tx := range b.Txs { b.txsSet.Add(tx.ID()) } - - // transaction hash generation - var merkleItems [][]byte - for _, tx := range b.Txs { - merkleItems = append(merkleItems, tx.Bytes()) - } - for _, result := range b.results { - merkleItems = append(merkleItems, result.Output) - } - - root, _, err := utils.GenerateMerkleRoot(ctx, b.vm.Tracer(), merkleItems) - if err != nil { - return err - } - b.TxsRoot = root - return nil } diff --git a/utils/utils.go b/utils/utils.go index 7c9851eced..6be3a31eb4 100644 --- a/utils/utils.go +++ b/utils/utils.go @@ -4,7 +4,6 @@ package utils import ( - "context" "fmt" "math" "net" @@ -14,16 +13,11 @@ import ( "strconv" "time" - "github.com/ava-labs/avalanchego/database" - "github.com/ava-labs/avalanchego/database/memdb" "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/trace" "github.com/ava-labs/avalanchego/utils/hashing" "github.com/ava-labs/avalanchego/utils/perms" "github.com/onsi/ginkgo/v2/formatter" - "github.com/ava-labs/avalanchego/utils/units" - "github.com/ava-labs/avalanchego/x/merkledb" "github.com/ava-labs/hypersdk/consts" ) @@ -123,6 +117,7 @@ func LoadBytes(filename string, expectedSize int) ([]byte, error) { } return bytes, nil } +<<<<<<< HEAD // Generate merkle root for a set of items // this function does not take ownership of given bytes array @@ -164,3 +159,5 @@ func GenerateMerkleRoot(ctx context.Context, tracer trace.Tracer, merkleItems [] return root[:], db, nil } +======= +>>>>>>> b31136f4 (move root generation func to merkle package, tx root by items of [txID + result]) From e42d9f9917ce65a8cf2a97c744550430932440bd Mon Sep 17 00:00:00 2001 From: Francois Hardrouyere Date: Mon, 29 Apr 2024 21:40:41 +0200 Subject: [PATCH 23/48] Revert "rebase & blk marshal/unmarshal & merkleroot to ids.ID" This reverts commit f009424f24e9a01b4d1fb83492ad8fdfd533d071. 
focus changes on the bench --- .gitignore | 1 + merkle/merkle.go | 5 +++++ 2 files changed, 6 insertions(+) diff --git a/.gitignore b/.gitignore index d3aab7c4a3..2d307eb3ba 100644 --- a/.gitignore +++ b/.gitignore @@ -71,3 +71,4 @@ Cargo.lock x/programs/cmd/simulator/simulator *.code-workspace +x/programs/cmd/simulator/simulator diff --git a/merkle/merkle.go b/merkle/merkle.go index 6a33cc490e..a5c77706cb 100644 --- a/merkle/merkle.go +++ b/merkle/merkle.go @@ -8,10 +8,15 @@ import ( "github.com/ava-labs/avalanchego/database" "github.com/ava-labs/avalanchego/database/memdb" +<<<<<<< HEAD "github.com/ava-labs/avalanchego/ids" +======= + "github.com/ava-labs/avalanchego/trace" +>>>>>>> f2bb2f97 (Revert "rebase & blk marshal/unmarshal & merkleroot to ids.ID") "github.com/ava-labs/avalanchego/x/merkledb" "github.com/ava-labs/hypersdk/utils" + "github.com/ava-labs/avalanchego/ids" ) // Generate merkle root for a set of items From 941bd93058d8d869a47ce4fb8387b88b7772222a Mon Sep 17 00:00:00 2001 From: Francois Hardrouyere Date: Mon, 29 Apr 2024 21:36:15 +0200 Subject: [PATCH 24/48] sync with main --- .gitignore | 4 +--- x/programs/cmd/simulator/cmd/program.go | 1 + 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/.gitignore b/.gitignore index 2d307eb3ba..fca0663d0c 100644 --- a/.gitignore +++ b/.gitignore @@ -69,6 +69,4 @@ target/ Cargo.lock **/*.rs.bk -x/programs/cmd/simulator/simulator -*.code-workspace -x/programs/cmd/simulator/simulator +x/programs/cmd/simulator/simulator \ No newline at end of file diff --git a/x/programs/cmd/simulator/cmd/program.go b/x/programs/cmd/simulator/cmd/program.go index 7a2b33012b..d0c9a98307 100644 --- a/x/programs/cmd/simulator/cmd/program.go +++ b/x/programs/cmd/simulator/cmd/program.go @@ -144,6 +144,7 @@ func programExecuteFunc( ) (ids.ID, []int64, uint64, error) { // simulate create program transaction programTxID, err := generateRandomID() + if err != nil { return ids.Empty, nil, 0, err } From c8868ca335f942d73084807ab477fdcb2f3b496f Mon Sep 17 00:00:00 2001 From: Francois Hardrouyere Date: Mon, 29 Apr 2024 21:37:01 +0200 Subject: [PATCH 25/48] lint --- x/programs/cmd/simulator/cmd/program.go | 1 - 1 file changed, 1 deletion(-) diff --git a/x/programs/cmd/simulator/cmd/program.go b/x/programs/cmd/simulator/cmd/program.go index d0c9a98307..7a2b33012b 100644 --- a/x/programs/cmd/simulator/cmd/program.go +++ b/x/programs/cmd/simulator/cmd/program.go @@ -144,7 +144,6 @@ func programExecuteFunc( ) (ids.ID, []int64, uint64, error) { // simulate create program transaction programTxID, err := generateRandomID() - if err != nil { return ids.Empty, nil, 0, err } From 87d1213cb1d0a38d15501c4333d2ad6f572b286c Mon Sep 17 00:00:00 2001 From: Francois Hardrouyere Date: Mon, 29 Apr 2024 21:42:14 +0200 Subject: [PATCH 26/48] preallocate memory for merkle array and consumebytes flag --- chain/block.go | 20 +++++++++++++++++++ hypersdk.code-workspace | 8 ++++++++ utils/utils.go | 44 +++++++++++++++++++++++++++++++++++++++++ 3 files changed, 72 insertions(+) create mode 100644 hypersdk.code-workspace diff --git a/chain/block.go b/chain/block.go index fd97d86f1d..61cbf8737a 100644 --- a/chain/block.go +++ b/chain/block.go @@ -245,6 +245,26 @@ func (b *StatelessBlock) initializeBuilt( for _, tx := range b.Txs { b.txsSet.Add(tx.ID()) } +<<<<<<< HEAD +======= + + // transaction hash generation + merkleItems := make([][]byte, 0, len(b.Txs)+len(b.results)) + for _, tx := range b.Txs { + merkleItems = append(merkleItems, tx.Bytes()) + } + for _, result := range 
b.results { + merkleItems = append(merkleItems, result.Output) + } + + // consume bytes to avoid extra copying + root, _, err := utils.GenerateMerkleRoot(ctx, b.vm.Tracer(), merkleItems, true) + if err != nil { + return err + } + b.TxsRoot = root + +>>>>>>> eb41d9a7 (preallocate memory for merkle array and consumebytes flag) return nil } diff --git a/hypersdk.code-workspace b/hypersdk.code-workspace new file mode 100644 index 0000000000..876a1499c0 --- /dev/null +++ b/hypersdk.code-workspace @@ -0,0 +1,8 @@ +{ + "folders": [ + { + "path": "." + } + ], + "settings": {} +} \ No newline at end of file diff --git a/utils/utils.go b/utils/utils.go index c19c780abb..d76dfb8912 100644 --- a/utils/utils.go +++ b/utils/utils.go @@ -117,3 +117,47 @@ func LoadBytes(filename string, expectedSize int) ([]byte, error) { } return bytes, nil } +<<<<<<< HEAD +======= + +// Generate merkle root for a set of items +// this function does not take ownership of given bytes array +func GenerateMerkleRoot(ctx context.Context, tracer trace.Tracer, merkleItems [][]byte, consumeBytes bool) ([]byte, merkledb.MerkleDB, error) { + batchOps := make([]database.BatchOp, 0, len(merkleItems)) + + for _, item := range merkleItems { + key := ToID(item) + batchOps = append(batchOps, database.BatchOp{ + Key: key[:], + Value: item, + }) + } + + db, err := merkledb.New(ctx, memdb.New(), merkledb.Config{ + BranchFactor: merkledb.BranchFactor16, + HistoryLength: 100, + EvictionBatchSize: units.MiB, + IntermediateNodeCacheSize: units.MiB, + ValueNodeCacheSize: units.MiB, + Tracer: tracer, + }) + if err != nil { + return nil, nil, err + } + + view, err := db.NewView(ctx, merkledb.ViewChanges{BatchOps: batchOps, ConsumeBytes: consumeBytes}) + if err != nil { + return nil, nil, err + } + if err := view.CommitToDB(ctx); err != nil { + return nil, nil, err + } + + root, err := db.GetMerkleRoot(ctx) + if err != nil { + return nil, nil, err + } + + return root[:], db, nil +} +>>>>>>> eb41d9a7 (preallocate memory for merkle array and consumebytes flag) From 13e10c3bf914cd24bc0143c99148068095d211fa Mon Sep 17 00:00:00 2001 From: bianyuanop Date: Fri, 19 Jan 2024 17:28:24 -0500 Subject: [PATCH 27/48] remove wrong merkleroot docstring --- utils/utils.go | 1 - 1 file changed, 1 deletion(-) diff --git a/utils/utils.go b/utils/utils.go index d76dfb8912..86b25121ba 100644 --- a/utils/utils.go +++ b/utils/utils.go @@ -121,7 +121,6 @@ func LoadBytes(filename string, expectedSize int) ([]byte, error) { ======= // Generate merkle root for a set of items -// this function does not take ownership of given bytes array func GenerateMerkleRoot(ctx context.Context, tracer trace.Tracer, merkleItems [][]byte, consumeBytes bool) ([]byte, merkledb.MerkleDB, error) { batchOps := make([]database.BatchOp, 0, len(merkleItems)) From f12ba39859e0f6c0fa249de78dc577a4d6b2b0d8 Mon Sep 17 00:00:00 2001 From: Francois Hardrouyere Date: Mon, 29 Apr 2024 21:42:27 +0200 Subject: [PATCH 28/48] add <*.code-workspace> to .gitignore and remove it from git commit --- hypersdk.code-workspace | 8 -------- 1 file changed, 8 deletions(-) delete mode 100644 hypersdk.code-workspace diff --git a/hypersdk.code-workspace b/hypersdk.code-workspace deleted file mode 100644 index 876a1499c0..0000000000 --- a/hypersdk.code-workspace +++ /dev/null @@ -1,8 +0,0 @@ -{ - "folders": [ - { - "path": "." 
- } - ], - "settings": {} -} \ No newline at end of file From d04658201eb72bc8f8474b8b79fe54653c268a85 Mon Sep 17 00:00:00 2001 From: Francois Hardrouyere Date: Mon, 29 Apr 2024 21:42:44 +0200 Subject: [PATCH 29/48] move root generation func to merkle package, tx root by items of [txID + result] --- chain/block.go | 20 ------------------- merkle/merkle.go | 52 ++++++++++++++++++++++++++++++++++++++++++++++++ utils/utils.go | 43 --------------------------------------- 3 files changed, 52 insertions(+), 63 deletions(-) create mode 100644 merkle/merkle.go diff --git a/chain/block.go b/chain/block.go index 61cbf8737a..fd97d86f1d 100644 --- a/chain/block.go +++ b/chain/block.go @@ -245,26 +245,6 @@ func (b *StatelessBlock) initializeBuilt( for _, tx := range b.Txs { b.txsSet.Add(tx.ID()) } -<<<<<<< HEAD -======= - - // transaction hash generation - merkleItems := make([][]byte, 0, len(b.Txs)+len(b.results)) - for _, tx := range b.Txs { - merkleItems = append(merkleItems, tx.Bytes()) - } - for _, result := range b.results { - merkleItems = append(merkleItems, result.Output) - } - - // consume bytes to avoid extra copying - root, _, err := utils.GenerateMerkleRoot(ctx, b.vm.Tracer(), merkleItems, true) - if err != nil { - return err - } - b.TxsRoot = root - ->>>>>>> eb41d9a7 (preallocate memory for merkle array and consumebytes flag) return nil } diff --git a/merkle/merkle.go b/merkle/merkle.go new file mode 100644 index 0000000000..53333b7692 --- /dev/null +++ b/merkle/merkle.go @@ -0,0 +1,52 @@ +package merkle + +import ( + "context" + + "github.com/ava-labs/avalanchego/database" + "github.com/ava-labs/avalanchego/database/memdb" + "github.com/ava-labs/avalanchego/trace" + "github.com/ava-labs/avalanchego/utils/units" + "github.com/ava-labs/avalanchego/x/merkledb" + "github.com/ava-labs/hypersdk/utils" +) + +// Generate merkle root for a set of items +func GenerateMerkleRoot(ctx context.Context, tracer trace.Tracer, merkleItems [][]byte, consumeBytes bool) ([]byte, merkledb.MerkleDB, error) { + batchOps := make([]database.BatchOp, 0, len(merkleItems)) + + for _, item := range merkleItems { + key := utils.ToID(item) + batchOps = append(batchOps, database.BatchOp{ + Key: key[:], + Value: item, + }) + } + + db, err := merkledb.New(ctx, memdb.New(), merkledb.Config{ + BranchFactor: merkledb.BranchFactor16, + HistoryLength: 100, + EvictionBatchSize: units.MiB, + IntermediateNodeCacheSize: units.MiB, + ValueNodeCacheSize: units.MiB, + Tracer: tracer, + }) + if err != nil { + return nil, nil, err + } + + view, err := db.NewView(ctx, merkledb.ViewChanges{BatchOps: batchOps, ConsumeBytes: consumeBytes}) + if err != nil { + return nil, nil, err + } + if err := view.CommitToDB(ctx); err != nil { + return nil, nil, err + } + + root, err := db.GetMerkleRoot(ctx) + if err != nil { + return nil, nil, err + } + + return root[:], db, nil +} diff --git a/utils/utils.go b/utils/utils.go index 86b25121ba..c19c780abb 100644 --- a/utils/utils.go +++ b/utils/utils.go @@ -117,46 +117,3 @@ func LoadBytes(filename string, expectedSize int) ([]byte, error) { } return bytes, nil } -<<<<<<< HEAD -======= - -// Generate merkle root for a set of items -func GenerateMerkleRoot(ctx context.Context, tracer trace.Tracer, merkleItems [][]byte, consumeBytes bool) ([]byte, merkledb.MerkleDB, error) { - batchOps := make([]database.BatchOp, 0, len(merkleItems)) - - for _, item := range merkleItems { - key := ToID(item) - batchOps = append(batchOps, database.BatchOp{ - Key: key[:], - Value: item, - }) - } - - db, err := 
merkledb.New(ctx, memdb.New(), merkledb.Config{ - BranchFactor: merkledb.BranchFactor16, - HistoryLength: 100, - EvictionBatchSize: units.MiB, - IntermediateNodeCacheSize: units.MiB, - ValueNodeCacheSize: units.MiB, - Tracer: tracer, - }) - if err != nil { - return nil, nil, err - } - - view, err := db.NewView(ctx, merkledb.ViewChanges{BatchOps: batchOps, ConsumeBytes: consumeBytes}) - if err != nil { - return nil, nil, err - } - if err := view.CommitToDB(ctx); err != nil { - return nil, nil, err - } - - root, err := db.GetMerkleRoot(ctx) - if err != nil { - return nil, nil, err - } - - return root[:], db, nil -} ->>>>>>> eb41d9a7 (preallocate memory for merkle array and consumebytes flag) From 4b0f9b278b56ba31e0c510fdd497912bdd5d7c7d Mon Sep 17 00:00:00 2001 From: Francois Hardrouyere Date: Mon, 29 Apr 2024 21:43:02 +0200 Subject: [PATCH 30/48] rebase & blk marshal/unmarshal & merkleroot to ids.ID --- merkle/merkle.go | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/merkle/merkle.go b/merkle/merkle.go index 53333b7692..e76e7efbad 100644 --- a/merkle/merkle.go +++ b/merkle/merkle.go @@ -5,6 +5,7 @@ import ( "github.com/ava-labs/avalanchego/database" "github.com/ava-labs/avalanchego/database/memdb" + "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/trace" "github.com/ava-labs/avalanchego/utils/units" "github.com/ava-labs/avalanchego/x/merkledb" @@ -12,7 +13,7 @@ import ( ) // Generate merkle root for a set of items -func GenerateMerkleRoot(ctx context.Context, tracer trace.Tracer, merkleItems [][]byte, consumeBytes bool) ([]byte, merkledb.MerkleDB, error) { +func GenerateMerkleRoot(ctx context.Context, tracer trace.Tracer, merkleItems [][]byte, consumeBytes bool) (ids.ID, merkledb.MerkleDB, error) { batchOps := make([]database.BatchOp, 0, len(merkleItems)) for _, item := range merkleItems { @@ -26,27 +27,26 @@ func GenerateMerkleRoot(ctx context.Context, tracer trace.Tracer, merkleItems [] db, err := merkledb.New(ctx, memdb.New(), merkledb.Config{ BranchFactor: merkledb.BranchFactor16, HistoryLength: 100, - EvictionBatchSize: units.MiB, IntermediateNodeCacheSize: units.MiB, ValueNodeCacheSize: units.MiB, Tracer: tracer, }) if err != nil { - return nil, nil, err + return ids.Empty, nil, err } view, err := db.NewView(ctx, merkledb.ViewChanges{BatchOps: batchOps, ConsumeBytes: consumeBytes}) if err != nil { - return nil, nil, err + return ids.Empty, nil, err } if err := view.CommitToDB(ctx); err != nil { - return nil, nil, err + return ids.Empty, nil, err } root, err := db.GetMerkleRoot(ctx) if err != nil { - return nil, nil, err + return ids.Empty, nil, err } - return root[:], db, nil + return root, db, nil } From 46719d3424ab62f14cc6cbf975b30c2de7fff398 Mon Sep 17 00:00:00 2001 From: francois Date: Mon, 18 Mar 2024 17:31:30 +0100 Subject: [PATCH 31/48] write benches for the merkle package --- merkle/merkle_test.go | 44 +++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 44 insertions(+) create mode 100644 merkle/merkle_test.go diff --git a/merkle/merkle_test.go b/merkle/merkle_test.go new file mode 100644 index 0000000000..76e4c8b841 --- /dev/null +++ b/merkle/merkle_test.go @@ -0,0 +1,44 @@ +package merkle + +import ( + "encoding/binary" + "testing" + + "context" + "strconv" + + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/trace" + "github.com/ava-labs/avalanchego/x/merkledb" +) + +var res_root ids.ID +var res_db merkledb.MerkleDB +var res_err error + +func BenchmarkMerkleTxRoot(b *testing.B) { + for 
_, size := range []int{10, 100, 1000, 10000} { + ctx := context.TODO() + tracer := trace.Noop + merkleItems := make([][]byte, 0, size) + for i := 0; i < size; i++ { + as_bytes := make([]byte, 32) + binary.BigEndian.PutUint32(as_bytes, uint32(i)) + merkleItems = append(merkleItems, as_bytes) + } + var root ids.ID + var db merkledb.MerkleDB + var err error + b.Run(strconv.Itoa(size), func(b *testing.B) { + for n := 0; n < b.N; n++ { + for i := 0; i < size; i++ { + root, db, err = GenerateMerkleRoot(ctx, tracer, merkleItems, false) + } + } + }) + // avoid compiler optimizations to cancel out the bench + res_root = root + res_db = db + res_err = err + } +} From 2543ce8f3bb376951404c1819aad54cfe049c9b8 Mon Sep 17 00:00:00 2001 From: francois Date: Fri, 22 Mar 2024 19:53:06 +0100 Subject: [PATCH 32/48] use crypto/rand, fix var name, report allocs --- merkle/merkle_test.go | 30 +++++++++++++++++++----------- 1 file changed, 19 insertions(+), 11 deletions(-) diff --git a/merkle/merkle_test.go b/merkle/merkle_test.go index 76e4c8b841..a90db3cc3e 100644 --- a/merkle/merkle_test.go +++ b/merkle/merkle_test.go @@ -1,9 +1,9 @@ package merkle import ( - "encoding/binary" "testing" + "crypto/rand" "context" "strconv" @@ -12,23 +12,30 @@ import ( "github.com/ava-labs/avalanchego/x/merkledb" ) -var res_root ids.ID -var res_db merkledb.MerkleDB -var res_err error +var resRoot ids.ID +var resDb merkledb.MerkleDB +var resErr error func BenchmarkMerkleTxRoot(b *testing.B) { - for _, size := range []int{10, 100, 1000, 10000} { + b.ReportAllocs() + + for _, size := range []int{10, 100, 1000} { ctx := context.TODO() tracer := trace.Noop merkleItems := make([][]byte, 0, size) for i := 0; i < size; i++ { - as_bytes := make([]byte, 32) - binary.BigEndian.PutUint32(as_bytes, uint32(i)) - merkleItems = append(merkleItems, as_bytes) + item := make([]byte, 32) + _, err := rand.Read(item) + if err != nil { + b.Fatal(err) + } + merkleItems = append(merkleItems, item) } + var root ids.ID var db merkledb.MerkleDB var err error + b.Run(strconv.Itoa(size), func(b *testing.B) { for n := 0; n < b.N; n++ { for i := 0; i < size; i++ { @@ -36,9 +43,10 @@ func BenchmarkMerkleTxRoot(b *testing.B) { } } }) + // avoid compiler optimizations to cancel out the bench - res_root = root - res_db = db - res_err = err + resRoot = root + resDb = db + resErr = err } } From 645152705ee619f2d58b94afd7a36ae0176dded3 Mon Sep 17 00:00:00 2001 From: francois Date: Fri, 22 Mar 2024 19:56:44 +0100 Subject: [PATCH 33/48] put the 10k bench back --- merkle/merkle_test.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/merkle/merkle_test.go b/merkle/merkle_test.go index a90db3cc3e..e5c391f833 100644 --- a/merkle/merkle_test.go +++ b/merkle/merkle_test.go @@ -17,9 +17,7 @@ var resDb merkledb.MerkleDB var resErr error func BenchmarkMerkleTxRoot(b *testing.B) { - b.ReportAllocs() - - for _, size := range []int{10, 100, 1000} { + for _, size := range []int{10, 100, 1000, 10000} { ctx := context.TODO() tracer := trace.Noop merkleItems := make([][]byte, 0, size) @@ -49,4 +47,6 @@ func BenchmarkMerkleTxRoot(b *testing.B) { resDb = db resErr = err } + + b.ReportAllocs() } From a25cae65f0dd85f72046a80d69fc91e426f8c742 Mon Sep 17 00:00:00 2001 From: francois Date: Sun, 31 Mar 2024 13:11:15 +0200 Subject: [PATCH 34/48] pass config by parameter --- merkle/merkle.go | 11 ++--------- merkle/merkle_test.go | 13 +++++++++++-- 2 files changed, 13 insertions(+), 11 deletions(-) diff --git a/merkle/merkle.go b/merkle/merkle.go index 
e76e7efbad..c2a493f74d 100644 --- a/merkle/merkle.go +++ b/merkle/merkle.go @@ -7,13 +7,12 @@ import ( "github.com/ava-labs/avalanchego/database/memdb" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/trace" - "github.com/ava-labs/avalanchego/utils/units" "github.com/ava-labs/avalanchego/x/merkledb" "github.com/ava-labs/hypersdk/utils" ) // Generate merkle root for a set of items -func GenerateMerkleRoot(ctx context.Context, tracer trace.Tracer, merkleItems [][]byte, consumeBytes bool) (ids.ID, merkledb.MerkleDB, error) { +func GenerateMerkleRoot(ctx context.Context, config merkledb.Config, tracer trace.Tracer, merkleItems [][]byte, consumeBytes bool) (ids.ID, merkledb.MerkleDB, error) { batchOps := make([]database.BatchOp, 0, len(merkleItems)) for _, item := range merkleItems { @@ -24,13 +23,7 @@ func GenerateMerkleRoot(ctx context.Context, tracer trace.Tracer, merkleItems [] }) } - db, err := merkledb.New(ctx, memdb.New(), merkledb.Config{ - BranchFactor: merkledb.BranchFactor16, - HistoryLength: 100, - IntermediateNodeCacheSize: units.MiB, - ValueNodeCacheSize: units.MiB, - Tracer: tracer, - }) + db, err := merkledb.New(ctx, memdb.New(), config) if err != nil { return ids.Empty, nil, err } diff --git a/merkle/merkle_test.go b/merkle/merkle_test.go index e5c391f833..176411209c 100644 --- a/merkle/merkle_test.go +++ b/merkle/merkle_test.go @@ -3,12 +3,13 @@ package merkle import ( "testing" - "crypto/rand" "context" + "crypto/rand" "strconv" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/trace" + "github.com/ava-labs/avalanchego/utils/units" "github.com/ava-labs/avalanchego/x/merkledb" ) @@ -34,10 +35,18 @@ func BenchmarkMerkleTxRoot(b *testing.B) { var db merkledb.MerkleDB var err error + defaultConfig := merkledb.Config{ + BranchFactor: merkledb.BranchFactor16, + HistoryLength: 100, + IntermediateNodeCacheSize: units.MiB, + ValueNodeCacheSize: units.MiB, + Tracer: tracer, + } + b.Run(strconv.Itoa(size), func(b *testing.B) { for n := 0; n < b.N; n++ { for i := 0; i < size; i++ { - root, db, err = GenerateMerkleRoot(ctx, tracer, merkleItems, false) + root, db, err = GenerateMerkleRoot(ctx, defaultConfig, tracer, merkleItems, false) } } }) From 5d861d4beea99eee0d55299cf5dd8b83065c07ed Mon Sep 17 00:00:00 2001 From: Francois Hardrouyere Date: Mon, 29 Apr 2024 21:43:17 +0200 Subject: [PATCH 35/48] Revert "rebase & blk marshal/unmarshal & merkleroot to ids.ID" This reverts commit f009424f24e9a01b4d1fb83492ad8fdfd533d071. 
focus changes on the bench --- merkle/merkle.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/merkle/merkle.go b/merkle/merkle.go index c2a493f74d..c2a7088fea 100644 --- a/merkle/merkle.go +++ b/merkle/merkle.go @@ -5,10 +5,10 @@ import ( "github.com/ava-labs/avalanchego/database" "github.com/ava-labs/avalanchego/database/memdb" - "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/trace" "github.com/ava-labs/avalanchego/x/merkledb" "github.com/ava-labs/hypersdk/utils" + "github.com/ava-labs/avalanchego/ids" ) // Generate merkle root for a set of items From 9c1b7cb671ac03e84cb989249ae17a6e35f88c2f Mon Sep 17 00:00:00 2001 From: francois Date: Mon, 22 Apr 2024 18:02:05 +0200 Subject: [PATCH 36/48] apply review changes: call root on view, nits --- merkle/merkle.go | 8 ++---- merkle/merkle_test.go | 57 +++++++++++++++++++++---------------------- 2 files changed, 30 insertions(+), 35 deletions(-) diff --git a/merkle/merkle.go b/merkle/merkle.go index c2a7088fea..b5df63a5f1 100644 --- a/merkle/merkle.go +++ b/merkle/merkle.go @@ -5,14 +5,13 @@ import ( "github.com/ava-labs/avalanchego/database" "github.com/ava-labs/avalanchego/database/memdb" - "github.com/ava-labs/avalanchego/trace" "github.com/ava-labs/avalanchego/x/merkledb" "github.com/ava-labs/hypersdk/utils" "github.com/ava-labs/avalanchego/ids" ) // Generate merkle root for a set of items -func GenerateMerkleRoot(ctx context.Context, config merkledb.Config, tracer trace.Tracer, merkleItems [][]byte, consumeBytes bool) (ids.ID, merkledb.MerkleDB, error) { +func GenerateMerkleRoot(ctx context.Context, config merkledb.Config, merkleItems [][]byte, consumeBytes bool) (ids.ID, merkledb.MerkleDB, error) { batchOps := make([]database.BatchOp, 0, len(merkleItems)) for _, item := range merkleItems { @@ -32,11 +31,8 @@ func GenerateMerkleRoot(ctx context.Context, config merkledb.Config, tracer trac if err != nil { return ids.Empty, nil, err } - if err := view.CommitToDB(ctx); err != nil { - return ids.Empty, nil, err - } - root, err := db.GetMerkleRoot(ctx) + root, err := view.GetMerkleRoot(ctx) if err != nil { return ids.Empty, nil, err } diff --git a/merkle/merkle_test.go b/merkle/merkle_test.go index 176411209c..99853f0b33 100644 --- a/merkle/merkle_test.go +++ b/merkle/merkle_test.go @@ -11,6 +11,7 @@ import ( "github.com/ava-labs/avalanchego/trace" "github.com/ava-labs/avalanchego/utils/units" "github.com/ava-labs/avalanchego/x/merkledb" + "github.com/stretchr/testify/require" ) var resRoot ids.ID @@ -19,43 +20,41 @@ var resErr error func BenchmarkMerkleTxRoot(b *testing.B) { for _, size := range []int{10, 100, 1000, 10000} { - ctx := context.TODO() - tracer := trace.Noop - merkleItems := make([][]byte, 0, size) - for i := 0; i < size; i++ { - item := make([]byte, 32) - _, err := rand.Read(item) - if err != nil { - b.Fatal(err) - } - merkleItems = append(merkleItems, item) - } - - var root ids.ID - var db merkledb.MerkleDB - var err error - - defaultConfig := merkledb.Config{ - BranchFactor: merkledb.BranchFactor16, - HistoryLength: 100, - IntermediateNodeCacheSize: units.MiB, - ValueNodeCacheSize: units.MiB, - Tracer: tracer, - } + ctx := context.TODO() + tracer := trace.Noop + merkleItems := make([][]byte, 0, size) + for i := 0; i < size; i++ { + item := make([]byte, 32) + _, err := rand.Read(item) + require.NoError(b, err) + merkleItems = append(merkleItems, item) + } + + var root ids.ID + var db merkledb.MerkleDB + var err error + + defaultConfig := merkledb.Config{ + BranchFactor: 
merkledb.BranchFactor16, + HistoryLength: 100, + IntermediateNodeCacheSize: units.MiB, + ValueNodeCacheSize: units.MiB, + Tracer: tracer, + } b.Run(strconv.Itoa(size), func(b *testing.B) { for n := 0; n < b.N; n++ { for i := 0; i < size; i++ { - root, db, err = GenerateMerkleRoot(ctx, defaultConfig, tracer, merkleItems, false) + root, db, err = GenerateMerkleRoot(ctx, defaultConfig, merkleItems, false) } } }) - // avoid compiler optimizations to cancel out the bench - resRoot = root - resDb = db - resErr = err + // avoid compiler optimizations to cancel out the bench + resRoot = root + resDb = db + resErr = err } - b.ReportAllocs() + b.ReportAllocs() } From 45ffd216f2abf4635823484aca5811cb574730c2 Mon Sep 17 00:00:00 2001 From: Francois Hardrouyere Date: Fri, 26 Apr 2024 20:18:42 +0200 Subject: [PATCH 37/48] lint --- merkle/merkle_test.go | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/merkle/merkle_test.go b/merkle/merkle_test.go index 99853f0b33..63fa641a88 100644 --- a/merkle/merkle_test.go +++ b/merkle/merkle_test.go @@ -1,11 +1,10 @@ package merkle import ( - "testing" - "context" "crypto/rand" "strconv" + "testing" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/trace" @@ -14,9 +13,11 @@ import ( "github.com/stretchr/testify/require" ) -var resRoot ids.ID -var resDb merkledb.MerkleDB -var resErr error +var ( + resRoot ids.ID + resDB merkledb.MerkleDB + resErr error +) func BenchmarkMerkleTxRoot(b *testing.B) { for _, size := range []int{10, 100, 1000, 10000} { @@ -52,7 +53,7 @@ func BenchmarkMerkleTxRoot(b *testing.B) { // avoid compiler optimizations to cancel out the bench resRoot = root - resDb = db + resDB = db resErr = err } From e6f44fe390980e959fc78edee7f8e6bcd3e211c7 Mon Sep 17 00:00:00 2001 From: Francois Hardrouyere Date: Fri, 26 Apr 2024 20:37:12 +0200 Subject: [PATCH 38/48] fix lints --- merkle/merkle.go | 6 +++++- merkle/merkle_test.go | 3 +++ x/programs/cmd/simulator/cmd/program.go | 1 - 3 files changed, 8 insertions(+), 2 deletions(-) diff --git a/merkle/merkle.go b/merkle/merkle.go index b5df63a5f1..6a33cc490e 100644 --- a/merkle/merkle.go +++ b/merkle/merkle.go @@ -1,3 +1,6 @@ +// Copyright (C) 2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + package merkle import ( @@ -5,9 +8,10 @@ import ( "github.com/ava-labs/avalanchego/database" "github.com/ava-labs/avalanchego/database/memdb" + "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/x/merkledb" + "github.com/ava-labs/hypersdk/utils" - "github.com/ava-labs/avalanchego/ids" ) // Generate merkle root for a set of items diff --git a/merkle/merkle_test.go b/merkle/merkle_test.go index 63fa641a88..9c3fc7d630 100644 --- a/merkle/merkle_test.go +++ b/merkle/merkle_test.go @@ -1,3 +1,6 @@ +// Copyright (C) 2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ package merkle import ( diff --git a/x/programs/cmd/simulator/cmd/program.go b/x/programs/cmd/simulator/cmd/program.go index d0c9a98307..7a2b33012b 100644 --- a/x/programs/cmd/simulator/cmd/program.go +++ b/x/programs/cmd/simulator/cmd/program.go @@ -144,7 +144,6 @@ func programExecuteFunc( ) (ids.ID, []int64, uint64, error) { // simulate create program transaction programTxID, err := generateRandomID() - if err != nil { return ids.Empty, nil, 0, err } From 3f3c06b7bd722240a81c7ace37134839d753cb8f Mon Sep 17 00:00:00 2001 From: Francois Hardrouyere Date: Mon, 29 Apr 2024 21:28:05 +0200 Subject: [PATCH 39/48] root of transactions by stateless merkledb --- chain/block.go | 46 +++++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 45 insertions(+), 1 deletion(-) diff --git a/chain/block.go b/chain/block.go index fd97d86f1d..250354ae89 100644 --- a/chain/block.go +++ b/chain/block.go @@ -9,11 +9,14 @@ import ( "fmt" "time" + "github.com/ava-labs/avalanchego/database" + "github.com/ava-labs/avalanchego/database/memdb" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow/choices" "github.com/ava-labs/avalanchego/snow/consensus/snowman" "github.com/ava-labs/avalanchego/snow/engine/snowman/block" "github.com/ava-labs/avalanchego/utils/set" + "github.com/ava-labs/avalanchego/utils/units" "github.com/ava-labs/avalanchego/x/merkledb" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/trace" @@ -38,7 +41,8 @@ type StatefulBlock struct { Tmstmp int64 `json:"timestamp"` Hght uint64 `json:"height"` - Txs []*Transaction `json:"txs"` + Txs []*Transaction `json:"txs"` + TxsRoot []byte `json:"txsRoot"` // StateRoot is the root of the post-execution state // of [Prnt]. @@ -245,6 +249,46 @@ func (b *StatelessBlock) initializeBuilt( for _, tx := range b.Txs { b.txsSet.Add(tx.ID()) } + + // transaction hash generation + db, err := merkledb.New(ctx, memdb.New(), merkledb.Config{ + BranchFactor: merkledb.BranchFactor16, + HistoryLength: 100, + EvictionBatchSize: units.MiB, + IntermediateNodeCacheSize: units.MiB, + ValueNodeCacheSize: units.MiB, + Tracer: b.vm.Tracer(), + }) + if err != nil { + return err + } + // collect keys, values from transactions/results + var ops []database.BatchOp + for _, tx := range b.Txs { + key := utils.ToID(tx.Bytes()) + ops = append(ops, database.BatchOp{ + Key: key[:], + Value: tx.Bytes(), + }) + } + for _, result := range b.results { + key := utils.ToID(result.Output) + ops = append(ops, database.BatchOp{ + Key: key[:], + Value: result.Output, + }) + } + view, err = db.NewView(ctx, merkledb.ViewChanges{BatchOps: ops}) + if err != nil { + return err + } + view.CommitToDB(ctx) + txsRoot, err := db.GetMerkleRoot(ctx) + if err != nil { + return err + } + b.TxsRoot = txsRoot[:] + return nil } From 5dd3d08d4ed7c8c63de792fe69c50fcf23c9479a Mon Sep 17 00:00:00 2001 From: Francois Hardrouyere Date: Mon, 29 Apr 2024 21:28:47 +0200 Subject: [PATCH 40/48] make root generation a function --- chain/block.go | 42 ++++++++++-------------------------------- utils/utils.go | 47 +++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 57 insertions(+), 32 deletions(-) diff --git a/chain/block.go b/chain/block.go index 250354ae89..56aea0ce17 100644 --- a/chain/block.go +++ b/chain/block.go @@ -9,14 +9,16 @@ import ( "fmt" "time" - "github.com/ava-labs/avalanchego/database" - "github.com/ava-labs/avalanchego/database/memdb" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow/choices" 
"github.com/ava-labs/avalanchego/snow/consensus/snowman" "github.com/ava-labs/avalanchego/snow/engine/snowman/block" "github.com/ava-labs/avalanchego/utils/set" +<<<<<<< HEAD "github.com/ava-labs/avalanchego/utils/units" +======= + "github.com/ava-labs/avalanchego/vms/platformvm/warp" +>>>>>>> 0804930f (make root generation a function) "github.com/ava-labs/avalanchego/x/merkledb" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/trace" @@ -251,43 +253,19 @@ func (b *StatelessBlock) initializeBuilt( } // transaction hash generation - db, err := merkledb.New(ctx, memdb.New(), merkledb.Config{ - BranchFactor: merkledb.BranchFactor16, - HistoryLength: 100, - EvictionBatchSize: units.MiB, - IntermediateNodeCacheSize: units.MiB, - ValueNodeCacheSize: units.MiB, - Tracer: b.vm.Tracer(), - }) - if err != nil { - return err - } - // collect keys, values from transactions/results - var ops []database.BatchOp + var merkleItems [][]byte for _, tx := range b.Txs { - key := utils.ToID(tx.Bytes()) - ops = append(ops, database.BatchOp{ - Key: key[:], - Value: tx.Bytes(), - }) + merkleItems = append(merkleItems, tx.Bytes()) } for _, result := range b.results { - key := utils.ToID(result.Output) - ops = append(ops, database.BatchOp{ - Key: key[:], - Value: result.Output, - }) + merkleItems = append(merkleItems, result.Output) } - view, err = db.NewView(ctx, merkledb.ViewChanges{BatchOps: ops}) - if err != nil { - return err - } - view.CommitToDB(ctx) - txsRoot, err := db.GetMerkleRoot(ctx) + + root, _, err := utils.GenerateMerkleRoot(ctx, b.vm.Tracer(), merkleItems) if err != nil { return err } - b.TxsRoot = txsRoot[:] + b.TxsRoot = root return nil } diff --git a/utils/utils.go b/utils/utils.go index c19c780abb..7c9851eced 100644 --- a/utils/utils.go +++ b/utils/utils.go @@ -4,6 +4,7 @@ package utils import ( + "context" "fmt" "math" "net" @@ -13,11 +14,16 @@ import ( "strconv" "time" + "github.com/ava-labs/avalanchego/database" + "github.com/ava-labs/avalanchego/database/memdb" "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/trace" "github.com/ava-labs/avalanchego/utils/hashing" "github.com/ava-labs/avalanchego/utils/perms" "github.com/onsi/ginkgo/v2/formatter" + "github.com/ava-labs/avalanchego/utils/units" + "github.com/ava-labs/avalanchego/x/merkledb" "github.com/ava-labs/hypersdk/consts" ) @@ -117,3 +123,44 @@ func LoadBytes(filename string, expectedSize int) ([]byte, error) { } return bytes, nil } + +// Generate merkle root for a set of items +// this function does not take ownership of given bytes array +func GenerateMerkleRoot(ctx context.Context, tracer trace.Tracer, merkleItems [][]byte) ([]byte, merkledb.MerkleDB, error) { + var batchOps []database.BatchOp + + for _, item := range merkleItems { + key := ToID(item) + batchOps = append(batchOps, database.BatchOp{ + Key: key[:], + Value: item, + }) + } + + db, err := merkledb.New(ctx, memdb.New(), merkledb.Config{ + BranchFactor: merkledb.BranchFactor16, + HistoryLength: 100, + EvictionBatchSize: units.MiB, + IntermediateNodeCacheSize: units.MiB, + ValueNodeCacheSize: units.MiB, + Tracer: tracer, + }) + if err != nil { + return nil, nil, err + } + + view, err := db.NewView(ctx, merkledb.ViewChanges{BatchOps: batchOps}) + if err != nil { + return nil, nil, err + } + if err := view.CommitToDB(ctx); err != nil { + return nil, nil, err + } + + root, err := db.GetMerkleRoot(ctx) + if err != nil { + return nil, nil, err + } + + return root[:], db, nil +} From aeedd2f6c45b44e1235274fa1bc4b4a7bd0c83c8 Mon Sep 17 
00:00:00 2001 From: Francois Hardrouyere Date: Mon, 29 Apr 2024 21:29:15 +0200 Subject: [PATCH 41/48] add <*.code-workspace> to .gitignore and remove it from git commit --- .gitignore | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/.gitignore b/.gitignore index fca0663d0c..d3aab7c4a3 100644 --- a/.gitignore +++ b/.gitignore @@ -69,4 +69,5 @@ target/ Cargo.lock **/*.rs.bk -x/programs/cmd/simulator/simulator \ No newline at end of file +x/programs/cmd/simulator/simulator +*.code-workspace From ab71cb5b7fb7459e5a3cafe2be6406bbcf37cc2c Mon Sep 17 00:00:00 2001 From: Francois Hardrouyere Date: Mon, 29 Apr 2024 21:38:42 +0200 Subject: [PATCH 42/48] move root generation func to merkle package, tx root by items of [txID + result] --- chain/block.go | 24 +----------------------- utils/utils.go | 9 +++------ 2 files changed, 4 insertions(+), 29 deletions(-) diff --git a/chain/block.go b/chain/block.go index 56aea0ce17..fd97d86f1d 100644 --- a/chain/block.go +++ b/chain/block.go @@ -14,11 +14,6 @@ import ( "github.com/ava-labs/avalanchego/snow/consensus/snowman" "github.com/ava-labs/avalanchego/snow/engine/snowman/block" "github.com/ava-labs/avalanchego/utils/set" -<<<<<<< HEAD - "github.com/ava-labs/avalanchego/utils/units" -======= - "github.com/ava-labs/avalanchego/vms/platformvm/warp" ->>>>>>> 0804930f (make root generation a function) "github.com/ava-labs/avalanchego/x/merkledb" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/trace" @@ -43,8 +38,7 @@ type StatefulBlock struct { Tmstmp int64 `json:"timestamp"` Hght uint64 `json:"height"` - Txs []*Transaction `json:"txs"` - TxsRoot []byte `json:"txsRoot"` + Txs []*Transaction `json:"txs"` // StateRoot is the root of the post-execution state // of [Prnt]. @@ -251,22 +245,6 @@ func (b *StatelessBlock) initializeBuilt( for _, tx := range b.Txs { b.txsSet.Add(tx.ID()) } - - // transaction hash generation - var merkleItems [][]byte - for _, tx := range b.Txs { - merkleItems = append(merkleItems, tx.Bytes()) - } - for _, result := range b.results { - merkleItems = append(merkleItems, result.Output) - } - - root, _, err := utils.GenerateMerkleRoot(ctx, b.vm.Tracer(), merkleItems) - if err != nil { - return err - } - b.TxsRoot = root - return nil } diff --git a/utils/utils.go b/utils/utils.go index 7c9851eced..6be3a31eb4 100644 --- a/utils/utils.go +++ b/utils/utils.go @@ -4,7 +4,6 @@ package utils import ( - "context" "fmt" "math" "net" @@ -14,16 +13,11 @@ import ( "strconv" "time" - "github.com/ava-labs/avalanchego/database" - "github.com/ava-labs/avalanchego/database/memdb" "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/trace" "github.com/ava-labs/avalanchego/utils/hashing" "github.com/ava-labs/avalanchego/utils/perms" "github.com/onsi/ginkgo/v2/formatter" - "github.com/ava-labs/avalanchego/utils/units" - "github.com/ava-labs/avalanchego/x/merkledb" "github.com/ava-labs/hypersdk/consts" ) @@ -123,6 +117,7 @@ func LoadBytes(filename string, expectedSize int) ([]byte, error) { } return bytes, nil } +<<<<<<< HEAD // Generate merkle root for a set of items // this function does not take ownership of given bytes array @@ -164,3 +159,5 @@ func GenerateMerkleRoot(ctx context.Context, tracer trace.Tracer, merkleItems [] return root[:], db, nil } +======= +>>>>>>> b31136f4 (move root generation func to merkle package, tx root by items of [txID + result]) From 9e3491bdb34c9177a383584f7e90cbf3e1687617 Mon Sep 17 00:00:00 2001 From: Francois Hardrouyere Date: Mon, 29 Apr 2024 21:40:41 +0200 Subject: 
[PATCH 43/48] Revert "rebase & blk marshal/unmarshal & merkleroot to ids.ID" This reverts commit f009424f24e9a01b4d1fb83492ad8fdfd533d071. focus changes on the bench --- .gitignore | 1 + merkle/merkle.go | 5 +++++ 2 files changed, 6 insertions(+) diff --git a/.gitignore b/.gitignore index d3aab7c4a3..2d307eb3ba 100644 --- a/.gitignore +++ b/.gitignore @@ -71,3 +71,4 @@ Cargo.lock x/programs/cmd/simulator/simulator *.code-workspace +x/programs/cmd/simulator/simulator diff --git a/merkle/merkle.go b/merkle/merkle.go index 6a33cc490e..a5c77706cb 100644 --- a/merkle/merkle.go +++ b/merkle/merkle.go @@ -8,10 +8,15 @@ import ( "github.com/ava-labs/avalanchego/database" "github.com/ava-labs/avalanchego/database/memdb" +<<<<<<< HEAD "github.com/ava-labs/avalanchego/ids" +======= + "github.com/ava-labs/avalanchego/trace" +>>>>>>> f2bb2f97 (Revert "rebase & blk marshal/unmarshal & merkleroot to ids.ID") "github.com/ava-labs/avalanchego/x/merkledb" "github.com/ava-labs/hypersdk/utils" + "github.com/ava-labs/avalanchego/ids" ) // Generate merkle root for a set of items From 87e75c96c7c6acc9ae49b9cf4986676806a85e18 Mon Sep 17 00:00:00 2001 From: Francois Hardrouyere Date: Mon, 29 Apr 2024 21:36:15 +0200 Subject: [PATCH 44/48] sync with main --- .gitignore | 4 +--- x/programs/cmd/simulator/cmd/program.go | 1 + 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/.gitignore b/.gitignore index 2d307eb3ba..fca0663d0c 100644 --- a/.gitignore +++ b/.gitignore @@ -69,6 +69,4 @@ target/ Cargo.lock **/*.rs.bk -x/programs/cmd/simulator/simulator -*.code-workspace -x/programs/cmd/simulator/simulator +x/programs/cmd/simulator/simulator \ No newline at end of file diff --git a/x/programs/cmd/simulator/cmd/program.go b/x/programs/cmd/simulator/cmd/program.go index 7a2b33012b..d0c9a98307 100644 --- a/x/programs/cmd/simulator/cmd/program.go +++ b/x/programs/cmd/simulator/cmd/program.go @@ -144,6 +144,7 @@ func programExecuteFunc( ) (ids.ID, []int64, uint64, error) { // simulate create program transaction programTxID, err := generateRandomID() + if err != nil { return ids.Empty, nil, 0, err } From b503be39584aadb9c5ab055fbe39b5783b8d1617 Mon Sep 17 00:00:00 2001 From: Francois Hardrouyere Date: Mon, 29 Apr 2024 21:37:01 +0200 Subject: [PATCH 45/48] lint --- x/programs/cmd/simulator/cmd/program.go | 1 - 1 file changed, 1 deletion(-) diff --git a/x/programs/cmd/simulator/cmd/program.go b/x/programs/cmd/simulator/cmd/program.go index d0c9a98307..7a2b33012b 100644 --- a/x/programs/cmd/simulator/cmd/program.go +++ b/x/programs/cmd/simulator/cmd/program.go @@ -144,7 +144,6 @@ func programExecuteFunc( ) (ids.ID, []int64, uint64, error) { // simulate create program transaction programTxID, err := generateRandomID() - if err != nil { return ids.Empty, nil, 0, err } From d46cdbd3d12e884a1d4f17cda693b9ef8064a61d Mon Sep 17 00:00:00 2001 From: Francois Hardrouyere Date: Mon, 29 Apr 2024 21:45:56 +0200 Subject: [PATCH 46/48] sync main --- utils/utils.go | 44 ------------------------- x/programs/cmd/simulator/cmd/program.go | 1 + 2 files changed, 1 insertion(+), 44 deletions(-) diff --git a/utils/utils.go b/utils/utils.go index 6be3a31eb4..c19c780abb 100644 --- a/utils/utils.go +++ b/utils/utils.go @@ -117,47 +117,3 @@ func LoadBytes(filename string, expectedSize int) ([]byte, error) { } return bytes, nil } -<<<<<<< HEAD - -// Generate merkle root for a set of items -// this function does not take ownership of given bytes array -func GenerateMerkleRoot(ctx context.Context, tracer trace.Tracer, merkleItems [][]byte) ([]byte, 
merkledb.MerkleDB, error) { - var batchOps []database.BatchOp - - for _, item := range merkleItems { - key := ToID(item) - batchOps = append(batchOps, database.BatchOp{ - Key: key[:], - Value: item, - }) - } - - db, err := merkledb.New(ctx, memdb.New(), merkledb.Config{ - BranchFactor: merkledb.BranchFactor16, - HistoryLength: 100, - EvictionBatchSize: units.MiB, - IntermediateNodeCacheSize: units.MiB, - ValueNodeCacheSize: units.MiB, - Tracer: tracer, - }) - if err != nil { - return nil, nil, err - } - - view, err := db.NewView(ctx, merkledb.ViewChanges{BatchOps: batchOps}) - if err != nil { - return nil, nil, err - } - if err := view.CommitToDB(ctx); err != nil { - return nil, nil, err - } - - root, err := db.GetMerkleRoot(ctx) - if err != nil { - return nil, nil, err - } - - return root[:], db, nil -} -======= ->>>>>>> b31136f4 (move root generation func to merkle package, tx root by items of [txID + result]) diff --git a/x/programs/cmd/simulator/cmd/program.go b/x/programs/cmd/simulator/cmd/program.go index 7a2b33012b..d0c9a98307 100644 --- a/x/programs/cmd/simulator/cmd/program.go +++ b/x/programs/cmd/simulator/cmd/program.go @@ -144,6 +144,7 @@ func programExecuteFunc( ) (ids.ID, []int64, uint64, error) { // simulate create program transaction programTxID, err := generateRandomID() + if err != nil { return ids.Empty, nil, 0, err } From 50eac97770a46a69401a35c6ef3479d11f7d31d8 Mon Sep 17 00:00:00 2001 From: Francois Hardrouyere Date: Fri, 3 May 2024 17:01:47 +0200 Subject: [PATCH 47/48] remove diff tag --- merkle/merkle.go | 5 ----- x/programs/cmd/simulator/cmd/program.go | 1 - 2 files changed, 6 deletions(-) diff --git a/merkle/merkle.go b/merkle/merkle.go index a5c77706cb..6a33cc490e 100644 --- a/merkle/merkle.go +++ b/merkle/merkle.go @@ -8,15 +8,10 @@ import ( "github.com/ava-labs/avalanchego/database" "github.com/ava-labs/avalanchego/database/memdb" -<<<<<<< HEAD "github.com/ava-labs/avalanchego/ids" -======= - "github.com/ava-labs/avalanchego/trace" ->>>>>>> f2bb2f97 (Revert "rebase & blk marshal/unmarshal & merkleroot to ids.ID") "github.com/ava-labs/avalanchego/x/merkledb" "github.com/ava-labs/hypersdk/utils" - "github.com/ava-labs/avalanchego/ids" ) // Generate merkle root for a set of items diff --git a/x/programs/cmd/simulator/cmd/program.go b/x/programs/cmd/simulator/cmd/program.go index d0c9a98307..7a2b33012b 100644 --- a/x/programs/cmd/simulator/cmd/program.go +++ b/x/programs/cmd/simulator/cmd/program.go @@ -144,7 +144,6 @@ func programExecuteFunc( ) (ids.ID, []int64, uint64, error) { // simulate create program transaction programTxID, err := generateRandomID() - if err != nil { return ids.Empty, nil, 0, err } From 22d71c795348521b257a9ecd76568e35cb868a52 Mon Sep 17 00:00:00 2001 From: Francois Hardrouyere Date: Thu, 30 May 2024 17:01:27 +0200 Subject: [PATCH 48/48] don't hash key twice --- merkle/merkle.go | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/merkle/merkle.go b/merkle/merkle.go index 6a33cc490e..49383945db 100644 --- a/merkle/merkle.go +++ b/merkle/merkle.go @@ -10,8 +10,6 @@ import ( "github.com/ava-labs/avalanchego/database/memdb" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/x/merkledb" - - "github.com/ava-labs/hypersdk/utils" ) // Generate merkle root for a set of items @@ -19,9 +17,8 @@ func GenerateMerkleRoot(ctx context.Context, config merkledb.Config, merkleItems batchOps := make([]database.BatchOp, 0, len(merkleItems)) for _, item := range merkleItems { - key := utils.ToID(item) batchOps = 
append(batchOps, database.BatchOp{ - Key: key[:], + Key: item, Value: item, }) }
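
For reference, a minimal usage sketch of GenerateMerkleRoot as it stands after PATCH 48/48. It is not part of the patch series: the import path github.com/ava-labs/hypersdk/merkle, the sample items, and the main wrapper are assumptions for illustration only, and the merkledb.Config simply mirrors the one used by BenchmarkMerkleTxRoot in merkle/merkle_test.go.

package main

import (
	"context"
	"fmt"

	"github.com/ava-labs/avalanchego/trace"
	"github.com/ava-labs/avalanchego/utils/units"
	"github.com/ava-labs/avalanchego/x/merkledb"

	// Assumed import path for the package added by this series.
	"github.com/ava-labs/hypersdk/merkle"
)

func main() {
	ctx := context.Background()

	// Config mirrors the one used in the benchmark above.
	cfg := merkledb.Config{
		BranchFactor:              merkledb.BranchFactor16,
		HistoryLength:             100,
		IntermediateNodeCacheSize: units.MiB,
		ValueNodeCacheSize:        units.MiB,
		Tracer:                    trace.Noop,
	}

	// Placeholder leaves; in the VM these would be the block's transaction
	// bytes and result outputs.
	items := [][]byte{
		[]byte("tx-bytes-1"),
		[]byte("tx-bytes-2"),
	}

	// consumeBytes=false, matching the benchmark above.
	root, _, err := merkle.GenerateMerkleRoot(ctx, cfg, items, false)
	if err != nil {
		panic(err)
	}
	fmt.Println("txs root:", root)
}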