Merge upstream PR ethereum#27875
miner: add to build block with EIP-4844 blobs

 Conflicts:
  miner/worker.go
Simple conflict because we added a return parameter; accepted both changes.
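For illustration, a conflict of this shape resolves roughly as below. This is a hypothetical sketch, not the actual miner/worker.go code: upstream's side of the merge adds a blob-sidecar return value, the fork's side had already added its own extra result, and "accepting both changes" means the merged helper simply returns both.

package sketch

// Placeholder types standing in for the real miner structures.
type builtBlock struct{}
type blobSidecar struct{}
type forkExtra struct{}

// buildPayload shows the merged shape: upstream's sidecars plus the fork's extra result.
func buildPayload() (*builtBlock, []*blobSidecar, *forkExtra, error) {
	// ...assemble the block as before, collecting sidecars for any blob transactions...
	return &builtBlock{}, []*blobSidecar{}, &forkExtra{}, nil
}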
tsahee committed Jan 17, 2024
2 parents 9e4f4f3 + feb8f41 commit c399c12
Showing 32 changed files with 259 additions and 188 deletions.
20 changes: 4 additions & 16 deletions .travis.yml
@@ -66,7 +66,6 @@ jobs:
go: 1.21.x
env:
- azure-linux
- GO111MODULE=on
git:
submodules: false # avoid cloning ethereum/tests
addons:
@@ -100,7 +99,6 @@ jobs:
go: 1.21.x
env:
- azure-osx
- GO111MODULE=on
git:
submodules: false # avoid cloning ethereum/tests
script:
@@ -113,30 +111,24 @@ jobs:
arch: amd64
dist: bionic
go: 1.21.x
env:
- GO111MODULE=on
script:
- go run build/ci.go test $TEST_PACKAGES
- travis_wait 30 go run build/ci.go test $TEST_PACKAGES

- stage: build
if: type = pull_request
os: linux
arch: arm64
dist: bionic
go: 1.20.x
env:
- GO111MODULE=on
script:
- go run build/ci.go test $TEST_PACKAGES
- travis_wait 30 go run build/ci.go test $TEST_PACKAGES

- stage: build
os: linux
dist: bionic
go: 1.20.x
env:
- GO111MODULE=on
script:
- go run build/ci.go test $TEST_PACKAGES
- travis_wait 30 go run build/ci.go test $TEST_PACKAGES

# This builder does the Ubuntu PPA nightly uploads
- stage: build
@@ -146,7 +138,6 @@
go: 1.21.x
env:
- ubuntu-ppa
- GO111MODULE=on
git:
submodules: false # avoid cloning ethereum/tests
addons:
@@ -170,7 +161,6 @@
go: 1.21.x
env:
- azure-purge
- GO111MODULE=on
git:
submodules: false # avoid cloning ethereum/tests
script:
@@ -182,8 +172,6 @@
os: linux
dist: bionic
go: 1.21.x
env:
- GO111MODULE=on
script:
- go run build/ci.go test -race $TEST_PACKAGES
- travis_wait 30 go run build/ci.go test -race $TEST_PACKAGES

4 changes: 2 additions & 2 deletions Makefile
@@ -6,7 +6,7 @@

GOBIN = ./build/bin
GO ?= latest
GORUN = env GO111MODULE=on go run
GORUN = go run

geth:
$(GORUN) build/ci.go install ./cmd/geth
@@ -23,7 +23,7 @@ lint: ## Run linters.
$(GORUN) build/ci.go lint

clean:
env GO111MODULE=on go clean -cache
go clean -cache
rm -fr build/_workspace/pkg/ $(GOBIN)/*

# The devtools target installs tools required for 'go generate'.
7 changes: 4 additions & 3 deletions accounts/abi/method.go
@@ -127,11 +127,12 @@ func NewMethod(name string, rawName string, funType FunctionType, mutability str
state = state + " "
}
identity := fmt.Sprintf("function %v", rawName)
if funType == Fallback {
switch funType {
case Fallback:
identity = "fallback"
} else if funType == Receive {
case Receive:
identity = "receive"
} else if funType == Constructor {
case Constructor:
identity = "constructor"
}
str := fmt.Sprintf("%v(%v) %sreturns(%v)", identity, strings.Join(inputNames, ", "), state, strings.Join(outputNames, ", "))
7 changes: 4 additions & 3 deletions accounts/abi/method_test.go
@@ -84,11 +84,12 @@ func TestMethodString(t *testing.T) {

for _, test := range table {
var got string
if test.method == "fallback" {
switch test.method {
case "fallback":
got = abi.Fallback.String()
} else if test.method == "receive" {
case "receive":
got = abi.Receive.String()
} else {
default:
got = abi.Methods[test.method].String()
}
if got != test.expectation {
7 changes: 4 additions & 3 deletions accounts/abi/unpack.go
@@ -160,13 +160,14 @@ func forEachUnpack(t Type, output []byte, start, size int) (interface{}, error)
// this value will become our slice or our array, depending on the type
var refSlice reflect.Value

if t.T == SliceTy {
switch t.T {
case SliceTy:
// declare our slice
refSlice = reflect.MakeSlice(t.GetType(), size, size)
} else if t.T == ArrayTy {
case ArrayTy:
// declare our array
refSlice = reflect.New(t.GetType()).Elem()
} else {
default:
return nil, errors.New("abi: invalid type in array/slice unpacking stage")
}

17 changes: 9 additions & 8 deletions beacon/engine/types.go
@@ -23,7 +23,6 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto/kzg4844"
"github.com/ethereum/go-ethereum/trie"
)

@@ -237,7 +236,7 @@ func ExecutableDataToBlock(params ExecutableData, versionedHashes []common.Hash)

// BlockToExecutableData constructs the ExecutableData structure by filling the
// fields from the given block. It assumes the given block is post-merge block.
func BlockToExecutableData(block *types.Block, fees *big.Int, blobs []kzg4844.Blob, commitments []kzg4844.Commitment, proofs []kzg4844.Proof) *ExecutionPayloadEnvelope {
func BlockToExecutableData(block *types.Block, fees *big.Int, sidecars []*types.BlobTxSidecar) *ExecutionPayloadEnvelope {
data := &ExecutableData{
BlockHash: block.Hash(),
ParentHash: block.ParentHash(),
@@ -258,17 +257,19 @@ func BlockToExecutableData(block *types.Block, fees *big.Int, blobs []kzg4844.Bl
ExcessBlobGas: block.ExcessBlobGas(),
// TODO BeaconRoot
}
blobsBundle := BlobsBundleV1{
bundle := BlobsBundleV1{
Commitments: make([]hexutil.Bytes, 0),
Blobs: make([]hexutil.Bytes, 0),
Proofs: make([]hexutil.Bytes, 0),
}
for i := range blobs {
blobsBundle.Blobs = append(blobsBundle.Blobs, hexutil.Bytes(blobs[i][:]))
blobsBundle.Commitments = append(blobsBundle.Commitments, hexutil.Bytes(commitments[i][:]))
blobsBundle.Proofs = append(blobsBundle.Proofs, hexutil.Bytes(proofs[i][:]))
for _, sidecar := range sidecars {
for j := range sidecar.Blobs {
bundle.Blobs = append(bundle.Blobs, hexutil.Bytes(sidecar.Blobs[j][:]))
bundle.Commitments = append(bundle.Commitments, hexutil.Bytes(sidecar.Commitments[j][:]))
bundle.Proofs = append(bundle.Proofs, hexutil.Bytes(sidecar.Proofs[j][:]))
}
}
return &ExecutionPayloadEnvelope{ExecutionPayload: data, BlockValue: fees, BlobsBundle: &blobsBundle}
return &ExecutionPayloadEnvelope{ExecutionPayload: data, BlockValue: fees, BlobsBundle: &bundle}
}

// ExecutionPayloadBodyV1 is used in the response to GetPayloadBodiesByHashV1 and GetPayloadBodiesByRangeV1
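As a usage sketch of the new signature: callers now pass one types.BlobTxSidecar per blob transaction, and the helper flattens the blobs, commitments, and proofs into the bundle. Only the BlockToExecutableData call and the sidecar field names come from the diff; the wrapper below and its placeholder values are illustrative.

package sketch

import (
	"math/big"

	"github.com/ethereum/go-ethereum/beacon/engine"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/crypto/kzg4844"
)

// toEnvelope feeds a block plus a single (empty, illustrative) sidecar through the
// new three-argument BlockToExecutableData.
func toEnvelope(block *types.Block, fees *big.Int) *engine.ExecutionPayloadEnvelope {
	sidecars := []*types.BlobTxSidecar{{
		Blobs:       []kzg4844.Blob{{}},
		Commitments: []kzg4844.Commitment{{}},
		Proofs:      []kzg4844.Proof{{}},
	}}
	return engine.BlockToExecutableData(block, fees, sidecars)
}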
6 changes: 3 additions & 3 deletions cmd/utils/flags.go
@@ -2073,10 +2073,10 @@ func MakeChainDatabase(ctx *cli.Context, stack *node.Node, readonly bool) ethdb.
// tryMakeReadOnlyDatabase try to open the chain database in read-only mode,
// or fallback to write mode if the database is not initialized.
func tryMakeReadOnlyDatabase(ctx *cli.Context, stack *node.Node) ethdb.Database {
// If datadir doesn't exist we need to open db in write-mode
// so database engine can create files.
// If the database doesn't exist we need to open it in write-mode to allow
// the engine to create files.
readonly := true
if !common.FileExist(stack.ResolvePath("chaindata")) {
if rawdb.PreexistingDatabase(stack.ResolvePath("chaindata")) == "" {
readonly = false
}
return MakeChainDatabase(ctx, stack, readonly)
4 changes: 2 additions & 2 deletions consensus/misc/eip4844/eip4844.go
@@ -42,8 +42,8 @@ func VerifyEIP4844Header(parent, header *types.Header) error {
return errors.New("header is missing blobGasUsed")
}
// Verify that the blob gas used remains within reasonable limits.
if *header.BlobGasUsed > params.BlobTxMaxBlobGasPerBlock {
return fmt.Errorf("blob gas used %d exceeds maximum allowance %d", *header.BlobGasUsed, params.BlobTxMaxBlobGasPerBlock)
if *header.BlobGasUsed > params.MaxBlobGasPerBlock {
return fmt.Errorf("blob gas used %d exceeds maximum allowance %d", *header.BlobGasUsed, params.MaxBlobGasPerBlock)
}
if *header.BlobGasUsed%params.BlobTxBlobGasPerBlob != 0 {
return fmt.Errorf("blob gas used %d not a multiple of blob gas per blob %d", header.BlobGasUsed, params.BlobTxBlobGasPerBlob)
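The two checks above are plain arithmetic: assuming the Cancun-era values of 131,072 blob gas per blob and at most six blobs per block (so params.MaxBlobGasPerBlock is 786,432), blobGasUsed must be a multiple of the per-blob cost and no larger than the per-block ceiling. A standalone sketch with the constants redeclared locally:

package main

import "fmt"

// Redeclared locally for illustration; the real values live in the params package
// and are assumed here to be the Cancun ones.
const (
	blobGasPerBlob     = 1 << 17            // 131072
	maxBlobGasPerBlock = 6 * blobGasPerBlob // 786432
)

func main() {
	blobGasUsed := uint64(4 * blobGasPerBlob)          // a block carrying 4 blobs
	fmt.Println(blobGasUsed <= maxBlobGasPerBlock)     // true: within the per-block allowance
	fmt.Println(blobGasUsed%blobGasPerBlob == 0)       // true: a whole number of blobs
}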
3 changes: 3 additions & 0 deletions core/blockchain_repair_test.go
@@ -1767,6 +1767,7 @@ func testRepairWithScheme(t *testing.T, tt *rewindTest, snapshots bool, scheme s
db, err := rawdb.Open(rawdb.OpenOptions{
Directory: datadir,
AncientsDirectory: ancient,
Ephemeral: true,
})
if err != nil {
t.Fatalf("Failed to create persistent database: %v", err)
@@ -1851,6 +1852,7 @@ func testRepairWithScheme(t *testing.T, tt *rewindTest, snapshots bool, scheme s
db, err = rawdb.Open(rawdb.OpenOptions{
Directory: datadir,
AncientsDirectory: ancient,
Ephemeral: true,
})
if err != nil {
t.Fatalf("Failed to reopen persistent database: %v", err)
@@ -1972,6 +1974,7 @@ func testIssue23496(t *testing.T, scheme string) {
db, err = rawdb.Open(rawdb.OpenOptions{
Directory: datadir,
AncientsDirectory: ancient,
Ephemeral: true,
})
if err != nil {
t.Fatalf("Failed to reopen persistent database: %v", err)
1 change: 1 addition & 0 deletions core/blockchain_sethead_test.go
@@ -1971,6 +1971,7 @@ func testSetHeadWithScheme(t *testing.T, tt *rewindTest, snapshots bool, scheme
db, err := rawdb.Open(rawdb.OpenOptions{
Directory: datadir,
AncientsDirectory: ancient,
Ephemeral: true,
})
if err != nil {
t.Fatalf("Failed to create persistent database: %v", err)
2 changes: 2 additions & 0 deletions core/blockchain_snapshot_test.go
@@ -68,6 +68,7 @@ func (basic *snapshotTestBasic) prepare(t *testing.T) (*BlockChain, []*types.Blo
db, err := rawdb.Open(rawdb.OpenOptions{
Directory: datadir,
AncientsDirectory: ancient,
Ephemeral: true,
})
if err != nil {
t.Fatalf("Failed to create persistent database: %v", err)
@@ -258,6 +259,7 @@ func (snaptest *crashSnapshotTest) test(t *testing.T) {
newdb, err := rawdb.Open(rawdb.OpenOptions{
Directory: snaptest.datadir,
AncientsDirectory: snaptest.ancient,
Ephemeral: true,
})
if err != nil {
t.Fatalf("Failed to reopen persistent database: %v", err)
13 changes: 8 additions & 5 deletions core/rawdb/database.go
@@ -326,10 +326,10 @@
dbLeveldb = "leveldb"
)

// hasPreexistingDb checks the given data directory whether a database is already
// PreexistingDatabase checks the given data directory whether a database is already
// instantiated at that location, and if so, returns the type of database (or the
// empty string).
func hasPreexistingDb(path string) string {
func PreexistingDatabase(path string) string {
if _, err := os.Stat(filepath.Join(path, "CURRENT")); err != nil {
return "" // No pre-existing db
}
@@ -352,6 +352,9 @@ type OpenOptions struct {
Cache int // the capacity(in megabytes) of the data caching
Handles int // number of files to be open simultaneously
ReadOnly bool
// Ephemeral means that filesystem sync operations should be avoided: data integrity in the face of
// a crash is not important. This option should typically be used in tests.
Ephemeral bool
}

// openKeyValueDatabase opens a disk-based key-value database, e.g. leveldb or pebble.
@@ -367,14 +370,14 @@ func openKeyValueDatabase(o OpenOptions) (ethdb.Database, error) {
}
// Retrieve any pre-existing database's type and use that or the requested one
// as long as there's no conflict between the two types
existingDb := hasPreexistingDb(o.Directory)
existingDb := PreexistingDatabase(o.Directory)
if len(existingDb) != 0 && len(o.Type) != 0 && o.Type != existingDb {
return nil, fmt.Errorf("db.engine choice was %v but found pre-existing %v database in specified data directory", o.Type, existingDb)
}
if o.Type == dbPebble || existingDb == dbPebble {
if PebbleEnabled {
log.Info("Using pebble as the backing database")
return NewPebbleDBDatabase(o.Directory, o.Cache, o.Handles, o.Namespace, o.ReadOnly)
return NewPebbleDBDatabase(o.Directory, o.Cache, o.Handles, o.Namespace, o.ReadOnly, o.Ephemeral)
} else {
return nil, errors.New("db.engine 'pebble' not supported on this platform")
}
@@ -387,7 +390,7 @@ func openKeyValueDatabase(o OpenOptions) (ethdb.Database, error) {
// on supported platforms and LevelDB on anything else.
if PebbleEnabled {
log.Info("Defaulting to pebble as the backing database")
return NewPebbleDBDatabase(o.Directory, o.Cache, o.Handles, o.Namespace, o.ReadOnly)
return NewPebbleDBDatabase(o.Directory, o.Cache, o.Handles, o.Namespace, o.ReadOnly, o.Ephemeral)
} else {
log.Info("Defaulting to leveldb as the backing database")
return NewLevelDBDatabase(o.Directory, o.Cache, o.Handles, o.Namespace, o.ReadOnly)
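The new Ephemeral flag threads through to the pebble backend so tests can skip filesystem syncs. A minimal sketch of a test opening a throwaway database with it, mirroring the test-side changes above; rawdb.Open and OpenOptions come from the diff, while the helper name and the temp-directory choice are illustrative.

package sketch

import (
	"testing"

	"github.com/ethereum/go-ethereum/core/rawdb"
)

// openScratchDB opens a key-value database whose durability does not matter,
// so fsyncs can be skipped for faster tests.
func openScratchDB(t *testing.T) {
	db, err := rawdb.Open(rawdb.OpenOptions{
		Directory: t.TempDir(),
		Ephemeral: true, // no fsync: crash safety is irrelevant for test data
	})
	if err != nil {
		t.Fatalf("failed to open ephemeral database: %v", err)
	}
	t.Cleanup(func() { db.Close() })
}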
4 changes: 2 additions & 2 deletions core/rawdb/databases_64bit.go
@@ -28,8 +28,8 @@ const PebbleEnabled = true

// NewPebbleDBDatabase creates a persistent key-value database without a freezer
// moving immutable chain segments into cold storage.
func NewPebbleDBDatabase(file string, cache int, handles int, namespace string, readonly bool) (ethdb.Database, error) {
db, err := pebble.New(file, cache, handles, namespace, readonly)
func NewPebbleDBDatabase(file string, cache int, handles int, namespace string, readonly, ephemeral bool) (ethdb.Database, error) {
db, err := pebble.New(file, cache, handles, namespace, readonly, ephemeral)
if err != nil {
return nil, err
}
2 changes: 1 addition & 1 deletion core/rawdb/databases_non64bit.go
@@ -29,6 +29,6 @@ const PebbleEnabled = false

// NewPebbleDBDatabase creates a persistent key-value database without a freezer
// moving immutable chain segments into cold storage.
func NewPebbleDBDatabase(file string, cache int, handles int, namespace string, readonly bool) (ethdb.Database, error) {
func NewPebbleDBDatabase(file string, cache int, handles int, namespace string, readonly, ephemeral bool) (ethdb.Database, error) {
return nil, errors.New("pebble is not supported on this platform")
}
12 changes: 10 additions & 2 deletions core/rawdb/schema.go
@@ -195,7 +195,11 @@ func accountSnapshotKey(hash common.Hash) []byte {

// storageSnapshotKey = SnapshotStoragePrefix + account hash + storage hash
func storageSnapshotKey(accountHash, storageHash common.Hash) []byte {
return append(append(SnapshotStoragePrefix, accountHash.Bytes()...), storageHash.Bytes()...)
buf := make([]byte, len(SnapshotStoragePrefix)+common.HashLength+common.HashLength)
n := copy(buf, SnapshotStoragePrefix)
n += copy(buf[n:], accountHash.Bytes())
copy(buf[n:], storageHash.Bytes())
return buf
}

// storageSnapshotsKey = SnapshotStoragePrefix + account hash + storage hash
@@ -259,7 +263,11 @@ func accountTrieNodeKey(path []byte) []byte {

// storageTrieNodeKey = trieNodeStoragePrefix + accountHash + nodePath.
func storageTrieNodeKey(accountHash common.Hash, path []byte) []byte {
return append(append(trieNodeStoragePrefix, accountHash.Bytes()...), path...)
buf := make([]byte, len(trieNodeStoragePrefix)+common.HashLength+len(path))
n := copy(buf, trieNodeStoragePrefix)
n += copy(buf[n:], accountHash.Bytes())
copy(buf[n:], path)
return buf
}

// IsLegacyTrieNode reports whether a provided database entry is a legacy trie
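The key helpers above were rewritten to pre-size one buffer and copy into it instead of chaining append calls. The chained form typically allocates more than once and, more subtly, can clobber data whenever the prefix slice has spare capacity, because append then writes into the shared backing array. A standalone sketch of that pitfall (not go-ethereum code; the real prefixes are built from short literals with no spare capacity, so the rewrite is mainly about doing a single allocation):

package main

import "fmt"

func main() {
	prefix := make([]byte, 1, 8) // a prefix slice with spare capacity
	prefix[0] = 'P'

	keyA := append(prefix, 'A')             // reuses prefix's backing array
	keyB := append(prefix, 'B')             // overwrites the byte keyA just claimed
	fmt.Println(string(keyA), string(keyB)) // prints "PB PB": keyA was clobbered

	// The pre-sized pattern from the diff: one allocation, no sharing.
	key := make([]byte, len(prefix)+1)
	n := copy(key, prefix)
	key[n] = 'A'
	fmt.Println(string(key)) // prints "PA"
}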
[Diff truncated: the remaining 17 of the 32 changed files are not shown.]
