Merge in latest (v1.10.25) geth changes #19

Merged · 29 commits · Sep 20, 2022

Commits
23ac8df
cmd, core: save preimages on genesis creation (#25538)
gballet Aug 18, 2022
cce7f08
rlp/rlpgen: fix error handling when target type not found (#25547)
jtraglia Aug 18, 2022
a1b8892
trie: improve node rlp decoding performance (#25357)
rjl493456442 Aug 18, 2022
2c5648d
all: fix some typos (#25551)
jtraglia Aug 19, 2022
fa1305f
internal/ethapi: fix comment typo (#25548)
ucwong Aug 19, 2022
32e8490
accounts/abi/bind/backends: typo fix (#25549)
ucwong Aug 19, 2022
656dc8c
eth, les: unlock downloader peerSet if there's an error (#25546)
jtraglia Aug 19, 2022
9762ddf
cmd/geth: parse uint64 value with ParseUint instead of Atoi (#25545)
jtraglia Aug 19, 2022
77308cd
consensus/beacon: check ttd reached on pos blocks (#25552)
MariusVanDerWijden Aug 19, 2022
36874b6
eth/filters: add global block logs cache (#25459)
s1na Aug 19, 2022
0865880
accounts/abi: fix set function (#25477)
zhiqiangxu Aug 19, 2022
ac7ad81
internal/ethapi: fix build regression (#25555)
fjl Aug 19, 2022
0ce494b
eth/fetcher: don't spend too much time on transaction inclusion (#25524)
holiman Aug 19, 2022
02418c2
Revert "eth/fetcher: don't spend too much time on transaction inclusi…
karalabe Aug 22, 2022
395f3d4
eth/catalyst: warn less frequently if no beacon client is available (…
karalabe Aug 22, 2022
2de49b0
params: release go-ethereum v1.10.22
fjl Aug 22, 2022
6d711f0
params: begin v1.10.23 release cycle
fjl Aug 22, 2022
81bd998
core, eth/downloader: handle spurious junk bodies from racey rollback…
karalabe Aug 23, 2022
5758d1f
core/state, trie: fix trie flush order for proper pruning
karalabe Aug 23, 2022
45a660a
consensus/beacon: don't ignore errors
holiman Aug 23, 2022
9ed10b9
Merge pull request #25581 from karalabe/triedb-fix-flush-order
karalabe Aug 23, 2022
4c114af
Merge pull request #25582 from holiman/err_handling
karalabe Aug 23, 2022
d901d85
params: release Geth v1.10.23
karalabe Aug 24, 2022
d0dc349
graphql: return correct logs for tx (#25612)
s1na Aug 31, 2022
3b41be6
graphql: fixes missing tx logs (#25745)
s1na Sep 13, 2022
972007a
Release Geth v1.10.24
karalabe Sep 14, 2022
8f61fc8
params: set TerminalTotalDifficultyPassed to true (#25769)
MariusVanDerWijden Sep 15, 2022
69568c5
params: release Geth v1.10.25
karalabe Sep 15, 2022
6569fd8
Merge tag 'v1.10.25' into eip-4844
roberto-bayardo Sep 19, 2022
33 changes: 14 additions & 19 deletions accounts/abi/bind/backends/simulated.go
@@ -68,7 +68,8 @@ type SimulatedBackend struct {
pendingState *state.StateDB // Currently pending state that will be the active on request
pendingReceipts types.Receipts // Currently receipts for the pending block

events *filters.EventSystem // Event system for filtering log events live
events *filters.EventSystem // for filtering log events live
filterSystem *filters.FilterSystem // for filtering database logs

config *params.ChainConfig
}
@@ -86,7 +87,11 @@ func NewSimulatedBackendWithDatabase(database ethdb.Database, alloc core.Genesis
blockchain: blockchain,
config: genesis.Config,
}
backend.events = filters.NewEventSystem(&filterBackend{database, blockchain, backend}, false)

filterBackend := &filterBackend{database, blockchain, backend}
backend.filterSystem = filters.NewFilterSystem(filterBackend, filters.Config{})
backend.events = filters.NewEventSystem(backend.filterSystem, false)

backend.rollback(blockchain.CurrentBlock())
return backend
}
@@ -609,7 +614,7 @@ func (b *SimulatedBackend) callContract(ctx context.Context, call ethereum.CallM
// User specified the legacy gas field, convert to 1559 gas typing
call.GasFeeCap, call.GasTipCap = call.GasPrice, call.GasPrice
} else {
// User specified 1559 gas feilds (or none), use those
// User specified 1559 gas fields (or none), use those
if call.GasFeeCap == nil {
call.GasFeeCap = new(big.Int)
}
@@ -689,7 +694,7 @@ func (b *SimulatedBackend) FilterLogs(ctx context.Context, query ethereum.Filter
var filter *filters.Filter
if query.BlockHash != nil {
// Block filter requested, construct a single-shot filter
filter = filters.NewBlockFilter(&filterBackend{b.database, b.blockchain, b}, *query.BlockHash, query.Addresses, query.Topics)
filter = b.filterSystem.NewBlockFilter(*query.BlockHash, query.Addresses, query.Topics)
} else {
// Initialize unset filter boundaries to run from genesis to chain head
from := int64(0)
@@ -701,7 +706,7 @@ func (b *SimulatedBackend) FilterLogs(ctx context.Context, query ethereum.Filter
to = query.ToBlock.Int64()
}
// Construct the range filter
filter = filters.NewRangeFilter(&filterBackend{b.database, b.blockchain, b}, from, to, query.Addresses, query.Topics)
filter = b.filterSystem.NewRangeFilter(from, to, query.Addresses, query.Topics)
}
// Run the filter and return all the logs
logs, err := filter.Logs(ctx)
@@ -828,7 +833,8 @@ type filterBackend struct {
backend *SimulatedBackend
}

func (fb *filterBackend) ChainDb() ethdb.Database { return fb.db }
func (fb *filterBackend) ChainDb() ethdb.Database { return fb.db }

func (fb *filterBackend) EventMux() *event.TypeMux { panic("not supported") }

func (fb *filterBackend) HeaderByNumber(ctx context.Context, block rpc.BlockNumber) (*types.Header, error) {
@@ -854,19 +860,8 @@ func (fb *filterBackend) GetReceipts(ctx context.Context, hash common.Hash) (typ
return rawdb.ReadReceipts(fb.db, hash, *number, fb.bc.Config()), nil
}

func (fb *filterBackend) GetLogs(ctx context.Context, hash common.Hash) ([][]*types.Log, error) {
number := rawdb.ReadHeaderNumber(fb.db, hash)
if number == nil {
return nil, nil
}
receipts := rawdb.ReadReceipts(fb.db, hash, *number, fb.bc.Config())
if receipts == nil {
return nil, nil
}
logs := make([][]*types.Log, len(receipts))
for i, receipt := range receipts {
logs[i] = receipt.Logs
}
func (fb *filterBackend) GetLogs(ctx context.Context, hash common.Hash, number uint64) ([][]*types.Log, error) {
logs := rawdb.ReadLogs(fb.db, hash, number, fb.bc.Config())
return logs, nil
}

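The hunks above replace the one-off filterBackend constructions with a shared filters.FilterSystem and let GetLogs read stored logs via rawdb.ReadLogs instead of re-deriving them from receipts. As a rough illustration (not code from this PR), the sketch below shows how a test might exercise SimulatedBackend.FilterLogs after the change; the genesis allocation, gas limit, and contract address are placeholder values.

```go
// Minimal sketch (not from this PR): querying logs through the reworked
// filter path of a SimulatedBackend. Alloc, gas limit, and the contract
// address are placeholders.
package main

import (
	"context"
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum"
	"github.com/ethereum/go-ethereum/accounts/abi/bind/backends"
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core"
)

func main() {
	// An empty genesis allocation is enough to spin up the backend.
	sim := backends.NewSimulatedBackend(core.GenesisAlloc{}, 10_000_000)
	defer sim.Close()

	// FilterLogs now builds its range filter via the backend's FilterSystem
	// (b.filterSystem.NewRangeFilter) rather than a throwaway filterBackend.
	logs, err := sim.FilterLogs(context.Background(), ethereum.FilterQuery{
		FromBlock: big.NewInt(0),
		Addresses: []common.Address{common.HexToAddress("0x0000000000000000000000000000000000000042")},
	})
	if err != nil {
		panic(err)
	}
	fmt.Println("matched logs:", len(logs))
}
```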
2 changes: 1 addition & 1 deletion accounts/abi/reflect.go
@@ -99,7 +99,7 @@ func mustArrayToByteSlice(value reflect.Value) reflect.Value {
func set(dst, src reflect.Value) error {
dstType, srcType := dst.Type(), src.Type()
switch {
case dstType.Kind() == reflect.Interface && dst.Elem().IsValid():
case dstType.Kind() == reflect.Interface && dst.Elem().IsValid() && (dst.Elem().Type().Kind() == reflect.Ptr || dst.Elem().CanSet()):
return set(dst.Elem(), src)
case dstType.Kind() == reflect.Ptr && dstType.Elem() != reflect.TypeOf(big.Int{}):
return set(dst.Elem(), src)
2 changes: 1 addition & 1 deletion accounts/abi/reflect_test.go
@@ -32,7 +32,7 @@ type reflectTest struct {

var reflectTests = []reflectTest{
{
name: "OneToOneCorrespondance",
name: "OneToOneCorrespondence",
args: []string{"fieldA"},
struc: struct {
FieldA int `abi:"fieldA"`
5 changes: 5 additions & 0 deletions accounts/abi/unpack_test.go
@@ -352,6 +352,11 @@ func TestMethodMultiReturn(t *testing.T) {
&[]interface{}{&expected.Int, &expected.String},
"",
"Can unpack into a slice",
}, {
&[]interface{}{&bigint, ""},
&[]interface{}{&expected.Int, expected.String},
"",
"Can unpack into a slice without indirection",
}, {
&[2]interface{}{&bigint, new(string)},
&[2]interface{}{&expected.Int, &expected.String},
2 changes: 1 addition & 1 deletion accounts/keystore/account_cache_test.go
@@ -318,7 +318,7 @@ func waitForAccounts(wantAccounts []accounts.Account, ks *KeyStore) error {
func TestUpdatedKeyfileContents(t *testing.T) {
t.Parallel()

// Create a temporary kesytore to test with
// Create a temporary keystore to test with
rand.Seed(time.Now().UnixNano())
dir := filepath.Join(os.TempDir(), fmt.Sprintf("eth-keystore-updatedkeyfilecontents-test-%d-%d", os.Getpid(), rand.Int()))
ks := NewKeyStore(dir, LightScryptN, LightScryptP)
4 changes: 2 additions & 2 deletions accounts/keystore/file_cache.go
@@ -39,7 +39,7 @@ type fileCache struct {
func (fc *fileCache) scan(keyDir string) (mapset.Set, mapset.Set, mapset.Set, error) {
t0 := time.Now()

// List all the failes from the keystore folder
// List all the files from the keystore folder
files, err := os.ReadDir(keyDir)
if err != nil {
return nil, nil, nil, err
@@ -61,7 +61,7 @@ func (fc *fileCache) scan(keyDir string) (mapset.Set, mapset.Set, mapset.Set, er
log.Trace("Ignoring file on account scan", "path", path)
continue
}
// Gather the set of all and fresly modified files
// Gather the set of all and freshly modified files
all.Add(path)

info, err := fi.Info()
2 changes: 1 addition & 1 deletion accounts/keystore/keystore_test.go
@@ -214,7 +214,7 @@ func TestSignRace(t *testing.T) {
// Tests that the wallet notifier loop starts and stops correctly based on the
// addition and removal of wallet event subscriptions.
func TestWalletNotifierLifecycle(t *testing.T) {
// Create a temporary kesytore to test with
// Create a temporary keystore to test with
_, ks := tmpKeyStore(t, false)

// Ensure that the notification updater is not running yet
4 changes: 2 additions & 2 deletions accounts/usbwallet/trezor.go
@@ -196,10 +196,10 @@ func (w *trezorDriver) trezorDerive(derivationPath []uint32) (common.Address, er
if _, err := w.trezorExchange(&trezor.EthereumGetAddress{AddressN: derivationPath}, address); err != nil {
return common.Address{}, err
}
if addr := address.GetAddressBin(); len(addr) > 0 { // Older firmwares use binary fomats
if addr := address.GetAddressBin(); len(addr) > 0 { // Older firmwares use binary formats
return common.BytesToAddress(addr), nil
}
if addr := address.GetAddressHex(); len(addr) > 0 { // Newer firmwares use hexadecimal fomats
if addr := address.GetAddressHex(); len(addr) > 0 { // Newer firmwares use hexadecimal formats
return common.HexToAddress(addr), nil
}
return common.Address{}, errors.New("missing derived address")
2 changes: 1 addition & 1 deletion accounts/usbwallet/wallet.go
@@ -380,7 +380,7 @@ func (w *wallet) selfDerive() {
// of legacy-ledger, the first account on the legacy-path will
// be shown to the user, even if we don't actively track it
if i < len(nextAddrs)-1 {
w.log.Info("Skipping trakcking first account on legacy path, use personal.deriveAccount(<url>,<path>, false) to track",
w.log.Info("Skipping tracking first account on legacy path, use personal.deriveAccount(<url>,<path>, false) to track",
"path", path, "address", nextAddrs[i])
break
}
2 changes: 1 addition & 1 deletion build/ci.go
@@ -608,7 +608,7 @@ func doDocker(cmdline []string) {
}
if mismatch {
// Build numbers mismatching, retry in a short time to
// avoid concurrent failes in both publisher images. If
// avoid concurrent fails in both publisher images. If
// however the retry failed too, it means the concurrent
// builder is still crunching, let that do the publish.
if i == 0 {
2 changes: 1 addition & 1 deletion cmd/faucet/faucet.go
@@ -709,7 +709,7 @@ func authTwitter(url string, tokenV1, tokenV2 string) (string, string, string, c
case tokenV2 != "":
return authTwitterWithTokenV2(tweetID, tokenV2)
}
// Twiter API token isn't provided so we just load the public posts
// Twitter API token isn't provided so we just load the public posts
// and scrape it for the Ethereum address and profile URL. We need to load
// the mobile page though since the main page loads tweet contents via JS.
url = strings.Replace(url, "https://twitter.com/", "https://mobile.twitter.com/", 1)
6 changes: 3 additions & 3 deletions cmd/geth/chaincmd.go
@@ -384,12 +384,12 @@ func parseDumpConfig(ctx *cli.Context, stack *node.Node) (*state.DumpConfig, eth
return nil, nil, common.Hash{}, fmt.Errorf("block %x not found", hash)
}
} else {
number, err := strconv.Atoi(arg)
number, err := strconv.ParseUint(arg, 10, 64)
if err != nil {
return nil, nil, common.Hash{}, err
}
if hash := rawdb.ReadCanonicalHash(db, uint64(number)); hash != (common.Hash{}) {
header = rawdb.ReadHeader(db, hash, uint64(number))
if hash := rawdb.ReadCanonicalHash(db, number); hash != (common.Hash{}) {
header = rawdb.ReadHeader(db, hash, number)
} else {
return nil, nil, common.Hash{}, fmt.Errorf("header for block %d not found", number)
}
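The switch from strconv.Atoi to strconv.ParseUint means the block-number argument is parsed directly as an unsigned 64-bit value, so negative input is rejected instead of silently wrapping when cast to uint64. The standalone sketch below (illustrative values only, not part of the PR) contrasts the two calls.

```go
// Standalone illustration: ParseUint rejects negative and out-of-range input
// that the old Atoi-then-cast path would accept or mis-handle.
package main

import (
	"fmt"
	"strconv"
)

func main() {
	for _, arg := range []string{"15537394", "-1", "18446744073709551615"} {
		n, err := strconv.ParseUint(arg, 10, 64) // new path: unsigned 64-bit, base 10
		fmt.Printf("ParseUint(%q) = %d, err = %v\n", arg, n, err)

		i, err := strconv.Atoi(arg) // old path: signed int, then cast to uint64
		fmt.Printf("Atoi(%q)      = %d (as uint64: %d), err = %v\n", arg, i, uint64(i), err)
	}
}
```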
11 changes: 9 additions & 2 deletions cmd/geth/config.go
@@ -163,7 +163,9 @@ func makeFullNode(ctx *cli.Context) (*node.Node, ethapi.Backend) {
override := ctx.Bool(utils.OverrideTerminalTotalDifficultyPassed.Name)
cfg.Eth.OverrideTerminalTotalDifficultyPassed = &override
}

backend, eth := utils.RegisterEthService(stack, &cfg.Eth)

// Warn users to migrate if they have a legacy freezer format.
if eth != nil && !ctx.IsSet(utils.IgnoreLegacyReceiptsFlag.Name) {
firstIdx := uint64(0)
@@ -181,10 +183,15 @@ func makeFullNode(ctx *cli.Context) (*node.Node, ethapi.Backend) {
utils.Fatalf("Database has receipts with a legacy format. Please run `geth db freezer-migrate`.")
}
}
// Configure GraphQL if requested

// Configure log filter RPC API.
filterSystem := utils.RegisterFilterAPI(stack, backend, &cfg.Eth)

// Configure GraphQL if requested.
if ctx.IsSet(utils.GraphQLEnabledFlag.Name) {
utils.RegisterGraphQLService(stack, backend, cfg.Node)
utils.RegisterGraphQLService(stack, backend, filterSystem, &cfg.Node)
}

// Add the Ethereum Stats daemon if requested.
if cfg.Ethstats.URL != "" {
utils.RegisterEthStatsService(stack, backend, cfg.Ethstats.URL)
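With RegisterFilterAPI wired in here, the node serves the eth filter methods (eth_getLogs and friends) from the new FilterSystem and its block-log cache. A client-side sketch follows, assuming a node exposing HTTP RPC on localhost:8545; the block range and contract address are placeholders.

```go
// Client-side sketch (assumptions: a geth node with --http on localhost:8545;
// block range and contract address are placeholders). The server side of
// eth_getLogs is what RegisterFilterAPI registers above.
package main

import (
	"context"
	"fmt"
	"log"
	"math/big"

	"github.com/ethereum/go-ethereum"
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/ethclient"
)

func main() {
	client, err := ethclient.Dial("http://localhost:8545")
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	// Issues an eth_getLogs call, answered from the filter system's log cache
	// when the requested blocks are warm.
	logs, err := client.FilterLogs(context.Background(), ethereum.FilterQuery{
		FromBlock: big.NewInt(15_500_000),
		ToBlock:   big.NewInt(15_500_100),
		Addresses: []common.Address{common.HexToAddress("0x0000000000000000000000000000000000000042")},
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("logs in range:", len(logs))
}
```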
2 changes: 1 addition & 1 deletion cmd/geth/consolecmd_test.go
@@ -155,7 +155,7 @@ To exit, press ctrl-d or type exit
}

// trulyRandInt generates a crypto random integer used by the console tests to
// not clash network ports with other tests running cocurrently.
// not clash network ports with other tests running concurrently.
func trulyRandInt(lo, hi int) int {
num, _ := rand.Int(rand.Reader, big.NewInt(int64(hi-lo)))
return int(num.Int64()) + lo
1 change: 1 addition & 0 deletions cmd/geth/main.go
@@ -117,6 +117,7 @@ var (
utils.CacheSnapshotFlag,
utils.CacheNoPrefetchFlag,
utils.CachePreimagesFlag,
utils.CacheLogSizeFlag,
utils.FDLimitFlag,
utils.ListenPortFlag,
utils.DiscoveryPortFlag,
2 changes: 1 addition & 1 deletion cmd/puppeth/ssh.go
@@ -163,7 +163,7 @@ func dial(server string, pubkey []byte) (*sshClient, error) {
return nil
}
// We have a mismatch, forbid connecting
return errors.New("ssh key mismatch, readd the machine to update")
return errors.New("ssh key mismatch, re-add the machine to update")
}
client, err := ssh.Dial("tcp", hostport, &ssh.ClientConfig{User: username, Auth: auths, HostKeyCallback: keycheck})
if err != nil {
34 changes: 29 additions & 5 deletions cmd/utils/flags.go
@@ -43,6 +43,7 @@ import (
ethcatalyst "github.com/ethereum/go-ethereum/eth/catalyst"
"github.com/ethereum/go-ethereum/eth/downloader"
"github.com/ethereum/go-ethereum/eth/ethconfig"
"github.com/ethereum/go-ethereum/eth/filters"
"github.com/ethereum/go-ethereum/eth/gasprice"
"github.com/ethereum/go-ethereum/eth/tracers"
"github.com/ethereum/go-ethereum/ethdb"
@@ -64,6 +65,7 @@ import (
"github.com/ethereum/go-ethereum/p2p/nat"
"github.com/ethereum/go-ethereum/p2p/netutil"
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/rpc"
pcsclite "github.com/gballet/go-libpcsclite"
gopsutil "github.com/shirou/gopsutil/mem"
"github.com/urfave/cli/v2"
@@ -496,6 +498,12 @@ var (
Usage: "Enable recording the SHA3/keccak preimages of trie keys",
Category: flags.PerfCategory,
}
CacheLogSizeFlag = &cli.IntFlag{
Name: "cache.blocklogs",
Usage: "Size (in number of blocks) of the log cache for filtering",
Category: flags.PerfCategory,
Value: ethconfig.Defaults.FilterLogCacheSize,
}
FDLimitFlag = &cli.IntFlag{
Name: "fdlimit",
Usage: "Raise the open file descriptor resource limit (default = system fd limit)",
@@ -1821,6 +1829,9 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *ethconfig.Config) {
if ctx.IsSet(CacheFlag.Name) || ctx.IsSet(CacheSnapshotFlag.Name) {
cfg.SnapshotCache = ctx.Int(CacheFlag.Name) * ctx.Int(CacheSnapshotFlag.Name) / 100
}
if ctx.IsSet(CacheLogSizeFlag.Name) {
cfg.FilterLogCacheSize = ctx.Int(CacheLogSizeFlag.Name)
}
if !ctx.Bool(SnapshotFlag.Name) {
// If snap-sync is requested, this flag is also required
if cfg.SyncMode == downloader.SnapSync {
@@ -2024,21 +2035,34 @@ func RegisterEthService(stack *node.Node, cfg *ethconfig.Config) (ethapi.Backend
return backend.APIBackend, backend
}

// RegisterEthStatsService configures the Ethereum Stats daemon and adds it to
// the given node.
// RegisterEthStatsService configures the Ethereum Stats daemon and adds it to the node.
func RegisterEthStatsService(stack *node.Node, backend ethapi.Backend, url string) {
if err := ethstats.New(stack, backend, backend.Engine(), url); err != nil {
Fatalf("Failed to register the Ethereum Stats service: %v", err)
}
}

// RegisterGraphQLService is a utility function to construct a new service and register it against a node.
func RegisterGraphQLService(stack *node.Node, backend ethapi.Backend, cfg node.Config) {
if err := graphql.New(stack, backend, cfg.GraphQLCors, cfg.GraphQLVirtualHosts); err != nil {
// RegisterGraphQLService adds the GraphQL API to the node.
func RegisterGraphQLService(stack *node.Node, backend ethapi.Backend, filterSystem *filters.FilterSystem, cfg *node.Config) {
err := graphql.New(stack, backend, filterSystem, cfg.GraphQLCors, cfg.GraphQLVirtualHosts)
if err != nil {
Fatalf("Failed to register the GraphQL service: %v", err)
}
}

// RegisterFilterAPI adds the eth log filtering RPC API to the node.
func RegisterFilterAPI(stack *node.Node, backend ethapi.Backend, ethcfg *ethconfig.Config) *filters.FilterSystem {
isLightClient := ethcfg.SyncMode == downloader.LightSync
filterSystem := filters.NewFilterSystem(backend, filters.Config{
LogCacheSize: ethcfg.FilterLogCacheSize,
})
stack.RegisterAPIs([]rpc.API{{
Namespace: "eth",
Service: filters.NewFilterAPI(filterSystem, isLightClient),
}})
return filterSystem
}

func SetupMetrics(ctx *cli.Context) {
if metrics.Enabled {
log.Info("Enabling metrics collection")
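RegisterFilterAPI is the hook that cmd/geth uses above to expose log filtering, with --cache.blocklogs feeding FilterLogCacheSize. The sketch below is a hypothetical embedding example, not code from this PR: a custom binary reusing these helpers, with illustrative config values and default node settings.

```go
// Embedding sketch (assumption: a custom binary reusing cmd/utils; all
// config values are illustrative). Mirrors the makeFullNode wiring shown
// in cmd/geth/config.go above.
package main

import (
	"github.com/ethereum/go-ethereum/cmd/utils"
	"github.com/ethereum/go-ethereum/eth/ethconfig"
	"github.com/ethereum/go-ethereum/node"
)

func main() {
	nodeCfg := node.Config{}
	stack, err := node.New(&nodeCfg)
	if err != nil {
		utils.Fatalf("failed to create node: %v", err)
	}
	defer stack.Close()

	ethCfg := ethconfig.Defaults
	ethCfg.FilterLogCacheSize = 64 // the value --cache.blocklogs would set

	backend, _ := utils.RegisterEthService(stack, &ethCfg)

	// Register the eth log filter RPC API and keep the FilterSystem around
	// so it can also back GraphQL, as config.go does.
	filterSystem := utils.RegisterFilterAPI(stack, backend, &ethCfg)
	utils.RegisterGraphQLService(stack, backend, filterSystem, &nodeCfg)

	if err := stack.Start(); err != nil {
		utils.Fatalf("failed to start node: %v", err)
	}
	stack.Wait()
}
```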
4 changes: 2 additions & 2 deletions common/prque/prque.go
@@ -41,13 +41,13 @@ func (p *Prque) Push(data interface{}, priority int64) {
heap.Push(p.cont, &item{data, priority})
}

// Peek returns the value with the greates priority but does not pop it off.
// Peek returns the value with the greatest priority but does not pop it off.
func (p *Prque) Peek() (interface{}, int64) {
item := p.cont.blocks[0][0]
return item.value, item.priority
}

// Pops the value with the greates priority off the stack and returns it.
// Pops the value with the greatest priority off the stack and returns it.
// Currently no shrinking is done.
func (p *Prque) Pop() (interface{}, int64) {
item := heap.Pop(p.cont).(*item)
18 changes: 16 additions & 2 deletions consensus/beacon/consensus.go
@@ -79,7 +79,10 @@ func (beacon *Beacon) Author(header *types.Header) (common.Address, error) {
// VerifyHeader checks whether a header conforms to the consensus rules of the
// stock Ethereum consensus engine.
func (beacon *Beacon) VerifyHeader(chain consensus.ChainHeaderReader, header *types.Header, seal bool) error {
reached, _ := IsTTDReached(chain, header.ParentHash, header.Number.Uint64()-1)
reached, err := IsTTDReached(chain, header.ParentHash, header.Number.Uint64()-1)
if err != nil {
return err
}
if !reached {
return beacon.ethone.VerifyHeader(chain, header, seal)
}
@@ -114,8 +117,19 @@ func (beacon *Beacon) VerifyHeaders(chain consensus.ChainHeaderReader, headers [
}
}

// All the headers have passed the transition point, use new rules.
if len(preHeaders) == 0 {
// All the headers are pos headers. Verify that the parent block reached total terminal difficulty.
if reached, err := IsTTDReached(chain, headers[0].ParentHash, headers[0].Number.Uint64()-1); !reached {
// TTD not reached for the first block, mark subsequent with invalid terminal block
if err == nil {
err = consensus.ErrInvalidTerminalBlock
}
results := make(chan error, len(headers))
for i := 0; i < len(headers); i++ {
results <- err
}
return make(chan struct{}), results
}
return beacon.verifyHeaders(chain, headers, nil)
}

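The new early-return branch in VerifyHeaders rejects an all-PoS batch whose parent has not reached the terminal total difficulty by handing back a results channel already filled with one error per header. The self-contained sketch below (placeholder types and names, not geth's) shows that pre-filled-channel pattern in isolation.

```go
// Self-contained sketch of the pattern used above: when a whole batch is
// known to be invalid up front, return an already-filled results channel
// instead of spawning verification work. Types here are placeholders.
package main

import (
	"errors"
	"fmt"
)

var errInvalidTerminalBlock = errors.New("invalid terminal block")

// rejectAll mirrors the early-return branch in VerifyHeaders: one error per
// header, plus an abort channel that nothing listens on.
func rejectAll(n int, err error) (chan struct{}, <-chan error) {
	results := make(chan error, n) // buffered so every send completes immediately
	for i := 0; i < n; i++ {
		results <- err
	}
	return make(chan struct{}), results
}

func main() {
	abort, results := rejectAll(3, errInvalidTerminalBlock)
	defer close(abort)
	for i := 0; i < 3; i++ {
		fmt.Println("header", i, "->", <-results)
	}
}
```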