Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

taskpools: bump #6757

Merged
merged 4 commits into from
Dec 11, 2024
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion beacon_chain/gossip_processing/batch_validation.nim
Original file line number Diff line number Diff line change
Expand Up @@ -144,7 +144,7 @@ type

proc new*(
T: type BatchCrypto, rng: ref HmacDrbgContext,
eager: Eager, genesis_validators_root: Eth2Digest, taskpool: TaskPoolPtr):
eager: Eager, genesis_validators_root: Eth2Digest, taskpool: Taskpool):
Result[ref BatchCrypto, string] =
let res = (ref BatchCrypto)(
rng: rng, taskpool: taskpool,
Expand Down
2 changes: 1 addition & 1 deletion beacon_chain/gossip_processing/eth2_processor.nim
Original file line number Diff line number Diff line change
Expand Up @@ -169,7 +169,7 @@ proc new*(T: type Eth2Processor,
blobQuarantine: ref BlobQuarantine,
rng: ref HmacDrbgContext,
getBeaconTime: GetBeaconTimeFn,
taskpool: TaskPoolPtr
taskpool: Taskpool
): ref Eth2Processor =
(ref Eth2Processor)(
doppelgangerDetectionEnabled: doppelgangerDetectionEnabled,
Expand Down
39 changes: 20 additions & 19 deletions beacon_chain/nimbus_beacon_node.nim
Original file line number Diff line number Diff line change
Expand Up @@ -291,7 +291,7 @@ proc initFullNode(
rng: ref HmacDrbgContext,
dag: ChainDAGRef,
clist: ChainListRef,
taskpool: TaskPoolPtr,
taskpool: Taskpool,
getBeaconTime: GetBeaconTimeFn) {.async.} =
template config(): auto = node.config

Expand Down Expand Up @@ -417,7 +417,7 @@ proc initFullNode(
blobQuarantine = newClone(BlobQuarantine.init(onBlobSidecarAdded))
dataColumnQuarantine = newClone(DataColumnQuarantine.init())
supernode = node.config.subscribeAllSubnets
localCustodySubnets =
localCustodySubnets =
if supernode:
DATA_COLUMN_SIDECAR_SUBNET_COUNT.uint64
else:
Expand Down Expand Up @@ -529,28 +529,28 @@ proc initFullNode(
(proc(): bool = syncManager.inProgress),
quarantine, blobQuarantine, rmanBlockVerifier,
rmanBlockLoader, rmanBlobLoader)
# As per EIP 7594, the BN is now categorised into a

# As per EIP 7594, the BN is now categorised into a
# `Fullnode` and a `Supernode`, the fullnodes custody a
# given set of data columns, and hence ONLY subscribes to those
# data column subnet topics, however, the supernodes subscribe
# to all of the topics. This in turn keeps our `data column quarantine`
# really variable. Whenever the BN is a supernode, column quarantine
# essentially means all the NUMBER_OF_COLUMNS, as per mentioned in the
# essentially means all the NUMBER_OF_COLUMNS, as per mentioned in the
# spec. However, in terms of fullnode, quarantine is really dependent
# on the randomly assigned columns, by `get_custody_columns`.

# Hence, in order to keep column quarantine accurate and error proof
# the custody columns are computed once as the BN boots. Then the values
# are used globally around the codebase.
# are used globally around the codebase.

# `get_custody_columns` is not a very expensive function, but there
# are multiple instances of computing custody columns, especially
# are multiple instances of computing custody columns, especially
# during peer selection, sync with columns, and so on. That is why,
# the rationale is to populate it at boot and use it globally.

dataColumnQuarantine[].supernode = supernode
dataColumnQuarantine[].custody_columns =
dataColumnQuarantine[].custody_columns =
node.network.nodeId.get_custody_columns(max(SAMPLES_PER_SLOT.uint64,
localCustodySubnets))
if node.config.lightClientDataServe:
Expand Down Expand Up @@ -654,7 +654,6 @@ proc init*(T: type BeaconNode,
metadata: Eth2NetworkMetadata): Future[BeaconNode]
{.async.} =
var
taskpool: TaskPoolPtr
genesisState: ref ForkedHashedBeaconState = nil

template cfg: auto = metadata.cfg
Expand Down Expand Up @@ -690,18 +689,20 @@ proc init*(T: type BeaconNode,
altair_fork_epoch = metadata.cfg.ALTAIR_FORK_EPOCH
quit 1

try:
if config.numThreads < 0:
fatal "The number of threads --numThreads cannot be negative."
let taskpool =
try:
if config.numThreads < 0:
fatal "The number of threads --num-threads cannot be negative."
quit 1
elif config.numThreads == 0:
Taskpool.new(numThreads = min(countProcessors(), 16))
else:
Taskpool.new(numThreads = config.numThreads)
except CatchableError as e:
fatal "Cannot start taskpool", err = e.msg
quit 1
elif config.numThreads == 0:
taskpool = TaskPoolPtr.new(numThreads = min(countProcessors(), 16))
else:
taskpool = TaskPoolPtr.new(numThreads = config.numThreads)

info "Threadpool started", numThreads = taskpool.numThreads
except Exception:
raise newException(Defect, "Failure in taskpool initialization.")
info "Threadpool started", numThreads = taskpool.numThreads

if metadata.genesis.kind == BakedIn:
if config.genesisState.isSome:
Expand Down
8 changes: 3 additions & 5 deletions beacon_chain/spec/signatures_batch.nim
Original file line number Diff line number Diff line change
Expand Up @@ -26,18 +26,16 @@ import
export results, rand, altair, phase0, taskpools, signatures

type
TaskPoolPtr* = Taskpool

BatchVerifier* = object
sigVerifCache*: BatchedBLSVerifierCache
## A cache for batch BLS signature verification contexts
rng*: ref HmacDrbgContext
## A reference to the Nimbus application-wide RNG
taskpool*: TaskPoolPtr
taskpool*: Taskpool

proc init*(
T: type BatchVerifier, rng: ref HmacDrbgContext,
taskpool: TaskPoolPtr): BatchVerifier =
taskpool: Taskpool): BatchVerifier =
BatchVerifier(
sigVerifCache: BatchedBLSVerifierCache.init(taskpool),
rng: rng,
Expand All @@ -46,7 +44,7 @@ proc init*(

proc new*(
T: type BatchVerifier, rng: ref HmacDrbgContext,
taskpool: TaskPoolPtr): ref BatchVerifier =
taskpool: Taskpool): ref BatchVerifier =
(ref BatchVerifier)(
sigVerifCache: BatchedBLSVerifierCache.init(taskpool),
rng: rng,
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -144,7 +144,7 @@ proc runTest(suiteName, path: string, consensusFork: static ConsensusFork) =
lcDataConfig = LightClientDataConfig(
serve: true, importMode: LightClientDataImportMode.Full))
rng = HmacDrbgContext.new()
taskpool = TaskPool.new()
taskpool = Taskpool.new()
var
verifier = BatchVerifier.init(rng, taskpool)
quarantine = newClone(Quarantine.init())
Expand Down
Loading