diff --git a/beacon_chain/nimbus_beacon_node.nim b/beacon_chain/nimbus_beacon_node.nim
index 60f2a87933..9f7d6a50f9 100644
--- a/beacon_chain/nimbus_beacon_node.nim
+++ b/beacon_chain/nimbus_beacon_node.nim
@@ -422,6 +422,9 @@ proc initFullNode(
         DATA_COLUMN_SIDECAR_SUBNET_COUNT.uint64
       else:
         CUSTODY_REQUIREMENT.uint64
+    custody_columns_set =
+      node.network.nodeId.get_custody_columns_set(max(SAMPLES_PER_SLOT.uint64,
+        localCustodySubnets))
     consensusManager = ConsensusManager.new(
       dag, attestationPool, quarantine, node.elManager,
       ActionTracker.init(node.network.nodeId, config.subscribeAllSubnets),
@@ -532,8 +535,8 @@ proc initFullNode(
       processor: processor,
       network: node.network)
     requestManager = RequestManager.init(
-      node.network, supernode, dag.cfg.DENEB_FORK_EPOCH, getBeaconTime,
-      (proc(): bool = syncManager.inProgress),
+      node.network, supernode, custody_columns_set, dag.cfg.DENEB_FORK_EPOCH,
+      getBeaconTime, (proc(): bool = syncManager.inProgress),
       quarantine, blobQuarantine, dataColumnQuarantine, rmanBlockVerifier,
       rmanBlockLoader, rmanBlobLoader, rmanDataColumnLoader)
 
@@ -559,7 +562,7 @@ proc initFullNode(
   dataColumnQuarantine[].supernode = supernode
   dataColumnQuarantine[].custody_columns =
     node.network.nodeId.get_custody_columns(max(SAMPLES_PER_SLOT.uint64,
-    localCustodySubnets))
+      localCustodySubnets))
 
   if node.config.subscribeAllSubnets:
     node.network.loadCscnetMetadataAndEnr(DATA_COLUMN_SIDECAR_SUBNET_COUNT.uint8)
diff --git a/beacon_chain/spec/eip7594_helpers.nim b/beacon_chain/spec/eip7594_helpers.nim
index 0c489dfa4e..4d3cdf0613 100644
--- a/beacon_chain/spec/eip7594_helpers.nim
+++ b/beacon_chain/spec/eip7594_helpers.nim
@@ -35,6 +35,16 @@ func sortedColumnIndices*(columnsPerSubnet: ColumnIndex,
   res.sort
   res
 
+func sortedColumnIndicesToHashSet*(columnsPerSubnet: ColumnIndex,
+                                   subnetIds: HashSet[uint64]):
+                                   HashSet[ColumnIndex] =
+  var res: HashSet[ColumnIndex] = initHashSet[ColumnIndex]()
+  for i in 0'u64 ..< columnsPerSubnet:
+    for subnetId in subnetIds:
+      let index = DATA_COLUMN_SIDECAR_SUBNET_COUNT * i + subnetId
+      res.incl(ColumnIndex(index)) # Add to HashSet
+  res
+
 func sortedColumnIndexList*(columnsPerSubnet: ColumnIndex,
                             subnetIds: HashSet[uint64]):
                             List[ColumnIndex, NUMBER_OF_COLUMNS] =
@@ -93,6 +103,20 @@ func get_custody_columns*(node_id: NodeId,
 
   sortedColumnIndices(ColumnIndex(columns_per_subnet), subnet_ids)
 
+func get_custody_columns_set*(node_id: NodeId,
+                              custody_subnet_count: uint64):
+                              HashSet[ColumnIndex] =
+  # This method returns a HashSet of column indices,
+  # the method is specifically relevant while peer filtering
+  let
+    subnet_ids =
+      get_custody_column_subnets(node_id, custody_subnet_count)
+  const
+    columns_per_subnet =
+      NUMBER_OF_COLUMNS div DATA_COLUMN_SIDECAR_SUBNET_COUNT
+
+  sortedColumnIndicesToHashSet(ColumnIndex(columns_per_subnet), subnet_ids)
+
 func get_custody_column_list*(node_id: NodeId,
                               custody_subnet_count: uint64):
                               List[ColumnIndex, NUMBER_OF_COLUMNS] =
diff --git a/beacon_chain/sync/request_manager.nim b/beacon_chain/sync/request_manager.nim
index 66771931b0..913ac09f13 100644
--- a/beacon_chain/sync/request_manager.nim
+++ b/beacon_chain/sync/request_manager.nim
@@ -64,6 +64,7 @@ type
   RequestManager* = object
     network*: Eth2Node
     supernode*: bool
+    custody_columns_set: HashSet[ColumnIndex]
    getBeaconTime: GetBeaconTimeFn
     inhibit: InhibitFn
     quarantine: ref Quarantine
@@ -85,6 +86,7 @@ func shortLog*(x: seq[FetchRecord]): string =
 
 proc init*(T: type RequestManager, network: Eth2Node,
            supernode: bool,
+           custody_columns_set: HashSet[ColumnIndex],
            denebEpoch: Epoch,
            getBeaconTime: GetBeaconTimeFn,
            inhibit: InhibitFn,
@@ -98,6 +100,7 @@ proc init*(T: type RequestManager, network: Eth2Node,
   RequestManager(
     network: network,
     supernode: supernode,
+    custody_columns_set: custody_columns_set,
     getBeaconTime: getBeaconTime,
     inhibit: inhibit,
     quarantine: quarantine,
@@ -293,12 +296,6 @@ proc checkPeerCustody*(rman: RequestManager,
     elif peer.lookupCscFromPeer() ==
         CUSTODY_REQUIREMENT.uint64:
 
-      # Fetch local custody column
-      let
-        localNodeId = rman.network.nodeId
-        localCustodyColumns =
-          localNodeId.get_custody_columns(max(SAMPLES_PER_SLOT.uint64,
-            CUSTODY_REQUIREMENT.uint64))
       # Fetch the remote custody count
       let
         remoteCustodySubnetCount =
@@ -309,10 +306,10 @@ proc checkPeerCustody*(rman: RequestManager,
       let
         remoteNodeId = fetchNodeIdFromPeerId(peer)
         remoteCustodyColumns =
-          remoteNodeId.get_custody_columns(max(SAMPLES_PER_SLOT.uint64,
+          remoteNodeId.get_custody_columns_set(max(SAMPLES_PER_SLOT.uint64,
             remoteCustodySubnetCount))
 
-      for local_column in localCustodyColumns:
+      for local_column in rman.custody_columns_set:
         if local_column notin remoteCustodyColumns:
           return false
 
@@ -556,15 +553,7 @@ proc getMissingDataColumns(rman: RequestManager): seq[DataColumnIdentifier] =
           commitments = len(forkyBlck.message.body.blob_kzg_commitments)
         for idx in missing.indices:
           let id = DataColumnIdentifier(block_root: columnless.root, index: idx)
-          let local_csc =
-            if rman.supernode:
-              DATA_COLUMN_SIDECAR_SUBNET_COUNT.uint64
-            else:
-              CUSTODY_REQUIREMENT.uint64
-          let local_custody =
-            rman.network.nodeId.get_custody_columns(max(SAMPLES_PER_SLOT.uint64,
-              local_csc))
-          if id.index in local_custody and id notin fetches and
+          if id.index in rman.custody_columns_set and id notin fetches and
              len(forkyBlck.message.body.blob_kzg_commitments) != 0:
             fetches.add(id)
       else: