From 796112d0d6ca78e5cf25b1d8fc79d62b4e2ce4c9 Mon Sep 17 00:00:00 2001 From: Dmitry Markin Date: Thu, 21 Dec 2023 18:19:47 +0200 Subject: [PATCH 01/31] Defer new peer actions in `ChainSync` --- .../network/sync/src/strategy/chain_sync.rs | 256 ++++++++++-------- 1 file changed, 143 insertions(+), 113 deletions(-) diff --git a/substrate/client/network/sync/src/strategy/chain_sync.rs b/substrate/client/network/sync/src/strategy/chain_sync.rs index 62c260d582b5..af8d6ef5c9d5 100644 --- a/substrate/client/network/sync/src/strategy/chain_sync.rs +++ b/substrate/client/network/sync/src/strategy/chain_sync.rs @@ -331,6 +331,8 @@ struct ForkTarget { /// defines what we are busy with. #[derive(Copy, Clone, Eq, PartialEq, Debug)] pub(crate) enum PeerSyncState { + /// New peer not yet handled by sync state machine. + New, /// Available for sync requests. Available, /// Searching for ancestors the Peer has in common with us. @@ -468,130 +470,153 @@ where /// Notify syncing state machine that a new sync peer has connected. pub fn add_peer(&mut self, peer_id: PeerId, best_hash: B::Hash, best_number: NumberFor) { - match self.add_peer_inner(peer_id, best_hash, best_number) { - Ok(Some(request)) => - self.actions.push(ChainSyncAction::SendBlockRequest { peer_id, request }), - Ok(None) => {}, - Err(bad_peer) => self.actions.push(ChainSyncAction::DropPeer(bad_peer)), - } + self.peers.insert( + peer_id, + PeerSync { + peer_id, + common_number: Zero::zero(), + best_hash, + best_number, + state: PeerSyncState::New, + }, + ); + + // match self.add_peer_inner(peer_id, best_hash, best_number) { + // Ok(Some(request)) => + // self.actions.push(ChainSyncAction::SendBlockRequest { peer_id, request }), + // Ok(None) => {}, + // Err(bad_peer) => self.actions.push(ChainSyncAction::DropPeer(bad_peer)), + // } } - #[must_use] - fn add_peer_inner( + fn handle_new_peers( &mut self, - peer_id: PeerId, - best_hash: B::Hash, - best_number: NumberFor, - ) -> Result>, BadPeer> { - // There is nothing sync can get from the node that has no blockchain data. - match self.block_status(&best_hash) { - Err(e) => { - debug!(target: LOG_TARGET, "Error reading blockchain: {e}"); - Err(BadPeer(peer_id, rep::BLOCKCHAIN_READ_ERROR)) - }, - Ok(BlockStatus::KnownBad) => { - info!( - "💔 New peer {peer_id} with known bad best block {best_hash} ({best_number})." - ); - Err(BadPeer(peer_id, rep::BAD_BLOCK)) - }, - Ok(BlockStatus::Unknown) => { - if best_number.is_zero() { + ) -> impl Iterator), BadPeer>> + '_ { + let new_peers = self + .peers + .iter() + .filter_map(|(peer_id, peer)| match peer.state { + PeerSyncState::New => Some((*peer_id, peer.clone())), + _ => None, + }) + .collect::)>>(); + + new_peers.into_iter().filter_map(|(peer_id, peer)| { + let best_hash = peer.best_hash; + let best_number = peer.best_number; + // There is nothing sync can get from the node that has no blockchain data. + match self.block_status(&best_hash) { + Err(e) => { + debug!(target: LOG_TARGET, "Error reading blockchain: {e}"); + Some(Err(BadPeer(peer_id, rep::BLOCKCHAIN_READ_ERROR))) + }, + Ok(BlockStatus::KnownBad) => { info!( - "💔 New peer {} with unknown genesis hash {} ({}).", - peer_id, best_hash, best_number, + "💔 New peer {peer_id} with known bad best block {best_hash} ({best_number})." 
); - return Err(BadPeer(peer_id, rep::GENESIS_MISMATCH)) - } + Some(Err(BadPeer(peer_id, rep::BAD_BLOCK))) + }, + Ok(BlockStatus::Unknown) => { + if best_number.is_zero() { + info!( + "💔 New peer {} with unknown genesis hash {} ({}).", + peer_id, best_hash, best_number, + ); + return Some(Err(BadPeer(peer_id, rep::GENESIS_MISMATCH))) + } - // If there are more than `MAJOR_SYNC_BLOCKS` in the import queue then we have - // enough to do in the import queue that it's not worth kicking off - // an ancestor search, which is what we do in the next match case below. - if self.queue_blocks.len() > MAJOR_SYNC_BLOCKS.into() { - debug!( - target: LOG_TARGET, - "New peer {} with unknown best hash {} ({}), assuming common block.", - peer_id, - self.best_queued_hash, - self.best_queued_number - ); + // If there are more than `MAJOR_SYNC_BLOCKS` in the import queue then we + // have enough to do in the import queue that it's not worth kicking off + // an ancestor search, which is what we do in the next match case below. + if self.queue_blocks.len() > MAJOR_SYNC_BLOCKS.into() { + debug!( + target: LOG_TARGET, + "New peer {} with unknown best hash {} ({}), assuming common block.", + peer_id, + self.best_queued_hash, + self.best_queued_number + ); + // Replace `PeerSyncState::New` peer. + self.peers.insert( + peer_id, + PeerSync { + peer_id, + common_number: self.best_queued_number, + best_hash, + best_number, + state: PeerSyncState::Available, + }, + ); + return None + } + + // If we are at genesis, just start downloading. + let (state, req) = if self.best_queued_number.is_zero() { + debug!( + target: LOG_TARGET, + "New peer {peer_id} with best hash {best_hash} ({best_number}).", + ); + + (PeerSyncState::Available, None) + } else { + let common_best = std::cmp::min(self.best_queued_number, best_number); + + debug!( + target: LOG_TARGET, + "New peer {} with unknown best hash {} ({}), searching for common ancestor.", + peer_id, + best_hash, + best_number + ); + + ( + PeerSyncState::AncestorSearch { + current: common_best, + start: self.best_queued_number, + state: AncestorSearchState::ExponentialBackoff(One::one()), + }, + Some(ancestry_request::(common_best)), + ) + }; + + self.allowed_requests.add(&peer_id); + // Replace `PeerSyncState::New` peer. self.peers.insert( peer_id, PeerSync { peer_id, - common_number: self.best_queued_number, + common_number: Zero::zero(), best_hash, best_number, - state: PeerSyncState::Available, + state, }, ); - return Ok(None) - } - // If we are at genesis, just start downloading. - let (state, req) = if self.best_queued_number.is_zero() { + req.map(|req| Ok((peer_id, req))) + }, + Ok(BlockStatus::Queued) | + Ok(BlockStatus::InChainWithState) | + Ok(BlockStatus::InChainPruned) => { debug!( target: LOG_TARGET, - "New peer {peer_id} with best hash {best_hash} ({best_number}).", + "New peer {peer_id} with known best hash {best_hash} ({best_number}).", ); - - (PeerSyncState::Available, None) - } else { - let common_best = std::cmp::min(self.best_queued_number, best_number); - - debug!( - target: LOG_TARGET, - "New peer {} with unknown best hash {} ({}), searching for common ancestor.", + // Replace `PeerSyncState::New` peer. 
+ self.peers.insert( peer_id, - best_hash, - best_number - ); - - ( - PeerSyncState::AncestorSearch { - current: common_best, - start: self.best_queued_number, - state: AncestorSearchState::ExponentialBackoff(One::one()), + PeerSync { + peer_id, + common_number: std::cmp::min(self.best_queued_number, best_number), + best_hash, + best_number, + state: PeerSyncState::Available, }, - Some(ancestry_request::(common_best)), - ) - }; - - self.allowed_requests.add(&peer_id); - self.peers.insert( - peer_id, - PeerSync { - peer_id, - common_number: Zero::zero(), - best_hash, - best_number, - state, - }, - ); - - Ok(req) - }, - Ok(BlockStatus::Queued) | - Ok(BlockStatus::InChainWithState) | - Ok(BlockStatus::InChainPruned) => { - debug!( - target: LOG_TARGET, - "New peer {peer_id} with known best hash {best_hash} ({best_number}).", - ); - self.peers.insert( - peer_id, - PeerSync { - peer_id, - common_number: std::cmp::min(self.best_queued_number, best_number), - best_hash, - best_number, - state: PeerSyncState::Available, - }, - ); - self.allowed_requests.add(&peer_id); - Ok(None) - }, - } + ); + self.allowed_requests.add(&peer_id); + None + }, + } + }) } /// Inform sync about a new best imported block. @@ -887,6 +912,7 @@ where return Ok(()) } }, + PeerSyncState::New | PeerSyncState::Available | PeerSyncState::DownloadingJustification(..) | PeerSyncState::DownloadingState => Vec::new(), @@ -1330,16 +1356,11 @@ where } // handle peers that were in other states. - let action = match self.add_peer_inner(peer_id, p.best_hash, p.best_number) { - // since the request is not a justification, remove it from pending responses - Ok(None) => ChainSyncAction::CancelBlockRequest { peer_id }, - // update the request if the new one is available - Ok(Some(request)) => ChainSyncAction::SendBlockRequest { peer_id, request }, - // this implies that we need to drop pending response from the peer - Err(bad_peer) => ChainSyncAction::DropPeer(bad_peer), - }; + self.add_peer(peer_id, p.best_hash, p.best_number); - self.actions.push(action); + // since the request is not a justification, remove it from pending responses + // TODO: check `PeerSyncState`? + self.actions.push(ChainSyncAction::CancelBlockRequest { peer_id }); }); } @@ -1863,6 +1884,15 @@ where /// Get pending actions to perform. 
#[must_use] pub fn actions(&mut self) -> impl Iterator> { + let new_peer_actions = self + .handle_new_peers() + .map(|res| match res { + Ok((peer_id, request)) => ChainSyncAction::SendBlockRequest { peer_id, request }, + Err(bad_peer) => ChainSyncAction::DropPeer(bad_peer), + }) + .collect::>>(); + self.actions.extend(new_peer_actions.into_iter()); + let block_requests = self .block_requests() .into_iter() From 125f9a740ec9dafcb4630b70f6c67ae609f48065 Mon Sep 17 00:00:00 2001 From: Dmitry Markin Date: Mon, 25 Dec 2023 12:41:40 +0200 Subject: [PATCH 02/31] Introduce `PeerPool` for per-strategy peer allocation --- Cargo.lock | 1 + substrate/client/network/sync/Cargo.toml | 1 + substrate/client/network/sync/src/strategy.rs | 73 ++++++++++++++++++- .../network/sync/src/strategy/chain_sync.rs | 7 -- 4 files changed, 73 insertions(+), 9 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index b20adebf963a..917997ec2831 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -16201,6 +16201,7 @@ dependencies = [ "log", "mockall", "parity-scale-codec", + "parking_lot 0.12.1", "prost", "prost-build", "quickcheck", diff --git a/substrate/client/network/sync/Cargo.toml b/substrate/client/network/sync/Cargo.toml index dd993e7a3df3..352f1351c87d 100644 --- a/substrate/client/network/sync/Cargo.toml +++ b/substrate/client/network/sync/Cargo.toml @@ -28,6 +28,7 @@ futures-timer = "3.0.2" libp2p = "0.51.4" log = "0.4.17" mockall = "0.11.3" +parking_lot = "0.12.1" prost = "0.11" schnellru = "0.2.1" smallvec = "1.11.0" diff --git a/substrate/client/network/sync/src/strategy.rs b/substrate/client/network/sync/src/strategy.rs index ee99252fc911..34ee5a03c02e 100644 --- a/substrate/client/network/sync/src/strategy.rs +++ b/substrate/client/network/sync/src/strategy.rs @@ -30,7 +30,8 @@ use crate::{ }; use chain_sync::{ChainSync, ChainSyncAction, ChainSyncMode}; use libp2p::PeerId; -use log::{error, info}; +use log::{error, info, warn}; +use parking_lot::Mutex; use prometheus_endpoint::Registry; use sc_client_api::{BlockBackend, ProofProvider}; use sc_consensus::{BlockImportError, BlockImportStatus, IncomingBlock}; @@ -45,7 +46,7 @@ use sp_runtime::{ Justifications, }; use state::{StateStrategy, StateStrategyAction}; -use std::sync::Arc; +use std::{collections::HashMap, sync::Arc}; use warp::{EncodedProof, WarpProofRequest, WarpSync, WarpSyncAction, WarpSyncConfig}; /// Corresponding `ChainSync` mode. 
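
The `PeerPool` introduced in the next hunk leans on the shape PATCH 01 gave `ChainSync` above: `add_peer` became infallible bookkeeping, and all fallible work now runs when `actions()` is drained (`handle_new_peers`). A reduced, self-contained sketch of that deferred pattern — the types and the `String` actions are stand-ins, not the real `ChainSync` API:

use std::collections::HashMap;

#[derive(Clone, Copy, PartialEq, Eq)]
enum PeerState {
    New,
    Available,
}

#[derive(Default)]
struct Sync {
    peers: HashMap<u64, PeerState>,
    actions: Vec<String>,
}

impl Sync {
    /// Cheap and infallible: only record the peer.
    fn add_peer(&mut self, peer_id: u64) {
        self.peers.insert(peer_id, PeerState::New);
    }

    /// Deferred: `New` peers are processed when actions are drained, so
    /// failures surface as actions (e.g. `DropPeer`) instead of `Result`s.
    fn actions(&mut self) -> Vec<String> {
        let new: Vec<u64> = self
            .peers
            .iter()
            .filter_map(|(id, state)| (*state == PeerState::New).then_some(*id))
            .collect();
        for peer_id in new {
            // The real code consults `block_status()` here and may emit
            // `DropPeer` or an ancestry search request instead.
            self.peers.insert(peer_id, PeerState::Available);
            self.actions.push(format!("SendBlockRequest({peer_id})"));
        }
        std::mem::take(&mut self.actions)
    }
}

fn main() {
    let mut sync = Sync::default();
    sync.add_peer(42);
    assert_eq!(sync.actions(), vec!["SendBlockRequest(42)".to_string()]);
    assert!(sync.actions().is_empty()); // drained; peer 42 is now `Available`
}
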
@@ -71,6 +72,74 @@ pub struct SyncingConfig { pub metrics_registry: Option, } +enum PeerStatus { + Available, + Reserved, +} + +impl PeerStatus { + fn is_available(&self) -> bool { + matches!(self, PeerStatus::Available) + } +} + +#[derive(Clone, Default)] +struct PeerPool { + peers: Arc>>, +} + +impl PeerPool { + fn add_peer(&self, peer_id: PeerId) { + self.peers.lock().insert(peer_id, PeerStatus::Available); + } + + fn remove_peer(&self, peer_id: &PeerId) { + self.peers.lock().remove(peer_id); + } + + fn available_peers(&self) -> Vec { + self.peers + .lock() + .iter() + .filter_map( + |(peer_id, status)| if status.is_available() { Some(*peer_id) } else { None }, + ) + .collect() + } + + fn try_reserve_peer(&self, peer_id: &PeerId) -> bool { + match self.peers.lock().get_mut(peer_id) { + Some(peer_status) => match peer_status { + PeerStatus::Available => { + *peer_status = PeerStatus::Reserved; + true + }, + PeerStatus::Reserved => false, + }, + None => { + warn!(target: LOG_TARGET, "Trying to reserve unknown peer {peer_id}."); + false + }, + } + } + + fn free_peer(&self, peer_id: &PeerId) { + match self.peers.lock().get_mut(peer_id) { + Some(peer_status) => match peer_status { + PeerStatus::Available => { + warn!(target: LOG_TARGET, "Trying to free available peer {peer_id}.") + }, + PeerStatus::Reserved => { + *peer_status = PeerStatus::Available; + }, + }, + None => { + warn!(target: LOG_TARGET, "Trying to free unknown peer {peer_id}."); + }, + } + } +} + #[derive(Debug)] pub enum SyncingAction { /// Send block request to peer. Always implies dropping a stale block request to the same peer. diff --git a/substrate/client/network/sync/src/strategy/chain_sync.rs b/substrate/client/network/sync/src/strategy/chain_sync.rs index af8d6ef5c9d5..df29a2abc466 100644 --- a/substrate/client/network/sync/src/strategy/chain_sync.rs +++ b/substrate/client/network/sync/src/strategy/chain_sync.rs @@ -480,13 +480,6 @@ where state: PeerSyncState::New, }, ); - - // match self.add_peer_inner(peer_id, best_hash, best_number) { - // Ok(Some(request)) => - // self.actions.push(ChainSyncAction::SendBlockRequest { peer_id, request }), - // Ok(None) => {}, - // Err(bad_peer) => self.actions.push(ChainSyncAction::DropPeer(bad_peer)), - // } } fn handle_new_peers( From be88848ea174e7f19bcaa723360bec2c489901d8 Mon Sep 17 00:00:00 2001 From: Dmitry Markin Date: Mon, 25 Dec 2023 16:38:40 +0200 Subject: [PATCH 03/31] Prepare `SyncingStrategy` for parallel strategies --- substrate/client/network/sync/src/engine.rs | 56 +-- substrate/client/network/sync/src/strategy.rs | 465 ++++++++++-------- .../client/network/sync/src/strategy/state.rs | 5 - 3 files changed, 278 insertions(+), 248 deletions(-) diff --git a/substrate/client/network/sync/src/engine.rs b/substrate/client/network/sync/src/engine.rs index 7486c091ebf1..1787acbe2c07 100644 --- a/substrate/client/network/sync/src/engine.rs +++ b/substrate/client/network/sync/src/engine.rs @@ -214,9 +214,6 @@ pub struct SyncingEngine { /// Syncing strategy. strategy: SyncingStrategy, - /// Syncing configuration for startegies. - syncing_config: SyncingConfig, - /// Blockchain client. client: Arc, @@ -441,8 +438,7 @@ where .map_or(futures::future::pending().boxed().fuse(), |rx| rx.boxed().fuse()); // Initialize syncing strategy. 
- let strategy = - SyncingStrategy::new(syncing_config.clone(), client.clone(), warp_sync_config)?; + let strategy = SyncingStrategy::new(syncing_config, client.clone(), warp_sync_config)?; let block_announce_protocol_name = block_announce_config.protocol_name().clone(); let (tx, service_rx) = tracing_unbounded("mpsc_chain_sync", 100_000); @@ -471,7 +467,6 @@ where roles, client, strategy, - syncing_config, network_service, peers: HashMap::new(), block_announce_data_cache: LruMap::new(ByLength::new(cache_capacity)), @@ -661,8 +656,15 @@ where Some(event) => self.process_notification_event(event), None => return, }, - warp_target_block_header = &mut self.warp_sync_target_block_header_rx_fused => - self.pass_warp_sync_target_block_header(warp_target_block_header), + warp_target_block_header = &mut self.warp_sync_target_block_header_rx_fused => { + if let Err(_) = self.pass_warp_sync_target_block_header(warp_target_block_header) { + error!( + target: LOG_TARGET, + "Failed to set warp sync target block header, terminating `SyncingEngine`.", + ); + return + } + }, response_event = self.pending_responses.select_next_some() => self.process_response_event(response_event), validation_result = self.block_announce_validator.select_next_some() => @@ -675,14 +677,17 @@ where // Process actions requested by a syncing strategy. if let Err(e) = self.process_strategy_actions() { - error!("Terminating `SyncingEngine` due to fatal error: {e:?}"); + error!( + target: LOG_TARGET, + "Terminating `SyncingEngine` due to fatal error: {e:?}.", + ); return } } } fn process_strategy_actions(&mut self) -> Result<(), ClientError> { - for action in self.strategy.actions() { + for action in self.strategy.actions()? { match action { SyncingAction::SendBlockRequest { peer_id, request } => { // Sending block request implies dropping obsolete pending response as we are @@ -753,20 +758,6 @@ where number, ) }, - SyncingAction::Finished => { - let connected_peers = self.peers.iter().filter_map(|(peer_id, peer)| { - peer.info.roles.is_full().then_some(( - *peer_id, - peer.info.best_hash, - peer.info.best_number, - )) - }); - self.strategy.switch_to_next( - self.syncing_config.clone(), - self.client.clone(), - connected_peers, - )?; - }, } } @@ -948,23 +939,18 @@ where } } - fn pass_warp_sync_target_block_header(&mut self, header: Result) { + fn pass_warp_sync_target_block_header( + &mut self, + header: Result, + ) -> Result<(), ()> { match header { - Ok(header) => - if let SyncingStrategy::WarpSyncStrategy(warp_sync) = &mut self.strategy { - warp_sync.set_target_block(header); - } else { - error!( - target: LOG_TARGET, - "Cannot set warp sync target block: no warp sync strategy is active." - ); - debug_assert!(false); - }, + Ok(header) => self.strategy.set_warp_sync_target_block_header(header), Err(err) => { error!( target: LOG_TARGET, "Failed to get target block for warp sync. Error: {err:?}", ); + Err(()) }, } } diff --git a/substrate/client/network/sync/src/strategy.rs b/substrate/client/network/sync/src/strategy.rs index 34ee5a03c02e..721571a7db58 100644 --- a/substrate/client/network/sync/src/strategy.rs +++ b/substrate/client/network/sync/src/strategy.rs @@ -161,15 +161,17 @@ pub enum SyncingAction { number: NumberFor, justifications: Justifications, }, - /// Syncing strategy has finished. - Finished, } /// Proxy to specific syncing strategies. 
-pub enum SyncingStrategy { - WarpSyncStrategy(WarpSync), - StateSyncStrategy(StateStrategy), - ChainSyncStrategy(ChainSync), +pub struct SyncingStrategy { + config: SyncingConfig, + client: Arc, + warp: Option>, + state: Option>, + chain_sync: Option>, + connected_peers_pool: PeerPool, + connected_peers_best: HashMap)>, } impl SyncingStrategy @@ -192,37 +194,56 @@ where if let SyncMode::Warp = config.mode { let warp_sync_config = warp_sync_config .expect("Warp sync configuration must be supplied in warp sync mode."); - Ok(Self::WarpSyncStrategy(WarpSync::new(client.clone(), warp_sync_config))) + let warp_sync = WarpSync::new(client.clone(), warp_sync_config); + Ok(Self { + config, + client, + warp: Some(warp_sync), + state: None, + chain_sync: None, + connected_peers_pool: Default::default(), + connected_peers_best: Default::default(), + }) } else { - Ok(Self::ChainSyncStrategy(ChainSync::new( + let chain_sync = ChainSync::new( chain_sync_mode(config.mode), client.clone(), config.max_parallel_downloads, config.max_blocks_per_request, - config.metrics_registry, - )?)) + config.metrics_registry.clone(), + )?; + Ok(Self { + config, + client, + warp: None, + state: None, + chain_sync: Some(chain_sync), + connected_peers_pool: Default::default(), + connected_peers_best: Default::default(), + }) } } /// Notify that a new peer has connected. pub fn add_peer(&mut self, peer_id: PeerId, best_hash: B::Hash, best_number: NumberFor) { - match self { - SyncingStrategy::WarpSyncStrategy(strategy) => - strategy.add_peer(peer_id, best_hash, best_number), - SyncingStrategy::StateSyncStrategy(strategy) => - strategy.add_peer(peer_id, best_hash, best_number), - SyncingStrategy::ChainSyncStrategy(strategy) => - strategy.add_peer(peer_id, best_hash, best_number), - } + self.connected_peers_pool.add_peer(peer_id); + self.connected_peers_best.insert(peer_id, (best_hash, best_number)); + + self.warp.iter_mut().for_each(|s| s.add_peer(peer_id, best_hash, best_number)); + self.state.iter_mut().for_each(|s| s.add_peer(peer_id, best_hash, best_number)); + self.chain_sync + .iter_mut() + .for_each(|s| s.add_peer(peer_id, best_hash, best_number)); } /// Notify that a peer has disconnected. pub fn remove_peer(&mut self, peer_id: &PeerId) { - match self { - SyncingStrategy::WarpSyncStrategy(strategy) => strategy.remove_peer(peer_id), - SyncingStrategy::StateSyncStrategy(strategy) => strategy.remove_peer(peer_id), - SyncingStrategy::ChainSyncStrategy(strategy) => strategy.remove_peer(peer_id), - } + self.warp.iter_mut().for_each(|s| s.remove_peer(peer_id)); + self.state.iter_mut().for_each(|s| s.remove_peer(peer_id)); + self.chain_sync.iter_mut().for_each(|s| s.remove_peer(peer_id)); + + self.connected_peers_pool.remove_peer(peer_id); + self.connected_peers_best.remove(peer_id); } /// Submit a validated block announcement. 
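
With the enum gone, delegation changes shape: instead of matching on the active variant, every notification above is forwarded through whichever strategy fields are currently `Some`. `Option::iter_mut()` yields zero or one element, which is exactly what makes the `for_each` forwarding work; a minimal, runnable demonstration, with `Vec<u32>` standing in for a strategy:

fn main() {
    let mut warp: Option<Vec<u32>> = Some(vec![1]);
    let mut state: Option<Vec<u32>> = None;

    // Forward a notification to every active strategy; inactive ones no-op.
    warp.iter_mut().for_each(|s| s.push(2));
    state.iter_mut().for_each(|s| s.push(2));

    assert_eq!(warp, Some(vec![1, 2]));
    assert_eq!(state, None);
}
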
@@ -234,14 +255,21 @@ where peer_id: PeerId, announce: &BlockAnnounce, ) -> Option<(B::Hash, NumberFor)> { - match self { - SyncingStrategy::WarpSyncStrategy(strategy) => - strategy.on_validated_block_announce(is_best, peer_id, announce), - SyncingStrategy::StateSyncStrategy(strategy) => - strategy.on_validated_block_announce(is_best, peer_id, announce), - SyncingStrategy::ChainSyncStrategy(strategy) => - strategy.on_validated_block_announce(is_best, peer_id, announce), + let new_best = if let Some(ref mut warp) = self.warp { + warp.on_validated_block_announce(is_best, peer_id, announce) + } else if let Some(ref mut state) = self.state { + state.on_validated_block_announce(is_best, peer_id, announce) + } else if let Some(ref mut chain_sync) = self.chain_sync { + chain_sync.on_validated_block_announce(is_best, peer_id, announce) + } else { + Some((announce.header.hash(), *announce.header.number())) + }; + if let Some(new_best) = new_best { + if let Some(best) = self.connected_peers_best.get_mut(&peer_id) { + *best = new_best; + } } + new_best } /// Configure an explicit fork sync request in case external code has detected that there is a @@ -252,40 +280,33 @@ where hash: &B::Hash, number: NumberFor, ) { - match self { - SyncingStrategy::WarpSyncStrategy(_) => {}, - SyncingStrategy::StateSyncStrategy(_) => {}, - SyncingStrategy::ChainSyncStrategy(strategy) => - strategy.set_sync_fork_request(peers, hash, number), + // Fork requests are only handled by `ChainSync`. + if let Some(ref mut chain_sync) = self.chain_sync { + chain_sync.set_sync_fork_request(peers.clone(), hash, number); } } /// Request extra justification. pub fn request_justification(&mut self, hash: &B::Hash, number: NumberFor) { - match self { - SyncingStrategy::WarpSyncStrategy(_) => {}, - SyncingStrategy::StateSyncStrategy(_) => {}, - SyncingStrategy::ChainSyncStrategy(strategy) => - strategy.request_justification(hash, number), + // Justifications can only be requested via `ChainSync`. + if let Some(ref mut chain_sync) = self.chain_sync { + chain_sync.request_justification(hash, number); } } /// Clear extra justification requests. pub fn clear_justification_requests(&mut self) { - match self { - SyncingStrategy::WarpSyncStrategy(_) => {}, - SyncingStrategy::StateSyncStrategy(_) => {}, - SyncingStrategy::ChainSyncStrategy(strategy) => strategy.clear_justification_requests(), + // Justification requests can only be cleared by `ChainSync`. + if let Some(ref mut chain_sync) = self.chain_sync { + chain_sync.clear_justification_requests(); } } /// Report a justification import (successful or not). pub fn on_justification_import(&mut self, hash: B::Hash, number: NumberFor, success: bool) { - match self { - SyncingStrategy::WarpSyncStrategy(_) => {}, - SyncingStrategy::StateSyncStrategy(_) => {}, - SyncingStrategy::ChainSyncStrategy(strategy) => - strategy.on_justification_import(hash, number, success), + // Only `ChainSync` is interested in justification import. + if let Some(ref mut chain_sync) = self.chain_sync { + chain_sync.on_justification_import(hash, number, success); } } @@ -296,33 +317,29 @@ where request: BlockRequest, blocks: Vec>, ) { - match self { - SyncingStrategy::WarpSyncStrategy(strategy) => - strategy.on_block_response(peer_id, request, blocks), - SyncingStrategy::StateSyncStrategy(_) => {}, - SyncingStrategy::ChainSyncStrategy(strategy) => - strategy.on_block_response(peer_id, request, blocks), + // Only `WarpSync` and `ChainSync` handle block responses. 
+ if let Some(ref mut warp) = self.warp { + warp.on_block_response(peer_id, request, blocks); + } else if let Some(ref mut chain_sync) = self.chain_sync { + chain_sync.on_block_response(peer_id, request, blocks); } } /// Process state response. pub fn on_state_response(&mut self, peer_id: PeerId, response: OpaqueStateResponse) { - match self { - SyncingStrategy::WarpSyncStrategy(_) => {}, - SyncingStrategy::StateSyncStrategy(strategy) => - strategy.on_state_response(peer_id, response), - SyncingStrategy::ChainSyncStrategy(strategy) => - strategy.on_state_response(peer_id, response), + // Only `StateStrategy` and `ChainSync` handle state responses. + if let Some(ref mut state) = self.state { + state.on_state_response(peer_id, response); + } else if let Some(ref mut chain_sync) = self.chain_sync { + chain_sync.on_state_response(peer_id, response); } } /// Process warp proof response. pub fn on_warp_proof_response(&mut self, peer_id: &PeerId, response: EncodedProof) { - match self { - SyncingStrategy::WarpSyncStrategy(strategy) => - strategy.on_warp_proof_response(peer_id, response), - SyncingStrategy::StateSyncStrategy(_) => {}, - SyncingStrategy::ChainSyncStrategy(_) => {}, + // Only `WarpSync` handles warp proof responses. + if let Some(ref mut warp) = self.warp { + warp.on_warp_proof_response(peer_id, response); } } @@ -333,114 +350,145 @@ where count: usize, results: Vec<(Result>, BlockImportError>, B::Hash)>, ) { - match self { - SyncingStrategy::WarpSyncStrategy(_) => {}, - SyncingStrategy::StateSyncStrategy(strategy) => - strategy.on_blocks_processed(imported, count, results), - SyncingStrategy::ChainSyncStrategy(strategy) => - strategy.on_blocks_processed(imported, count, results), + // Only `StateStrategy` and `ChainSync` are interested in block processing notifications. + + if let Some(ref mut state) = self.state { + state.on_blocks_processed(imported, count, results); + } else if let Some(ref mut chain_sync) = self.chain_sync { + chain_sync.on_blocks_processed(imported, count, results); } } /// Notify a syncing strategy that a block has been finalized. pub fn on_block_finalized(&mut self, hash: &B::Hash, number: NumberFor) { - match self { - SyncingStrategy::WarpSyncStrategy(_) => {}, - SyncingStrategy::StateSyncStrategy(_) => {}, - SyncingStrategy::ChainSyncStrategy(strategy) => - strategy.on_block_finalized(hash, number), + // Only `ChainSync` is interested in block finalization notifications. + if let Some(ref mut chain_sync) = self.chain_sync { + chain_sync.on_block_finalized(hash, number); } } /// Inform sync about a new best imported block. pub fn update_chain_info(&mut self, best_hash: &B::Hash, best_number: NumberFor) { - match self { - SyncingStrategy::WarpSyncStrategy(_) => {}, - SyncingStrategy::StateSyncStrategy(_) => {}, - SyncingStrategy::ChainSyncStrategy(strategy) => - strategy.update_chain_info(best_hash, best_number), + // This is relevant to `ChainSync` only. + if let Some(ref mut chain_sync) = self.chain_sync { + chain_sync.update_chain_info(best_hash, best_number); } } // Are we in major sync mode? 
pub fn is_major_syncing(&self) -> bool { - match self { - SyncingStrategy::WarpSyncStrategy(_) => true, - SyncingStrategy::StateSyncStrategy(_) => true, - SyncingStrategy::ChainSyncStrategy(strategy) => - strategy.status().state.is_major_syncing(), - } + self.warp.is_some() || + self.state.is_some() || + match self.chain_sync { + Some(ref s) => s.status().state.is_major_syncing(), + None => unreachable!("At least one syncing startegy is active; qed"), + } } /// Get the number of peers known to the syncing strategy. pub fn num_peers(&self) -> usize { - match self { - SyncingStrategy::WarpSyncStrategy(strategy) => strategy.num_peers(), - SyncingStrategy::StateSyncStrategy(strategy) => strategy.num_peers(), - SyncingStrategy::ChainSyncStrategy(strategy) => strategy.num_peers(), - } + self.connected_peers_best.len() } /// Returns the current sync status. pub fn status(&self) -> SyncStatus { - match self { - SyncingStrategy::WarpSyncStrategy(strategy) => strategy.status(), - SyncingStrategy::StateSyncStrategy(strategy) => strategy.status(), - SyncingStrategy::ChainSyncStrategy(strategy) => strategy.status(), + // This function presumes that startegies are executed serially and must be refactored + // once we have parallel strategies. + if let Some(ref warp) = self.warp { + warp.status() + } else if let Some(ref state) = self.state { + state.status() + } else if let Some(ref chain_sync) = self.chain_sync { + chain_sync.status() + } else { + unreachable!("At least one syncing startegy is always active; qed") } } /// Get the total number of downloaded blocks. pub fn num_downloaded_blocks(&self) -> usize { - match self { - SyncingStrategy::WarpSyncStrategy(_) => 0, - SyncingStrategy::StateSyncStrategy(_) => 0, - SyncingStrategy::ChainSyncStrategy(strategy) => strategy.num_downloaded_blocks(), + if let Some(ref chain_sync) = self.chain_sync { + chain_sync.num_downloaded_blocks() + } else { + 0 } } /// Get an estimate of the number of parallel sync requests. pub fn num_sync_requests(&self) -> usize { - match self { - SyncingStrategy::WarpSyncStrategy(_) => 0, - SyncingStrategy::StateSyncStrategy(_) => 0, - SyncingStrategy::ChainSyncStrategy(strategy) => strategy.num_sync_requests(), + if let Some(ref chain_sync) = self.chain_sync { + chain_sync.num_sync_requests() + } else { + 0 } } /// Report Prometheus metrics pub fn report_metrics(&self) { - match self { - SyncingStrategy::WarpSyncStrategy(_) => {}, - SyncingStrategy::StateSyncStrategy(_) => {}, - SyncingStrategy::ChainSyncStrategy(strategy) => strategy.report_metrics(), + if let Some(ref chain_sync) = self.chain_sync { + chain_sync.report_metrics(); + } + } + + /// Let `WarpSync` know about target block header + pub fn set_warp_sync_target_block_header( + &mut self, + target_header: B::Header, + ) -> Result<(), ()> { + if let Some(ref mut warp) = self.warp { + warp.set_target_block(target_header); + Ok(()) + } else { + error!( + target: LOG_TARGET, + "Cannot set warp sync target block: no warp sync strategy is active." + ); + debug_assert!(false); + Err(()) } } /// Get actions that should be performed by the owner on the strategy's behalf #[must_use] - pub fn actions(&mut self) -> Box>> { - match self { - SyncingStrategy::WarpSyncStrategy(strategy) => - Box::new(strategy.actions().map(|action| match action { + pub fn actions(&mut self) -> Result>, ClientError> { + // This function presumes that strategies are executed serially and must be refactored once + // we have parallel startegies. 
+ if let Some(ref mut warp) = self.warp { + let mut actions = Vec::new(); + for action in warp.actions() { + actions.push(match action { WarpSyncAction::SendWarpProofRequest { peer_id, request } => SyncingAction::SendWarpProofRequest { peer_id, request }, WarpSyncAction::SendBlockRequest { peer_id, request } => SyncingAction::SendBlockRequest { peer_id, request }, WarpSyncAction::DropPeer(bad_peer) => SyncingAction::DropPeer(bad_peer), - WarpSyncAction::Finished => SyncingAction::Finished, - })), - SyncingStrategy::StateSyncStrategy(strategy) => - Box::new(strategy.actions().map(|action| match action { + WarpSyncAction::Finished => { + self.proceed_to_next()?; + return Ok(actions) + }, + }); + } + Ok(actions) + } else if let Some(ref mut state) = self.state { + let mut actions = Vec::new(); + for action in state.actions() { + actions.push(match action { StateStrategyAction::SendStateRequest { peer_id, request } => SyncingAction::SendStateRequest { peer_id, request }, StateStrategyAction::DropPeer(bad_peer) => SyncingAction::DropPeer(bad_peer), StateStrategyAction::ImportBlocks { origin, blocks } => SyncingAction::ImportBlocks { origin, blocks }, - StateStrategyAction::Finished => SyncingAction::Finished, - })), - SyncingStrategy::ChainSyncStrategy(strategy) => - Box::new(strategy.actions().map(|action| match action { + StateStrategyAction::Finished => { + self.proceed_to_next()?; + return Ok(actions) + }, + }); + } + Ok(actions) + } else if let Some(ref mut chain_sync) = self.chain_sync { + Ok(chain_sync + .actions() + .map(|action| match action { ChainSyncAction::SendBlockRequest { peer_id, request } => SyncingAction::SendBlockRequest { peer_id, request }, ChainSyncAction::CancelBlockRequest { peer_id } => @@ -461,98 +509,99 @@ where number, justifications, }, - })), + }) + .collect()) + } else { + unreachable!("At least one syncing startegy is always active; qed") } } - /// Switch to next strategy if the active one finished. - pub fn switch_to_next( - &mut self, - config: SyncingConfig, - client: Arc, - connected_peers: impl Iterator)>, - ) -> Result<(), ClientError> { - match self { - Self::WarpSyncStrategy(warp_sync) => { - match warp_sync.take_result() { - Some(res) => { - info!( - target: LOG_TARGET, - "Warp sync is complete, continuing with state sync." - ); - let state_sync = StateStrategy::new( - client, - res.target_header, - res.target_body, - res.target_justifications, - // skip proofs, only set to `true` in `FastUnsafe` sync mode - false, - connected_peers - .map(|(peer_id, _best_hash, best_number)| (peer_id, best_number)), - ); - - *self = Self::StateSyncStrategy(state_sync); - }, - None => { - error!( - target: LOG_TARGET, - "Warp sync failed. Falling back to full sync." - ); - let mut chain_sync = match ChainSync::new( - chain_sync_mode(config.mode), - client, - config.max_parallel_downloads, - config.max_blocks_per_request, - config.metrics_registry, - ) { - Ok(chain_sync) => chain_sync, - Err(e) => { - error!(target: LOG_TARGET, "Failed to start `ChainSync`."); - return Err(e) - }, - }; - // Let `ChainSync` know about connected peers. - connected_peers.into_iter().for_each( - |(peer_id, best_hash, best_number)| { - chain_sync.add_peer(peer_id, best_hash, best_number) - }, - ); - - *self = Self::ChainSyncStrategy(chain_sync); - }, - } - }, - Self::StateSyncStrategy(state_sync) => { - if state_sync.is_succeded() { - info!(target: LOG_TARGET, "State sync is complete, continuing with block sync."); - } else { - error!(target: LOG_TARGET, "State sync failed. 
Falling back to full sync."); - } - let mut chain_sync = match ChainSync::new( - chain_sync_mode(config.mode), - client, - config.max_parallel_downloads, - config.max_blocks_per_request, - config.metrics_registry, - ) { - Ok(chain_sync) => chain_sync, - Err(e) => { - error!(target: LOG_TARGET, "Failed to start `ChainSync`."); - return Err(e); - }, - }; - // Let `ChainSync` know about connected peers. - connected_peers.into_iter().for_each(|(peer_id, best_hash, best_number)| { - chain_sync.add_peer(peer_id, best_hash, best_number) + /// Proceed with the next strategy if the active one finished. + pub fn proceed_to_next(&mut self) -> Result<(), ClientError> { + // The strategies are switched as `WarpSync` -> `StateStartegy` -> `ChainSync`. + if let Some(ref mut warp) = self.warp { + match warp.take_result() { + Some(res) => { + info!( + target: LOG_TARGET, + "Warp sync finished, continuing with state sync." + ); + let state_sync = StateStrategy::new( + self.client.clone(), + res.target_header, + res.target_body, + res.target_justifications, + false, + self.connected_peers_best + .iter() + .map(|(peer_id, (_, best_number))| (*peer_id, *best_number)), + ); + + self.warp = None; + self.state = Some(state_sync); + Ok(()) + }, + None => { + error!( + target: LOG_TARGET, + "Warp sync failed. Continuing with full sync." + ); + let mut chain_sync = match ChainSync::new( + chain_sync_mode(self.config.mode), + self.client.clone(), + self.config.max_parallel_downloads, + self.config.max_blocks_per_request, + self.config.metrics_registry.clone(), + ) { + Ok(chain_sync) => chain_sync, + Err(e) => { + error!(target: LOG_TARGET, "Failed to start `ChainSync`."); + return Err(e) + }, + }; + // Let `ChainSync` know about connected peers. + self.connected_peers_best.iter().for_each( + |(peer_id, (best_hash, best_number))| { + chain_sync.add_peer(*peer_id, *best_hash, *best_number) + }, + ); + + self.warp = None; + self.chain_sync = Some(chain_sync); + Ok(()) + }, + } + } else if let Some(state) = &self.state { + if state.is_succeded() { + info!(target: LOG_TARGET, "State sync is complete, continuing with block sync."); + } else { + error!(target: LOG_TARGET, "State sync failed. Falling back to full sync."); + } + let mut chain_sync = match ChainSync::new( + chain_sync_mode(self.config.mode), + self.client.clone(), + self.config.max_parallel_downloads, + self.config.max_blocks_per_request, + self.config.metrics_registry.clone(), + ) { + Ok(chain_sync) => chain_sync, + Err(e) => { + error!(target: LOG_TARGET, "Failed to start `ChainSync`."); + return Err(e); + }, + }; + // Let `ChainSync` know about connected peers. + self.connected_peers_best + .iter() + .for_each(|(peer_id, (best_hash, best_number))| { + chain_sync.add_peer(*peer_id, *best_hash, *best_number) }); - *self = Self::ChainSyncStrategy(chain_sync); - }, - Self::ChainSyncStrategy(_) => { - error!(target: LOG_TARGET, "`ChainSyncStrategy` is final startegy, cannot switch to next."); - debug_assert!(false); - }, + self.state = None; + self.chain_sync = Some(chain_sync); + Ok(()) + } else { + unreachable!("Only warp & state strategies can finish; qed") } - Ok(()) } } diff --git a/substrate/client/network/sync/src/strategy/state.rs b/substrate/client/network/sync/src/strategy/state.rs index ae3f7b600559..12d36ff9e01a 100644 --- a/substrate/client/network/sync/src/strategy/state.rs +++ b/substrate/client/network/sync/src/strategy/state.rs @@ -330,11 +330,6 @@ impl StateStrategy { } } - /// Get the number of peers known to syncing. 
- pub fn num_peers(&self) -> usize { - self.peers.len() - } - /// Get actions that should be performed by the owner on [`WarpSync`]'s behalf #[must_use] pub fn actions(&mut self) -> impl Iterator> { From 6ed85cd2361dc57e19f7ae232c8c21fe7831afb7 Mon Sep 17 00:00:00 2001 From: Dmitry Markin Date: Tue, 26 Dec 2023 15:45:54 +0200 Subject: [PATCH 04/31] Reserve peers for `WarpSync` strategy in `PeerPool` --- substrate/client/network/sync/src/strategy.rs | 7 +- .../client/network/sync/src/strategy/warp.rs | 177 +++++++++++++----- 2 files changed, 133 insertions(+), 51 deletions(-) diff --git a/substrate/client/network/sync/src/strategy.rs b/substrate/client/network/sync/src/strategy.rs index 721571a7db58..54cf77fa6297 100644 --- a/substrate/client/network/sync/src/strategy.rs +++ b/substrate/client/network/sync/src/strategy.rs @@ -84,7 +84,7 @@ impl PeerStatus { } #[derive(Clone, Default)] -struct PeerPool { +pub struct PeerPool { peers: Arc>>, } @@ -194,14 +194,15 @@ where if let SyncMode::Warp = config.mode { let warp_sync_config = warp_sync_config .expect("Warp sync configuration must be supplied in warp sync mode."); - let warp_sync = WarpSync::new(client.clone(), warp_sync_config); + let peer_pool: PeerPool = Default::default(); + let warp_sync = WarpSync::new(client.clone(), warp_sync_config, peer_pool.clone()); Ok(Self { config, client, warp: Some(warp_sync), state: None, chain_sync: None, - connected_peers_pool: Default::default(), + connected_peers_pool: peer_pool, connected_peers_best: Default::default(), }) } else { diff --git a/substrate/client/network/sync/src/strategy/warp.rs b/substrate/client/network/sync/src/strategy/warp.rs index 7935b5f29b68..d69373f45ed7 100644 --- a/substrate/client/network/sync/src/strategy/warp.rs +++ b/substrate/client/network/sync/src/strategy/warp.rs @@ -20,6 +20,7 @@ pub use sp_consensus_grandpa::{AuthorityList, SetId}; +use super::PeerPool; use crate::{ strategy::chain_sync::validate_blocks, types::{BadPeer, SyncState, SyncStatus}, @@ -28,7 +29,7 @@ use crate::{ use codec::{Decode, Encode}; use futures::channel::oneshot; use libp2p::PeerId; -use log::{debug, error, trace}; +use log::{debug, error, trace, warn}; use sc_network_common::sync::message::{ BlockAnnounce, BlockAttributes, BlockData, BlockRequest, Direction, FromBlock, }; @@ -242,6 +243,7 @@ pub struct WarpSync { peers: HashMap>, actions: Vec>, result: Option>, + peer_pool: PeerPool, } impl WarpSync @@ -252,7 +254,11 @@ where /// Create a new instance. When passing a warp sync provider we will be checking for proof and /// authorities. Alternatively we can pass a target block when we want to skip downloading /// proofs, in this case we will continue polling until the target block is known. 
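
The `peer_pool` threaded into the constructor below is a cheap handle: `PeerPool` (PATCH 02) keeps its map behind `Arc<parking_lot::Mutex<..>>` and derives `Clone`, so the copy held by `SyncingStrategy` and the one handed to `WarpSync` observe the same reservation state. A small sketch of that sharing, with `Pool` as a simplified stand-in:

use parking_lot::Mutex;
use std::{collections::HashMap, sync::Arc};

#[derive(Clone, Default)]
struct Pool {
    peers: Arc<Mutex<HashMap<u64, bool>>>, // peer id -> reserved?
}

fn main() {
    let owner = Pool::default();
    let strategy = owner.clone(); // the handle passed to a strategy

    owner.peers.lock().insert(7, false);

    // The strategy sees the peer the owner registered: shared, not copied.
    assert!(strategy.peers.lock().contains_key(&7));
}
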
- pub fn new(client: Arc, warp_sync_config: WarpSyncConfig) -> Self { + pub fn new( + client: Arc, + warp_sync_config: WarpSyncConfig, + peer_pool: PeerPool, + ) -> Self { if client.info().finalized_state.is_some() { error!( target: LOG_TARGET, @@ -266,6 +272,7 @@ where peers: HashMap::new(), actions: vec![WarpSyncAction::Finished], result: None, + peer_pool, } } @@ -283,6 +290,7 @@ where peers: HashMap::new(), actions: Vec::new(), result: None, + peer_pool, } } @@ -355,6 +363,7 @@ where if let Some(peer) = self.peers.get_mut(peer_id) { peer.state = PeerState::Available; } + self.peer_pool.free_peer(&peer_id); let Phase::WarpProof { set_id, authorities, last_hash, warp_sync_provider } = &mut self.phase @@ -413,6 +422,7 @@ where if let Some(peer) = self.peers.get_mut(&peer_id) { peer.state = PeerState::Available; } + self.peer_pool.free_peer(&peer_id); let Phase::TargetBlock(header) = &mut self.phase else { debug!(target: LOG_TARGET, "Unexpected target block response from {peer_id}"); @@ -481,18 +491,36 @@ where min_best_number: Option>, ) -> Option { let mut targets: Vec<_> = self.peers.values().map(|p| p.best_number).collect(); - if targets.is_empty() { + if targets.is_empty { return None } targets.sort(); let median = targets[targets.len() / 2]; let threshold = std::cmp::max(median, min_best_number.unwrap_or(Zero::zero())); - // Find a random peer that is synced as much as peer majority and is above + // Find a random available peer that is synced as much as peer majority and is above // `min_best_number`. - for (peer_id, peer) in self.peers.iter_mut() { - if peer.state.is_available() && peer.best_number >= threshold { - peer.state = new_state; - return Some(*peer_id) + for peer_id in self.peer_pool.available_peers() { + if let Some(peer) = self.peers.get_mut(&peer_id) { + if peer.state.is_available() && peer.best_number >= threshold { + if self.peer_pool.try_reserve_peer(&peer_id) { + peer.state = new_state; + return Some(peer_id) + } else { + warn!( + target: LOG_TARGET, + "Failed to reserve peer {peer_id} in the peer pool that was \ + just returned as available.", + ); + debug_assert!(false); + } + } + } else { + warn!( + target: LOG_TARGET, + "State inconsistency: peer {peer_id} is in the pool of connected peers, \ + but not known to `WarpSync`.", + ); + debug_assert!(false); } } None @@ -730,7 +758,7 @@ mod test { let client = mock_client_with_state(); let provider = MockWarpSyncProvider::::new(); let config = WarpSyncConfig::WithProvider(Arc::new(provider)); - let mut warp_sync = WarpSync::new(Arc::new(client), config); + let mut warp_sync = WarpSync::new(Arc::new(client), config, Default::default()); // Warp sync instantly finishes let actions = warp_sync.actions().collect::>(); @@ -745,7 +773,7 @@ mod test { fn warp_sync_to_target_for_db_with_finalized_state_is_noop() { let client = mock_client_with_state(); let config = WarpSyncConfig::WaitForTarget; - let mut warp_sync = WarpSync::new(Arc::new(client), config); + let mut warp_sync = WarpSync::new(Arc::new(client), config, Default::default()); // Warp sync instantly finishes let actions = warp_sync.actions().collect::>(); @@ -761,7 +789,7 @@ mod test { let client = mock_client_without_state(); let provider = MockWarpSyncProvider::::new(); let config = WarpSyncConfig::WithProvider(Arc::new(provider)); - let mut warp_sync = WarpSync::new(Arc::new(client), config); + let mut warp_sync = WarpSync::new(Arc::new(client), config, Default::default()); // No actions are emitted. 
assert_eq!(warp_sync.actions().count(), 0) @@ -771,7 +799,7 @@ mod test { fn warp_sync_to_target_for_empty_db_doesnt_finish_instantly() { let client = mock_client_without_state(); let config = WarpSyncConfig::WaitForTarget; - let mut warp_sync = WarpSync::new(Arc::new(client), config); + let mut warp_sync = WarpSync::new(Arc::new(client), config, Default::default()); // No actions are emitted. assert_eq!(warp_sync.actions().count(), 0) @@ -786,16 +814,21 @@ mod test { .once() .return_const(AuthorityList::default()); let config = WarpSyncConfig::WithProvider(Arc::new(provider)); - let mut warp_sync = WarpSync::new(Arc::new(client), config); + let peer_pool: PeerPool = Default::default(); + let mut warp_sync = WarpSync::new(Arc::new(client), config, peer_pool.clone()); // Warp sync is not started when there is not enough peers. for _ in 0..(MIN_PEERS_TO_START_WARP_SYNC - 1) { - warp_sync.add_peer(PeerId::random(), Hash::random(), 10); + let peer_id = PeerId::random(); + peer_pool.add_peer(peer_id); + warp_sync.add_peer(peer_id, Hash::random(), 10); assert!(matches!(warp_sync.phase, Phase::WaitingForPeers { .. })) } // Now we have enough peers and warp sync is started. - warp_sync.add_peer(PeerId::random(), Hash::random(), 10); + let peer_id = PeerId::random(); + peer_pool.add_peer(peer_id); + warp_sync.add_peer(peer_id, Hash::random(), 10); assert!(matches!(warp_sync.phase, Phase::WarpProof { .. })) } @@ -804,7 +837,7 @@ mod test { let client = mock_client_without_state(); let provider = MockWarpSyncProvider::::new(); let config = WarpSyncConfig::WithProvider(Arc::new(provider)); - let mut warp_sync = WarpSync::new(Arc::new(client), config); + let mut warp_sync = WarpSync::new(Arc::new(client), config, Default::default()); assert!(warp_sync.schedule_next_peer(PeerState::DownloadingProofs, None).is_none()); } @@ -828,10 +861,13 @@ mod test { .once() .return_const(AuthorityList::default()); let config = WarpSyncConfig::WithProvider(Arc::new(provider)); - let mut warp_sync = WarpSync::new(Arc::new(client), config); + let peer_pool: PeerPool = Default::default(); + let mut warp_sync = WarpSync::new(Arc::new(client), config, peer_pool.clone()); for best_number in 1..11 { - warp_sync.add_peer(PeerId::random(), Hash::random(), best_number); + let peer_id = PeerId::random(); + peer_pool.add_peer(peer_id); + warp_sync.add_peer(peer_id, Hash::random(), best_number); } let peer_id = warp_sync.schedule_next_peer(PeerState::DownloadingProofs, None); @@ -849,10 +885,13 @@ mod test { .once() .return_const(AuthorityList::default()); let config = WarpSyncConfig::WithProvider(Arc::new(provider)); - let mut warp_sync = WarpSync::new(Arc::new(client), config); + let peer_pool: PeerPool = Default::default(); + let mut warp_sync = WarpSync::new(Arc::new(client), config, peer_pool.clone()); for best_number in 1..11 { - warp_sync.add_peer(PeerId::random(), Hash::random(), best_number); + let peer_id = PeerId::random(); + peer_pool.add_peer(peer_id); + warp_sync.add_peer(peer_id, Hash::random(), best_number); } let peer_id = warp_sync.schedule_next_peer(PeerState::DownloadingProofs, Some(10)); @@ -869,11 +908,14 @@ mod test { .once() .return_const(AuthorityList::default()); let config = WarpSyncConfig::WithProvider(Arc::new(provider)); - let mut warp_sync = WarpSync::new(Arc::new(client), config); + let peer_pool: PeerPool = Default::default(); + let mut warp_sync = WarpSync::new(Arc::new(client), config, peer_pool.clone()); // Make sure we have enough peers to make a request. 
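
A note on the tests: the registration dance below — random `PeerId`, `add_peer` on the pool, then `add_peer` on the strategy — recurs in each remaining test and mirrors what `SyncingStrategy::add_peer` does in production. A hypothetical helper, not part of the patch (`Block` and `MockClient` are stand-in type names for the concrete test types), could express it once:

fn add_available_peers(
    warp_sync: &mut WarpSync<Block, MockClient>,
    peer_pool: &PeerPool,
    n: u64,
) {
    for best_number in 1..=n {
        let peer_id = PeerId::random();
        // A peer must be registered with both the pool and the strategy.
        peer_pool.add_peer(peer_id);
        warp_sync.add_peer(peer_id, Hash::random(), best_number);
    }
}
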
for best_number in 1..11 { - warp_sync.add_peer(PeerId::random(), Hash::random(), best_number); + let peer_id = PeerId::random(); + peer_pool.add_peer(peer_id); + warp_sync.add_peer(peer_id, Hash::random(), best_number); } // Manually set to another phase. @@ -892,11 +934,14 @@ mod test { .once() .return_const(AuthorityList::default()); let config = WarpSyncConfig::WithProvider(Arc::new(provider)); - let mut warp_sync = WarpSync::new(Arc::new(client), config); + let peer_pool: PeerPool = Default::default(); + let mut warp_sync = WarpSync::new(Arc::new(client), config, peer_pool.clone()); // Make sure we have enough peers to make a request. for best_number in 1..11 { - warp_sync.add_peer(PeerId::random(), Hash::random(), best_number); + let peer_id = PeerId::random(); + peer_pool.add_peer(peer_id); + warp_sync.add_peer(peer_id, Hash::random(), best_number); } assert!(matches!(warp_sync.phase, Phase::WarpProof { .. })); @@ -923,11 +968,14 @@ mod test { .once() .return_const(AuthorityList::default()); let config = WarpSyncConfig::WithProvider(Arc::new(provider)); - let mut warp_sync = WarpSync::new(Arc::new(client), config); + let peer_pool: PeerPool = Default::default(); + let mut warp_sync = WarpSync::new(Arc::new(client), config, peer_pool.clone()); // Make sure we have enough peers to make requests. for best_number in 1..11 { - warp_sync.add_peer(PeerId::random(), Hash::random(), best_number); + let peer_id = PeerId::random(); + peer_pool.add_peer(peer_id); + warp_sync.add_peer(peer_id, Hash::random(), best_number); } assert!(matches!(warp_sync.phase, Phase::WarpProof { .. })); @@ -950,11 +998,14 @@ mod test { Err(Box::new(std::io::Error::new(ErrorKind::Other, "test-verification-failure"))) }); let config = WarpSyncConfig::WithProvider(Arc::new(provider)); - let mut warp_sync = WarpSync::new(Arc::new(client), config); + let peer_pool: PeerPool = Default::default(); + let mut warp_sync = WarpSync::new(Arc::new(client), config, peer_pool.clone()); // Make sure we have enough peers to make a request. for best_number in 1..11 { - warp_sync.add_peer(PeerId::random(), Hash::random(), best_number); + let peer_id = PeerId::random(); + peer_pool.add_peer(peer_id); + warp_sync.add_peer(peer_id, Hash::random(), best_number); } assert!(matches!(warp_sync.phase, Phase::WarpProof { .. })); @@ -991,11 +1042,14 @@ mod test { Ok(VerificationResult::Partial(set_id, authorities, Hash::random())) }); let config = WarpSyncConfig::WithProvider(Arc::new(provider)); - let mut warp_sync = WarpSync::new(Arc::new(client), config); + let peer_pool: PeerPool = Default::default(); + let mut warp_sync = WarpSync::new(Arc::new(client), config, peer_pool.clone()); // Make sure we have enough peers to make a request. for best_number in 1..11 { - warp_sync.add_peer(PeerId::random(), Hash::random(), best_number); + let peer_id = PeerId::random(); + peer_pool.add_peer(peer_id); + warp_sync.add_peer(peer_id, Hash::random(), best_number); } assert!(matches!(warp_sync.phase, Phase::WarpProof { .. })); @@ -1035,11 +1089,14 @@ mod test { Ok(VerificationResult::Complete(set_id, authorities, target_header)) }); let config = WarpSyncConfig::WithProvider(Arc::new(provider)); - let mut warp_sync = WarpSync::new(client, config); + let peer_pool: PeerPool = Default::default(); + let mut warp_sync = WarpSync::new(client, config, peer_pool.clone()); // Make sure we have enough peers to make a request. 
for best_number in 1..11 { - warp_sync.add_peer(PeerId::random(), Hash::random(), best_number); + let peer_id = PeerId::random(); + peer_pool.add_peer(peer_id); + warp_sync.add_peer(peer_id, Hash::random(), best_number); } assert!(matches!(warp_sync.phase, Phase::WarpProof { .. })); @@ -1068,11 +1125,14 @@ mod test { .once() .return_const(AuthorityList::default()); let config = WarpSyncConfig::WithProvider(Arc::new(provider)); - let mut warp_sync = WarpSync::new(Arc::new(client), config); + let peer_pool: PeerPool = Default::default(); + let mut warp_sync = WarpSync::new(Arc::new(client), config, peer_pool.clone()); // Make sure we have enough peers to make a request. for best_number in 1..11 { - warp_sync.add_peer(PeerId::random(), Hash::random(), best_number); + let peer_id = PeerId::random(); + peer_pool.add_peer(peer_id); + warp_sync.add_peer(peer_id, Hash::random(), best_number); } // We are not in `Phase::TargetBlock` assert!(matches!(warp_sync.phase, Phase::WarpProof { .. })); @@ -1103,11 +1163,14 @@ mod test { Ok(VerificationResult::Complete(set_id, authorities, target_header)) }); let config = WarpSyncConfig::WithProvider(Arc::new(provider)); - let mut warp_sync = WarpSync::new(client, config); + let peer_pool: PeerPool = Default::default(); + let mut warp_sync = WarpSync::new(client, config, peer_pool.clone()); // Make sure we have enough peers to make a request. for best_number in 1..11 { - warp_sync.add_peer(PeerId::random(), Hash::random(), best_number); + let peer_id = PeerId::random(); + peer_pool.add_peer(peer_id); + warp_sync.add_peer(peer_id, Hash::random(), best_number); } // Manually set `TargetBlock` phase. @@ -1135,11 +1198,14 @@ mod test { .block; let target_header = target_block.header().clone(); let config = WarpSyncConfig::WaitForTarget; - let mut warp_sync = WarpSync::new(client, config); + let peer_pool: PeerPool = Default::default(); + let mut warp_sync = WarpSync::new(client, config, peer_pool.clone()); // Make sure we have enough peers to make a request. for best_number in 1..11 { - warp_sync.add_peer(PeerId::random(), Hash::random(), best_number); + let peer_id = PeerId::random(); + peer_pool.add_peer(peer_id); + warp_sync.add_peer(peer_id, Hash::random(), best_number); } // No actions generated so far. @@ -1179,11 +1245,14 @@ mod test { Ok(VerificationResult::Complete(set_id, authorities, target_header)) }); let config = WarpSyncConfig::WithProvider(Arc::new(provider)); - let mut warp_sync = WarpSync::new(client, config); + let peer_pool: PeerPool = Default::default(); + let mut warp_sync = WarpSync::new(client, config, peer_pool.clone()); // Make sure we have enough peers to make a request. for best_number in 1..11 { - warp_sync.add_peer(PeerId::random(), Hash::random(), best_number); + let peer_id = PeerId::random(); + peer_pool.add_peer(peer_id); + warp_sync.add_peer(peer_id, Hash::random(), best_number); } // Manually set `TargetBlock` phase. @@ -1217,11 +1286,14 @@ mod test { Ok(VerificationResult::Complete(set_id, authorities, target_header)) }); let config = WarpSyncConfig::WithProvider(Arc::new(provider)); - let mut warp_sync = WarpSync::new(client, config); + let peer_pool: PeerPool = Default::default(); + let mut warp_sync = WarpSync::new(client, config, peer_pool.clone()); // Make sure we have enough peers to make a request. 
for best_number in 1..11 { - warp_sync.add_peer(PeerId::random(), Hash::random(), best_number); + let peer_id = PeerId::random(); + peer_pool.add_peer(peer_id); + warp_sync.add_peer(peer_id, Hash::random(), best_number); } // Manually set `TargetBlock` phase. @@ -1271,11 +1343,14 @@ mod test { Ok(VerificationResult::Complete(set_id, authorities, target_header)) }); let config = WarpSyncConfig::WithProvider(Arc::new(provider)); - let mut warp_sync = WarpSync::new(client, config); + let peer_pool: PeerPool = Default::default(); + let mut warp_sync = WarpSync::new(client, config, peer_pool.clone()); // Make sure we have enough peers to make a request. for best_number in 1..11 { - warp_sync.add_peer(PeerId::random(), Hash::random(), best_number); + let peer_id = PeerId::random(); + peer_pool.add_peer(peer_id); + warp_sync.add_peer(peer_id, Hash::random(), best_number); } // Manually set `TargetBlock` phase. @@ -1348,11 +1423,14 @@ mod test { Ok(VerificationResult::Complete(set_id, authorities, target_header)) }); let config = WarpSyncConfig::WithProvider(Arc::new(provider)); - let mut warp_sync = WarpSync::new(client, config); + let peer_pool: PeerPool = Default::default(); + let mut warp_sync = WarpSync::new(client, config, peer_pool.clone()); // Make sure we have enough peers to make a request. for best_number in 1..11 { - warp_sync.add_peer(PeerId::random(), Hash::random(), best_number); + let peer_id = PeerId::random(); + peer_pool.add_peer(peer_id); + warp_sync.add_peer(peer_id, Hash::random(), best_number); } // Manually set `TargetBlock` phase. @@ -1401,11 +1479,14 @@ mod test { Ok(VerificationResult::Complete(set_id, authorities, target_header)) }); let config = WarpSyncConfig::WithProvider(Arc::new(provider)); - let mut warp_sync = WarpSync::new(client, config); + let peer_pool: PeerPool = Default::default(); + let mut warp_sync = WarpSync::new(client, config, peer_pool.clone()); // Make sure we have enough peers to make a request. for best_number in 1..11 { - warp_sync.add_peer(PeerId::random(), Hash::random(), best_number); + let peer_id = PeerId::random(); + peer_pool.add_peer(peer_id); + warp_sync.add_peer(peer_id, Hash::random(), best_number); } // Manually set `TargetBlock` phase. 
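
Taken together, PATCH 04 gives every request a strict reservation lifecycle: a strategy takes a peer out of the shared pool with `try_reserve_peer` before sending, and returns it with `free_peer` once the response (or failure) has been handled. Expected usage, sketched against the `PeerPool` API from PATCH 02:

// `pool` is the shared `PeerPool`, `peer_id` some connected peer.
fn request_cycle(pool: &PeerPool, peer_id: PeerId) {
    if pool.try_reserve_peer(&peer_id) {
        // ...send the request; a concurrent second reservation now fails...
        assert!(!pool.try_reserve_peer(&peer_id));

        // ...and once the response is processed the peer must be returned:
        pool.free_peer(&peer_id);
    }
    // Reserving an unknown peer or freeing an available one only logs a
    // warning, so a missed `free_peer` keeps the peer reserved until it
    // disconnects.
}
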
From 3aaa9efa59392fd8bd5a964413a706b4ebebf4f0 Mon Sep 17 00:00:00 2001 From: Dmitry Markin Date: Tue, 26 Dec 2023 16:31:18 +0200 Subject: [PATCH 05/31] Reserve peers for `StateStrategy` in `PeerPool` --- substrate/client/network/sync/src/strategy.rs | 43 +++-- .../client/network/sync/src/strategy/state.rs | 148 ++++++++++++++---- .../client/network/sync/src/strategy/warp.rs | 18 +-- 3 files changed, 147 insertions(+), 62 deletions(-) diff --git a/substrate/client/network/sync/src/strategy.rs b/substrate/client/network/sync/src/strategy.rs index 54cf77fa6297..e895dff3da45 100644 --- a/substrate/client/network/sync/src/strategy.rs +++ b/substrate/client/network/sync/src/strategy.rs @@ -170,8 +170,8 @@ pub struct SyncingStrategy { warp: Option>, state: Option>, chain_sync: Option>, - connected_peers_pool: PeerPool, - connected_peers_best: HashMap)>, + peer_pool: PeerPool, + peer_best_blocks: HashMap)>, } impl SyncingStrategy @@ -202,8 +202,8 @@ where warp: Some(warp_sync), state: None, chain_sync: None, - connected_peers_pool: peer_pool, - connected_peers_best: Default::default(), + peer_pool, + peer_best_blocks: Default::default(), }) } else { let chain_sync = ChainSync::new( @@ -219,16 +219,16 @@ where warp: None, state: None, chain_sync: Some(chain_sync), - connected_peers_pool: Default::default(), - connected_peers_best: Default::default(), + peer_pool: Default::default(), + peer_best_blocks: Default::default(), }) } } /// Notify that a new peer has connected. pub fn add_peer(&mut self, peer_id: PeerId, best_hash: B::Hash, best_number: NumberFor) { - self.connected_peers_pool.add_peer(peer_id); - self.connected_peers_best.insert(peer_id, (best_hash, best_number)); + self.peer_pool.add_peer(peer_id); + self.peer_best_blocks.insert(peer_id, (best_hash, best_number)); self.warp.iter_mut().for_each(|s| s.add_peer(peer_id, best_hash, best_number)); self.state.iter_mut().for_each(|s| s.add_peer(peer_id, best_hash, best_number)); @@ -243,8 +243,8 @@ where self.state.iter_mut().for_each(|s| s.remove_peer(peer_id)); self.chain_sync.iter_mut().for_each(|s| s.remove_peer(peer_id)); - self.connected_peers_pool.remove_peer(peer_id); - self.connected_peers_best.remove(peer_id); + self.peer_pool.remove_peer(peer_id); + self.peer_best_blocks.remove(peer_id); } /// Submit a validated block announcement. @@ -266,7 +266,7 @@ where Some((announce.header.hash(), *announce.header.number())) }; if let Some(new_best) = new_best { - if let Some(best) = self.connected_peers_best.get_mut(&peer_id) { + if let Some(best) = self.peer_best_blocks.get_mut(&peer_id) { *best = new_best; } } @@ -388,7 +388,7 @@ where /// Get the number of peers known to the syncing strategy. pub fn num_peers(&self) -> usize { - self.connected_peers_best.len() + self.peer_best_blocks.len() } /// Returns the current sync status. @@ -533,9 +533,10 @@ where res.target_body, res.target_justifications, false, - self.connected_peers_best + self.peer_best_blocks .iter() .map(|(peer_id, (_, best_number))| (*peer_id, *best_number)), + self.peer_pool.clone(), ); self.warp = None; @@ -561,11 +562,9 @@ where }, }; // Let `ChainSync` know about connected peers. 
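
The rename from `connected_peers_best` to `peer_best_blocks` below also highlights why the map exists: a strategy created in the middle of a sync (here, the fallback `ChainSync`) cannot wait for fresh block announcements, so it is seeded from this cached per-peer view, and each replayed peer enters the new strategy as `PeerSyncState::New` with validation deferred exactly as in PATCH 01. A condensed model, with `u64`/`u32` standing in for `PeerId` and block numbers:

use std::collections::HashMap;

struct NextStage {
    peers: Vec<(u64, u32)>,
}

fn start_next_stage(peer_best_blocks: &HashMap<u64, u32>) -> NextStage {
    let mut stage = NextStage { peers: Vec::new() };
    for (peer_id, best_number) in peer_best_blocks {
        // Mirrors `chain_sync.add_peer(*peer_id, *best_hash, *best_number)`.
        stage.peers.push((*peer_id, *best_number));
    }
    stage
}
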
- self.connected_peers_best.iter().for_each( - |(peer_id, (best_hash, best_number))| { - chain_sync.add_peer(*peer_id, *best_hash, *best_number) - }, - ); + self.peer_best_blocks.iter().for_each(|(peer_id, (best_hash, best_number))| { + chain_sync.add_peer(*peer_id, *best_hash, *best_number) + }); self.warp = None; self.chain_sync = Some(chain_sync); @@ -592,11 +591,9 @@ where }, }; // Let `ChainSync` know about connected peers. - self.connected_peers_best - .iter() - .for_each(|(peer_id, (best_hash, best_number))| { - chain_sync.add_peer(*peer_id, *best_hash, *best_number) - }); + self.peer_best_blocks.iter().for_each(|(peer_id, (best_hash, best_number))| { + chain_sync.add_peer(*peer_id, *best_hash, *best_number) + }); self.state = None; self.chain_sync = Some(chain_sync); diff --git a/substrate/client/network/sync/src/strategy/state.rs b/substrate/client/network/sync/src/strategy/state.rs index 12d36ff9e01a..dd8be8db80aa 100644 --- a/substrate/client/network/sync/src/strategy/state.rs +++ b/substrate/client/network/sync/src/strategy/state.rs @@ -18,6 +18,7 @@ //! State sync strategy. +use super::PeerPool; use crate::{ schema::v1::StateResponse, strategy::state_sync::{ImportResult, StateSync, StateSyncProvider}, @@ -25,7 +26,7 @@ use crate::{ LOG_TARGET, }; use libp2p::PeerId; -use log::{debug, error, trace}; +use log::{debug, error, info, trace, warn}; use sc_client_api::ProofProvider; use sc_consensus::{BlockImportError, BlockImportStatus, IncomingBlock}; use sc_network_common::sync::message::BlockAnnounce; @@ -80,6 +81,7 @@ pub struct StateStrategy { peers: HashMap>, actions: Vec>, succeded: bool, + peer_pool: PeerPool, } impl StateStrategy { @@ -91,6 +93,7 @@ impl StateStrategy { target_justifications: Option, skip_proof: bool, initial_peers: impl Iterator)>, + peer_pool: PeerPool, ) -> Self where Client: ProofProvider + Send + Sync + 'static, @@ -111,6 +114,7 @@ impl StateStrategy { peers, actions: Vec::new(), succeded: false, + peer_pool, } } @@ -120,6 +124,7 @@ impl StateStrategy { fn new_with_provider( state_sync_provider: Box>, initial_peers: impl Iterator)>, + peer_pool: PeerPool, ) -> Self { Self { state_sync: state_sync_provider, @@ -130,6 +135,7 @@ impl StateStrategy { .collect(), actions: Vec::new(), succeded: false, + peer_pool, } } @@ -179,6 +185,7 @@ impl StateStrategy { if let Some(peer) = self.peers.get_mut(&peer_id) { peer.state = PeerState::Available; } + self.peer_pool.free_peer(&peer_id); let response: Box = response.0.downcast().map_err(|_error| { error!( @@ -304,10 +311,28 @@ impl StateStrategy { let threshold = std::cmp::max(median, min_best_number); // Find a random peer that is synced as much as peer majority and is above // `min_best_number`. 
- for (peer_id, peer) in self.peers.iter_mut() { - if peer.state.is_available() && peer.best_number >= threshold { - peer.state = new_state; - return Some(*peer_id) + for peer_id in self.peer_pool.available_peers() { + if let Some(peer) = self.peers.get_mut(&peer_id) { + if peer.state.is_available() && peer.best_number >= threshold { + if self.peer_pool.try_reserve_peer(&peer_id) { + peer.state = new_state; + return Some(peer_id) + } else { + warn!( + target: LOG_TARGET, + "Failed to reserve peer {peer_id} in the peer pool that was \ + just returned as available (`StateStrategy`).", + ); + debug_assert!(false); + } + } + } else { + warn!( + target: LOG_TARGET, + "State inconsistency: peer {peer_id} is in the pool of connected peers, \ + but not known to `StateStrategy`.", + ); + debug_assert!(false); } } None @@ -392,8 +417,15 @@ mod test { .block; let target_header = target_block.header().clone(); - let mut state_strategy = - StateStrategy::new(client, target_header, None, None, false, std::iter::empty()); + let mut state_strategy = StateStrategy::new( + client, + target_header, + None, + None, + false, + std::iter::empty(), + Default::default(), + ); assert!(state_strategy .schedule_next_peer(PeerState::DownloadingState, Zero::zero()) @@ -417,6 +449,8 @@ mod test { .map(|best_number| (PeerId::random(), best_number)) .collect::>(); let initial_peers = peers.iter().map(|(p, n)| (*p, *n)); + let peer_pool: PeerPool = Default::default(); + peers.iter().for_each(|(peer_id, _)| peer_pool.add_peer(*peer_id)); let mut state_strategy = StateStrategy::new( client.clone(), @@ -425,6 +459,7 @@ mod test { None, false, initial_peers, + peer_pool, ); let peer_id = @@ -450,6 +485,8 @@ mod test { .map(|best_number| (PeerId::random(), best_number)) .collect::>(); let initial_peers = peers.iter().map(|(p, n)| (*p, *n)); + let peer_pool: PeerPool = Default::default(); + peers.iter().for_each(|(peer_id, _)| peer_pool.add_peer(*peer_id)); let mut state_strategy = StateStrategy::new( client.clone(), @@ -458,6 +495,7 @@ mod test { None, false, initial_peers, + peer_pool, ); let peer_id = state_strategy.schedule_next_peer(PeerState::DownloadingState, 10); @@ -477,7 +515,10 @@ mod test { .unwrap() .block; - let initial_peers = (1..=10).map(|best_number| (PeerId::random(), best_number)); + let initial_peers = + (1..=10).map(|best_number| (PeerId::random(), best_number)).collect::>(); + let peer_pool: PeerPool = Default::default(); + initial_peers.iter().for_each(|(peer_id, _)| peer_pool.add_peer(*peer_id)); let mut state_strategy = StateStrategy::new( client.clone(), @@ -485,7 +526,8 @@ mod test { None, None, false, - initial_peers, + initial_peers.into_iter(), + peer_pool, ); let (_peer_id, mut opaque_request) = state_strategy.state_request().unwrap(); @@ -507,7 +549,10 @@ mod test { .unwrap() .block; - let initial_peers = (1..=10).map(|best_number| (PeerId::random(), best_number)); + let initial_peers = + (1..=10).map(|best_number| (PeerId::random(), best_number)).collect::>(); + let peer_pool: PeerPool = Default::default(); + initial_peers.iter().for_each(|(peer_id, _)| peer_pool.add_peer(*peer_id)); let mut state_strategy = StateStrategy::new( client.clone(), @@ -515,7 +560,8 @@ mod test { None, None, false, - initial_peers, + initial_peers.into_iter(), + peer_pool, ); // First request is sent. 
@@ -531,15 +577,23 @@ mod test { state_sync_provider.expect_import().return_once(|_| ImportResult::Continue); let peer_id = PeerId::random(); let initial_peers = std::iter::once((peer_id, 10)); - let mut state_strategy = - StateStrategy::new_with_provider(Box::new(state_sync_provider), initial_peers); + let peer_pool: PeerPool = Default::default(); + peer_pool.add_peer(peer_id); + let mut state_strategy = StateStrategy::new_with_provider( + Box::new(state_sync_provider), + initial_peers, + peer_pool.clone(), + ); // Manually set the peer's state. + assert!(peer_pool.try_reserve_peer(&peer_id)); state_strategy.peers.get_mut(&peer_id).unwrap().state = PeerState::DownloadingState; let dummy_response = OpaqueStateResponse(Box::new(StateResponse::default())); state_strategy.on_state_response(peer_id, dummy_response); assert!(state_strategy.peers.get(&peer_id).unwrap().state.is_available()); + // Peer is also available in `PeerPool`. + assert!(peer_pool.try_reserve_peer(&peer_id)); } #[test] @@ -549,12 +603,19 @@ mod test { state_sync_provider.expect_import().return_once(|_| ImportResult::BadResponse); let peer_id = PeerId::random(); let initial_peers = std::iter::once((peer_id, 10)); - let mut state_strategy = - StateStrategy::new_with_provider(Box::new(state_sync_provider), initial_peers); + let peer_pool: PeerPool = Default::default(); + peer_pool.add_peer(peer_id); + let mut state_strategy = StateStrategy::new_with_provider( + Box::new(state_sync_provider), + initial_peers, + peer_pool.clone(), + ); // Manually set the peer's state. + assert!(peer_pool.try_reserve_peer(&peer_id)); state_strategy.peers.get_mut(&peer_id).unwrap().state = PeerState::DownloadingState; + + // Receiving a response drops the peer. let dummy_response = OpaqueStateResponse(Box::new(StateResponse::default())); - // Receiving response drops the peer. assert!(matches!( state_strategy.on_state_response_inner(peer_id, dummy_response), Err(BadPeer(id, _rep)) if id == peer_id, @@ -568,9 +629,15 @@ mod test { state_sync_provider.expect_import().return_once(|_| ImportResult::Continue); let peer_id = PeerId::random(); let initial_peers = std::iter::once((peer_id, 10)); - let mut state_strategy = - StateStrategy::new_with_provider(Box::new(state_sync_provider), initial_peers); + let peer_pool: PeerPool = Default::default(); + peer_pool.add_peer(peer_id); + let mut state_strategy = StateStrategy::new_with_provider( + Box::new(state_sync_provider), + initial_peers, + peer_pool.clone(), + ); // Manually set the peer's state . + assert!(peer_pool.try_reserve_peer(&peer_id)); state_strategy.peers.get_mut(&peer_id).unwrap().state = PeerState::DownloadingState; let dummy_response = OpaqueStateResponse(Box::new(StateResponse::default())); @@ -627,9 +694,15 @@ mod test { // Prepare `StateStrategy`. let peer_id = PeerId::random(); let initial_peers = std::iter::once((peer_id, 10)); - let mut state_strategy = - StateStrategy::new_with_provider(Box::new(state_sync_provider), initial_peers); + let peer_pool: PeerPool = Default::default(); + peer_pool.add_peer(peer_id); + let mut state_strategy = StateStrategy::new_with_provider( + Box::new(state_sync_provider), + initial_peers, + peer_pool.clone(), + ); // Manually set the peer's state . + assert!(peer_pool.try_reserve_peer(&peer_id)); state_strategy.peers.get_mut(&peer_id).unwrap().state = PeerState::DownloadingState; // Receive response. 
@@ -651,8 +724,11 @@ mod test { let mut state_sync_provider = MockStateSync::::new(); state_sync_provider.expect_target_hash().return_const(target_hash); - let mut state_strategy = - StateStrategy::new_with_provider(Box::new(state_sync_provider), std::iter::empty()); + let mut state_strategy = StateStrategy::new_with_provider( + Box::new(state_sync_provider), + std::iter::empty(), + Default::default(), + ); // Unknown block imported. state_strategy.on_blocks_processed( @@ -674,8 +750,11 @@ mod test { let mut state_sync_provider = MockStateSync::::new(); state_sync_provider.expect_target_hash().return_const(target_hash); - let mut state_strategy = - StateStrategy::new_with_provider(Box::new(state_sync_provider), std::iter::empty()); + let mut state_strategy = StateStrategy::new_with_provider( + Box::new(state_sync_provider), + std::iter::empty(), + Default::default(), + ); // Target block imported. state_strategy.on_blocks_processed( @@ -698,8 +777,11 @@ mod test { let mut state_sync_provider = MockStateSync::::new(); state_sync_provider.expect_target_hash().return_const(target_hash); - let mut state_strategy = - StateStrategy::new_with_provider(Box::new(state_sync_provider), std::iter::empty()); + let mut state_strategy = StateStrategy::new_with_provider( + Box::new(state_sync_provider), + std::iter::empty(), + Default::default(), + ); // Target block import failed. state_strategy.on_blocks_processed( @@ -724,10 +806,16 @@ mod test { state_sync_provider.expect_is_complete().return_const(true); // Get enough peers for possible spurious requests. - let initial_peers = (1..=10).map(|best_number| (PeerId::random(), best_number)); - - let mut state_strategy = - StateStrategy::new_with_provider(Box::new(state_sync_provider), initial_peers); + let initial_peers = + (1..=10).map(|best_number| (PeerId::random(), best_number)).collect::>(); + let peer_pool: PeerPool = Default::default(); + initial_peers.iter().for_each(|(peer_id, _)| peer_pool.add_peer(*peer_id)); + + let mut state_strategy = StateStrategy::new_with_provider( + Box::new(state_sync_provider), + initial_peers.into_iter(), + peer_pool, + ); state_strategy.on_blocks_processed( 1, diff --git a/substrate/client/network/sync/src/strategy/warp.rs b/substrate/client/network/sync/src/strategy/warp.rs index d69373f45ed7..c6a857d00ae2 100644 --- a/substrate/client/network/sync/src/strategy/warp.rs +++ b/substrate/client/network/sync/src/strategy/warp.rs @@ -491,7 +491,7 @@ where min_best_number: Option>, ) -> Option { let mut targets: Vec<_> = self.peers.values().map(|p| p.best_number).collect(); - if targets.is_empty { + if targets.is_empty() { return None } targets.sort(); @@ -509,19 +509,19 @@ where warn!( target: LOG_TARGET, "Failed to reserve peer {peer_id} in the peer pool that was \ - just returned as available.", + just returned as available (`WarpSync`).", ); debug_assert!(false); } } - } else { - warn!( - target: LOG_TARGET, - "State inconsistency: peer {peer_id} is in the pool of connected peers, \ - but not known to `WarpSync`.", - ); - debug_assert!(false); } + } else { + warn!( + target: LOG_TARGET, + "State inconsistency: peer {peer_id} is in the pool of connected peers, \ + but not known to `WarpSync`.", + ); + debug_assert!(false); } None } From 02259bc89c9952c66e078c6032688e2bbc9362ad Mon Sep 17 00:00:00 2001 From: Dmitry Markin Date: Tue, 26 Dec 2023 19:27:30 +0200 Subject: [PATCH 06/31] WIP: Reserve peers for `ChainSync` in `PeerPool` For block and state requests. No justification requests yet. 
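In terms of the illustrative `PeerPool` model sketched earlier in this series, the shape of the change is: on the request path `ChainSync` only considers peers that are free in the shared pool and reserves each one before emitting a request for it, and on the response path the peer is handed back. The method names in this sketch (`block_request_targets`, `on_block_response`) are placeholders; the real entry points in the diff below are `block_requests`, `state_request` and the response handlers.

// Hypothetical, simplified model of the reserve-on-request /
// free-on-response cycle this patch introduces for `ChainSync`.
struct ChainSyncModel {
    pool: PeerPool, // the shared pool from the earlier sketch
}

impl ChainSyncModel {
    /// Request side: only peers no other strategy holds are considered,
    /// and each one is reserved before a request is handed out for it.
    fn block_request_targets(&self) -> Vec<u64> {
        self.pool
            .available_peers()
            .into_iter()
            .filter(|peer_id| self.pool.try_reserve_peer(peer_id))
            .collect()
    }

    /// Response side: whatever the outcome, the response frees the peer,
    /// so the strategies never hold the same peer at the same time.
    fn on_block_response(&self, peer_id: &u64) {
        self.pool.free_peer(peer_id);
    }
}

Since the syncing engine drives all strategies from a single task, `try_reserve_peer()` immediately after `available_peers()` can only fail if the bookkeeping is inconsistent; that is why the failure branches in the diff pair the warning with `debug_assert!(false)`.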
--- substrate/client/network/sync/src/strategy.rs | 6 +- .../network/sync/src/strategy/chain_sync.rs | 342 +++++++++++------- .../client/network/sync/src/strategy/state.rs | 2 + .../client/network/sync/src/strategy/warp.rs | 2 + 4 files changed, 228 insertions(+), 124 deletions(-) diff --git a/substrate/client/network/sync/src/strategy.rs b/substrate/client/network/sync/src/strategy.rs index e895dff3da45..f44d1300c47f 100644 --- a/substrate/client/network/sync/src/strategy.rs +++ b/substrate/client/network/sync/src/strategy.rs @@ -206,12 +206,14 @@ where peer_best_blocks: Default::default(), }) } else { + let peer_pool: PeerPool = Default::default(); let chain_sync = ChainSync::new( chain_sync_mode(config.mode), client.clone(), config.max_parallel_downloads, config.max_blocks_per_request, config.metrics_registry.clone(), + peer_pool.clone(), )?; Ok(Self { config, @@ -219,7 +221,7 @@ where warp: None, state: None, chain_sync: Some(chain_sync), - peer_pool: Default::default(), + peer_pool, peer_best_blocks: Default::default(), }) } @@ -554,6 +556,7 @@ where self.config.max_parallel_downloads, self.config.max_blocks_per_request, self.config.metrics_registry.clone(), + self.peer_pool.clone(), ) { Ok(chain_sync) => chain_sync, Err(e) => { @@ -583,6 +586,7 @@ where self.config.max_parallel_downloads, self.config.max_blocks_per_request, self.config.metrics_registry.clone(), + self.peer_pool.clone(), ) { Ok(chain_sync) => chain_sync, Err(e) => { diff --git a/substrate/client/network/sync/src/strategy/chain_sync.rs b/substrate/client/network/sync/src/strategy/chain_sync.rs index df29a2abc466..fae45cf7d432 100644 --- a/substrate/client/network/sync/src/strategy/chain_sync.rs +++ b/substrate/client/network/sync/src/strategy/chain_sync.rs @@ -28,6 +28,7 @@ //! the network, or whenever a block has been successfully verified, call the appropriate method in //! order to update it. +use super::PeerPool; use crate::{ blocks::BlockCollection, extra_requests::ExtraRequests, @@ -284,6 +285,8 @@ pub struct ChainSync { actions: Vec>, /// Prometheus metrics. metrics: Option, + /// Peer pool to reserve peers for requests from. + peer_pool: PeerPool, } /// All the data we have about a Peer that we are trying to sync with @@ -375,6 +378,7 @@ where max_parallel_downloads: u32, max_blocks_per_request: u32, metrics_registry: Option, + peer_pool: PeerPool, ) -> Result { let mut sync = Self { client, @@ -404,6 +408,7 @@ where None }, }), + peer_pool, }; sync.reset_sync_start_point()?; @@ -544,46 +549,59 @@ where } // If we are at genesis, just start downloading. 
- let (state, req) = if self.best_queued_number.is_zero() { + let (new_state, req) = if self.best_queued_number.is_zero() { debug!( target: LOG_TARGET, "New peer {peer_id} with best hash {best_hash} ({best_number}).", ); - (PeerSyncState::Available, None) + (Some(PeerSyncState::Available), None) } else { - let common_best = std::cmp::min(self.best_queued_number, best_number); + if self.peer_pool.try_reserve_peer(&peer_id) { + let common_best = std::cmp::min(self.best_queued_number, best_number); - debug!( - target: LOG_TARGET, - "New peer {} with unknown best hash {} ({}), searching for common ancestor.", - peer_id, - best_hash, - best_number - ); + debug!( + target: LOG_TARGET, + "New peer {} with unknown best hash {} ({}), searching for common ancestor.", + peer_id, + best_hash, + best_number + ); - ( - PeerSyncState::AncestorSearch { - current: common_best, - start: self.best_queued_number, - state: AncestorSearchState::ExponentialBackoff(One::one()), - }, - Some(ancestry_request::(common_best)), - ) + ( + Some(PeerSyncState::AncestorSearch { + current: common_best, + start: self.best_queued_number, + state: AncestorSearchState::ExponentialBackoff(One::one()), + }), + Some(ancestry_request::(common_best)), + ) + } else { + trace!( + target: LOG_TARGET, + "`ChainSync` is aware of a new peer {peer_id} with unknown best \ + hash {best_hash} ({best_number}), but can't start ancestry search \ + as the peer is reserved by another syncing strategy.", + ); + + (None, None) + } }; - self.allowed_requests.add(&peer_id); - // Replace `PeerSyncState::New` peer. - self.peers.insert( - peer_id, - PeerSync { + if let Some(state) = new_state { + self.allowed_requests.add(&peer_id); + // Replace `PeerSyncState::New` peer. + self.peers.insert( peer_id, - common_number: Zero::zero(), - best_hash, - best_number, - state, - }, - ); + PeerSync { + peer_id, + common_number: Zero::zero(), + best_hash, + best_number, + state, + }, + ); + } req.map(|req| Ok((peer_id, req))) }, @@ -708,6 +726,7 @@ where blocks.reverse() } self.allowed_requests.add(peer_id); + self.peer_pool.free_peer(peer_id); if let Some(request) = request { match &mut peer.state { PeerSyncState::DownloadingNew(_) => { @@ -962,6 +981,7 @@ where }; self.allowed_requests.add(&peer_id); + self.peer_pool.free_peer(&peer_id); if let PeerSyncState::DownloadingJustification(hash) = peer.state { peer.state = PeerSyncState::Available; @@ -1502,11 +1522,14 @@ where /// Get justification requests scheduled by sync to be sent out. fn justification_requests(&mut self) -> Vec<(PeerId, BlockRequest)> { let peers = &mut self.peers; + // TODO: filter peers to only include `self.peer_pool.available_peers()`. + // As an option, pass available peers to `matcher()`. let mut matcher = self.extra_justifications.matcher(); std::iter::from_fn(move || { - if let Some((peer, request)) = matcher.next(peers) { + if let Some((peer_id, request)) = matcher.next(peers) { + // TODO: reserve the peer in `PeerPool`. 
peers - .get_mut(&peer) + .get_mut(&peer_id) .expect( "`Matcher::next` guarantees the `PeerId` comes from the given peers; qed", ) @@ -1518,7 +1541,7 @@ where direction: Direction::Ascending, max: Some(1), }; - Some((peer, req)) + Some((peer_id, req)) } else { None } @@ -1549,98 +1572,149 @@ where let max_parallel = if is_major_syncing { 1 } else { self.max_parallel_downloads }; let max_blocks_per_request = self.max_blocks_per_request; let gap_sync = &mut self.gap_sync; - self.peers - .iter_mut() - .filter_map(move |(&id, peer)| { - if !peer.state.is_available() || !allowed_requests.contains(&id) { - return None - } - // If our best queued is more than `MAX_BLOCKS_TO_LOOK_BACKWARDS` blocks away from - // the common number, the peer best number is higher than our best queued and the - // common number is smaller than the last finalized block number, we should do an - // ancestor search to find a better common block. If the queue is full we wait till - // all blocks are imported though. - if best_queued.saturating_sub(peer.common_number) > - MAX_BLOCKS_TO_LOOK_BACKWARDS.into() && - best_queued < peer.best_number && - peer.common_number < last_finalized && - queue.len() <= MAJOR_SYNC_BLOCKS.into() - { - trace!( - target: LOG_TARGET, - "Peer {:?} common block {} too far behind of our best {}. Starting ancestry search.", - id, - peer.common_number, - best_queued, - ); - let current = std::cmp::min(peer.best_number, best_queued); - peer.state = PeerSyncState::AncestorSearch { - current, - start: best_queued, - state: AncestorSearchState::ExponentialBackoff(One::one()), - }; - Some((id, ancestry_request::(current))) - } else if let Some((range, req)) = peer_block_request( - &id, - peer, - blocks, - attrs, - max_parallel, - max_blocks_per_request, - last_finalized, - best_queued, - ) { - peer.state = PeerSyncState::DownloadingNew(range.start); - trace!( - target: LOG_TARGET, - "New block request for {}, (best:{}, common:{}) {:?}", - id, - peer.best_number, - peer.common_number, - req, - ); - Some((id, req)) - } else if let Some((hash, req)) = fork_sync_request( - &id, - fork_targets, - best_queued, - last_finalized, - attrs, - |hash| { - if queue.contains(hash) { - BlockStatus::Queued + self.peer_pool + .available_peers() + .into_iter() + .filter_map(|peer_id| { + if let Some(peer) = self.peers.get_mut(&peer_id) { + if !peer.state.is_available() || !allowed_requests.contains(&peer_id) { + return None + } + + // If our best queued is more than `MAX_BLOCKS_TO_LOOK_BACKWARDS` blocks away + // from the common number, the peer best number is higher than our best queued + // and the common number is smaller than the last finalized block number, we + // should do an ancestor search to find a better common block. If the queue is + // full we wait till all blocks are imported though. + if best_queued.saturating_sub(peer.common_number) > + MAX_BLOCKS_TO_LOOK_BACKWARDS.into() && + best_queued < peer.best_number && + peer.common_number < last_finalized && + queue.len() <= MAJOR_SYNC_BLOCKS.into() + { + if self.peer_pool.try_reserve_peer(&peer_id) { + trace!( + target: LOG_TARGET, + "Peer {:?} common block {} too far behind of our best {}. 
\
+ Starting ancestry search.",
+ peer_id,
+ peer.common_number,
+ best_queued,
+ );
+ let current = std::cmp::min(peer.best_number, best_queued);
+ peer.state = PeerSyncState::AncestorSearch {
+ current,
+ start: best_queued,
+ state: AncestorSearchState::ExponentialBackoff(One::one()),
+ };
+ Some((peer_id, ancestry_request::<B>(current)))
} else {
- client.block_status(*hash).unwrap_or(BlockStatus::Unknown)
+ warn!(
+ target: LOG_TARGET,
+ "Failed to reserve peer {peer_id} that was just returned as available.",
+ );
+ debug_assert!(false);
+ // FIXME: this `debug_assert` and others like it only make sense as long
+ // as strategy handlers don't run in parallel in a multitasking context.
+ None
}
- },
- max_blocks_per_request,
- ) {
- trace!(target: LOG_TARGET, "Downloading fork {hash:?} from {id}");
- peer.state = PeerSyncState::DownloadingStale(hash);
- Some((id, req))
- } else if let Some((range, req)) = gap_sync.as_mut().and_then(|sync| {
- peer_gap_block_request(
- &id,
+ } else if let Some((range, req)) = peer_block_request(
+ &peer_id,
peer,
- &mut sync.blocks,
+ blocks,
attrs,
- sync.target,
- sync.best_queued_number,
+ max_parallel,
max_blocks_per_request,
- )
- }) {
- peer.state = PeerSyncState::DownloadingGap(range.start);
- trace!(
+ last_finalized,
+ best_queued,
+ ) {
+ if self.peer_pool.try_reserve_peer(&peer_id) {
+ peer.state = PeerSyncState::DownloadingNew(range.start);
+ trace!(
+ target: LOG_TARGET,
+ "New block request for {}, (best:{}, common:{}) {:?}",
+ peer_id,
+ peer.best_number,
+ peer.common_number,
+ req,
+ );
+ Some((peer_id, req))
+ } else {
+ warn!(
+ target: LOG_TARGET,
+ "Failed to reserve peer {peer_id} that was just returned as available.",
+ );
+ debug_assert!(false);
+ None
+ }
+ } else if let Some((hash, req)) = fork_sync_request(
+ &peer_id,
+ fork_targets,
+ best_queued,
+ last_finalized,
+ attrs,
+ |hash| {
+ if queue.contains(hash) {
+ BlockStatus::Queued
+ } else {
+ client.block_status(*hash).unwrap_or(BlockStatus::Unknown)
+ }
+ },
+ max_blocks_per_request,
+ ) {
+ if self.peer_pool.try_reserve_peer(&peer_id) {
+ trace!(target: LOG_TARGET, "Downloading fork {hash:?} from {peer_id}");
+ peer.state = PeerSyncState::DownloadingStale(hash);
+ Some((peer_id, req))
+ } else {
+ warn!(
+ target: LOG_TARGET,
+ "Failed to reserve peer {peer_id} that was just returned as available.",
+ );
+ debug_assert!(false);
+ None
+ }
+ } else if let Some((range, req)) = gap_sync.as_mut().and_then(|sync| {
+ peer_gap_block_request(
+ &peer_id,
+ peer,
+ &mut sync.blocks,
+ attrs,
+ sync.target,
+ sync.best_queued_number,
+ max_blocks_per_request,
+ )
+ }) {
+ if self.peer_pool.try_reserve_peer(&peer_id) {
+ peer.state = PeerSyncState::DownloadingGap(range.start);
+ trace!(
+ target: LOG_TARGET,
+ "New gap block request for {}, (best:{}, common:{}) {:?}",
+ peer_id,
+ peer.best_number,
+ peer.common_number,
+ req,
+ );
+ Some((peer_id, req))
+ } else {
+ warn!(
+ target: LOG_TARGET,
+ "Failed to reserve peer {peer_id} that was just returned as available.",
+ );
+ debug_assert!(false);
+ None
+ }
+ } else {
+ None
+ }
+ } else {
+ warn!(
target: LOG_TARGET,
- "New gap block request for {}, (best:{}, common:{}) {:?}",
- id,
- peer.best_number,
- peer.common_number,
- req,
+ "State inconsistency: peer {peer_id} is in the pool of connected peers, \
+ but not known to `ChainSync`.",
);
- Some((id, req))
- } else {
+ debug_assert!(false);
None
}
})
@@ -1663,13 +1737,34 @@ where
return None
}

- for (id, peer) in self.peers.iter_mut() {
- if peer.state.is_available() && peer.common_number
>= sync.target_number() { - peer.state = PeerSyncState::DownloadingState; - let request = sync.next_request(); - trace!(target: LOG_TARGET, "New StateRequest for {}: {:?}", id, request); - self.allowed_requests.clear(); - return Some((*id, OpaqueStateRequest(Box::new(request)))) + for peer_id in self.peer_pool.available_peers() { + if let Some(peer) = self.peers.get_mut(&peer_id) { + if peer.state.is_available() && peer.common_number >= sync.target_number() { + if self.peer_pool.try_reserve_peer(&peer_id) { + peer.state = PeerSyncState::DownloadingState; + let request = sync.next_request(); + trace!( + target: LOG_TARGET, + "New StateRequest for {peer_id}: {request:?}", + ); + self.allowed_requests.clear(); + return Some((peer_id, OpaqueStateRequest(Box::new(request)))) + } else { + warn!( + target: LOG_TARGET, + "Failed to reserve peer {peer_id} in the peer pool that was just \ + returned as available (`ChainSync`).", + ); + debug_assert!(false); + } + } + } else { + warn!( + target: LOG_TARGET, + "State inconsistency: peer {peer_id} is in the pool of connected peers, \ + but not known to `ChainSync`.", + ); + debug_assert!(false); } } } @@ -1695,6 +1790,7 @@ where if let PeerSyncState::DownloadingState = peer.state { peer.state = PeerSyncState::Available; self.allowed_requests.set_all(); + self.peer_pool.free_peer(peer_id); } } let import_result = if let Some(sync) = &mut self.state_sync { diff --git a/substrate/client/network/sync/src/strategy/state.rs b/substrate/client/network/sync/src/strategy/state.rs index dd8be8db80aa..38e060378a29 100644 --- a/substrate/client/network/sync/src/strategy/state.rs +++ b/substrate/client/network/sync/src/strategy/state.rs @@ -834,4 +834,6 @@ mod test { // No more actions generated. assert_eq!(state_strategy.actions().count(), 0); } + + // TODO: test that peers are freed from `PeerPool` when responses are received. } diff --git a/substrate/client/network/sync/src/strategy/warp.rs b/substrate/client/network/sync/src/strategy/warp.rs index c6a857d00ae2..4709c4ae4577 100644 --- a/substrate/client/network/sync/src/strategy/warp.rs +++ b/substrate/client/network/sync/src/strategy/warp.rs @@ -1521,4 +1521,6 @@ mod test { assert_eq!(result.target_body, body); assert_eq!(result.target_justifications, justifications); } + + // TODO: test that peers are freed from `PeerPool` when responses are received. 
} From 264723bd42bd3fc5667b8096d0ca291ab9cef6a7 Mon Sep 17 00:00:00 2001 From: Dmitry Markin Date: Mon, 15 Jan 2024 15:20:17 +0200 Subject: [PATCH 07/31] minor: fix compilation + rustfmt --- substrate/client/network/sync/src/strategy.rs | 12 ++++++------ .../client/network/sync/src/strategy/state.rs | 2 +- substrate/client/network/sync/src/strategy/warp.rs | 14 +++++++------- 3 files changed, 14 insertions(+), 14 deletions(-) diff --git a/substrate/client/network/sync/src/strategy.rs b/substrate/client/network/sync/src/strategy.rs index f44d1300c47f..8bc8616a22c2 100644 --- a/substrate/client/network/sync/src/strategy.rs +++ b/substrate/client/network/sync/src/strategy.rs @@ -42,7 +42,7 @@ use sc_network_common::sync::{ use sp_blockchain::{Error as ClientError, HeaderBackend, HeaderMetadata}; use sp_consensus::BlockOrigin; use sp_runtime::{ - traits::{Block as BlockT, NumberFor}, + traits::{Block as BlockT, Header, NumberFor}, Justifications, }; use state::{StateStrategy, StateStrategyAction}; @@ -575,11 +575,11 @@ where }, } } else if let Some(state) = &self.state { - if state.is_succeded() { - info!(target: LOG_TARGET, "State sync is complete, continuing with block sync."); - } else { - error!(target: LOG_TARGET, "State sync failed. Falling back to full sync."); - } + if state.is_succeded() { + info!(target: LOG_TARGET, "State sync is complete, continuing with block sync."); + } else { + error!(target: LOG_TARGET, "State sync failed. Falling back to full sync."); + } let mut chain_sync = match ChainSync::new( chain_sync_mode(self.config.mode), self.client.clone(), diff --git a/substrate/client/network/sync/src/strategy/state.rs b/substrate/client/network/sync/src/strategy/state.rs index 38e060378a29..f6e01f48b4a9 100644 --- a/substrate/client/network/sync/src/strategy/state.rs +++ b/substrate/client/network/sync/src/strategy/state.rs @@ -26,7 +26,7 @@ use crate::{ LOG_TARGET, }; use libp2p::PeerId; -use log::{debug, error, info, trace, warn}; +use log::{debug, error, trace, warn}; use sc_client_api::ProofProvider; use sc_consensus::{BlockImportError, BlockImportStatus, IncomingBlock}; use sc_network_common::sync::message::BlockAnnounce; diff --git a/substrate/client/network/sync/src/strategy/warp.rs b/substrate/client/network/sync/src/strategy/warp.rs index 4709c4ae4577..15118ba40213 100644 --- a/substrate/client/network/sync/src/strategy/warp.rs +++ b/substrate/client/network/sync/src/strategy/warp.rs @@ -514,14 +514,14 @@ where debug_assert!(false); } } + } else { + warn!( + target: LOG_TARGET, + "State inconsistency: peer {peer_id} is in the pool of connected peers, \ + but not known to `WarpSync`.", + ); + debug_assert!(false); } - } else { - warn!( - target: LOG_TARGET, - "State inconsistency: peer {peer_id} is in the pool of connected peers, \ - but not known to `WarpSync`.", - ); - debug_assert!(false); } None } From 4bb9a7b45ebb108a2d35162284b1c9927e494c28 Mon Sep 17 00:00:00 2001 From: Dmitry Markin Date: Mon, 15 Jan 2024 15:32:06 +0200 Subject: [PATCH 08/31] Apply suggestions from code review Co-authored-by: Aaro Altonen <48052676+altonen@users.noreply.github.com> --- substrate/client/network/sync/src/strategy.rs | 14 +++----------- 1 file changed, 3 insertions(+), 11 deletions(-) diff --git a/substrate/client/network/sync/src/strategy.rs b/substrate/client/network/sync/src/strategy.rs index 8bc8616a22c2..7517e5c55453 100644 --- a/substrate/client/network/sync/src/strategy.rs +++ b/substrate/client/network/sync/src/strategy.rs @@ -102,7 +102,7 @@ impl PeerPool { .lock() .iter() 
.filter_map( - |(peer_id, status)| if status.is_available() { Some(*peer_id) } else { None }, + |(peer_id, status)| status.is_available().then_some(*peer_id), ) .collect() } @@ -410,20 +410,12 @@ where /// Get the total number of downloaded blocks. pub fn num_downloaded_blocks(&self) -> usize { - if let Some(ref chain_sync) = self.chain_sync { - chain_sync.num_downloaded_blocks() - } else { - 0 - } + self.chain_sync.map_or(0, |chain_sync| chain_sync.num_downloaded_blocks()) } /// Get an estimate of the number of parallel sync requests. pub fn num_sync_requests(&self) -> usize { - if let Some(ref chain_sync) = self.chain_sync { - chain_sync.num_sync_requests() - } else { - 0 - } + self.chain_sync.map_or(0, |chain_sync| chain_sync.num_sync_requests()) } /// Report Prometheus metrics From a1cec8c566881789f849ce8b47a72ed554e484d4 Mon Sep 17 00:00:00 2001 From: Dmitry Markin Date: Mon, 15 Jan 2024 15:48:03 +0200 Subject: [PATCH 09/31] Partially revert "Apply suggestions from code review" This reverts commit 4bb9a7b45ebb108a2d35162284b1c9927e494c28. --- substrate/client/network/sync/src/strategy.rs | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/substrate/client/network/sync/src/strategy.rs b/substrate/client/network/sync/src/strategy.rs index 7517e5c55453..32b5dc5c2c71 100644 --- a/substrate/client/network/sync/src/strategy.rs +++ b/substrate/client/network/sync/src/strategy.rs @@ -410,12 +410,20 @@ where /// Get the total number of downloaded blocks. pub fn num_downloaded_blocks(&self) -> usize { - self.chain_sync.map_or(0, |chain_sync| chain_sync.num_downloaded_blocks()) + if let Some(ref chain_sync) = self.chain_sync { + chain_sync.num_downloaded_blocks() + } else { + 0 + } } /// Get an estimate of the number of parallel sync requests. pub fn num_sync_requests(&self) -> usize { - self.chain_sync.map_or(0, |chain_sync| chain_sync.num_sync_requests()) + if let Some(ref chain_sync) = self.chain_sync { + chain_sync.num_sync_requests() + } else { + 0 + } } /// Report Prometheus metrics From e6c2b3ac2a7784d7d2936a745ba306eb75d2896e Mon Sep 17 00:00:00 2001 From: Dmitry Markin Date: Mon, 15 Jan 2024 16:07:01 +0200 Subject: [PATCH 10/31] minor: docs --- substrate/client/network/sync/src/strategy.rs | 12 +++++------- .../client/network/sync/src/strategy/chain_sync.rs | 2 +- 2 files changed, 6 insertions(+), 8 deletions(-) diff --git a/substrate/client/network/sync/src/strategy.rs b/substrate/client/network/sync/src/strategy.rs index 32b5dc5c2c71..62d533be3f83 100644 --- a/substrate/client/network/sync/src/strategy.rs +++ b/substrate/client/network/sync/src/strategy.rs @@ -101,9 +101,7 @@ impl PeerPool { self.peers .lock() .iter() - .filter_map( - |(peer_id, status)| status.is_available().then_some(*peer_id), - ) + .filter_map(|(peer_id, status)| status.is_available().then_some(*peer_id)) .collect() } @@ -185,7 +183,7 @@ where + Sync + 'static, { - /// Initialize a new syncing startegy. + /// Initialize a new syncing strategy. 
pub fn new( config: SyncingConfig, client: Arc, @@ -384,7 +382,7 @@ where self.state.is_some() || match self.chain_sync { Some(ref s) => s.status().state.is_major_syncing(), - None => unreachable!("At least one syncing startegy is active; qed"), + None => unreachable!("At least one syncing strategy is active; qed"), } } @@ -404,7 +402,7 @@ where } else if let Some(ref chain_sync) = self.chain_sync { chain_sync.status() } else { - unreachable!("At least one syncing startegy is always active; qed") + unreachable!("At least one syncing strategy is always active; qed") } } @@ -515,7 +513,7 @@ where }) .collect()) } else { - unreachable!("At least one syncing startegy is always active; qed") + unreachable!("At least one syncing strategy is always active; qed") } } diff --git a/substrate/client/network/sync/src/strategy/chain_sync.rs b/substrate/client/network/sync/src/strategy/chain_sync.rs index fae45cf7d432..32c1e2b24d1a 100644 --- a/substrate/client/network/sync/src/strategy/chain_sync.rs +++ b/substrate/client/network/sync/src/strategy/chain_sync.rs @@ -487,6 +487,7 @@ where ); } + /// Process new peers assigning proper states and initiating requests. fn handle_new_peers( &mut self, ) -> impl Iterator), BadPeer>> + '_ { @@ -1372,7 +1373,6 @@ where self.add_peer(peer_id, p.best_hash, p.best_number); // since the request is not a justification, remove it from pending responses - // TODO: check `PeerSyncState`? self.actions.push(ChainSyncAction::CancelBlockRequest { peer_id }); }); } From 1db2ad1f972130a6f2cd579239f09722a2dbc163 Mon Sep 17 00:00:00 2001 From: Dmitry Markin Date: Mon, 15 Jan 2024 16:23:26 +0200 Subject: [PATCH 11/31] Apply review suggestions --- substrate/client/network/sync/src/strategy.rs | 14 ++++---------- 1 file changed, 4 insertions(+), 10 deletions(-) diff --git a/substrate/client/network/sync/src/strategy.rs b/substrate/client/network/sync/src/strategy.rs index 62d533be3f83..c5ee4f60312a 100644 --- a/substrate/client/network/sync/src/strategy.rs +++ b/substrate/client/network/sync/src/strategy.rs @@ -408,20 +408,14 @@ where /// Get the total number of downloaded blocks. pub fn num_downloaded_blocks(&self) -> usize { - if let Some(ref chain_sync) = self.chain_sync { - chain_sync.num_downloaded_blocks() - } else { - 0 - } + self.chain_sync + .as_ref() + .map_or(0, |chain_sync| chain_sync.num_downloaded_blocks()) } /// Get an estimate of the number of parallel sync requests. 
pub fn num_sync_requests(&self) -> usize { - if let Some(ref chain_sync) = self.chain_sync { - chain_sync.num_sync_requests() - } else { - 0 - } + self.chain_sync.as_ref().map_or(0, |chain_sync| chain_sync.num_sync_requests()) } /// Report Prometheus metrics From 17f6bbeead9bf2d32d26cf423007c2a7dd7908c1 Mon Sep 17 00:00:00 2001 From: Dmitry Markin Date: Fri, 19 Jan 2024 13:02:53 +0200 Subject: [PATCH 12/31] Simplify peer allocation from `PeerPool` for requests --- substrate/client/network/sync/src/strategy.rs | 55 +++--- .../network/sync/src/strategy/chain_sync.rs | 169 +++++++----------- .../client/network/sync/src/strategy/state.rs | 36 ++-- .../client/network/sync/src/strategy/warp.rs | 36 ++-- 4 files changed, 123 insertions(+), 173 deletions(-) diff --git a/substrate/client/network/sync/src/strategy.rs b/substrate/client/network/sync/src/strategy.rs index c5ee4f60312a..205f1b032eb3 100644 --- a/substrate/client/network/sync/src/strategy.rs +++ b/substrate/client/network/sync/src/strategy.rs @@ -83,30 +83,43 @@ impl PeerStatus { } } -#[derive(Clone, Default)] +#[derive(Default)] pub struct PeerPool { - peers: Arc>>, + peers: HashMap, +} + +pub struct AvailablePeer<'a> { + peer_id: &'a PeerId, + status: &'a mut PeerStatus, +} + +impl<'a> AvailablePeer<'a> { + pub fn peer_id(&self) -> &'a PeerId { + self.peer_id + } + + pub fn reserve(&mut self) { + *self.status = PeerStatus::Reserved; + } } impl PeerPool { - fn add_peer(&self, peer_id: PeerId) { - self.peers.lock().insert(peer_id, PeerStatus::Available); + fn add_peer(&mut self, peer_id: PeerId) { + self.peers.insert(peer_id, PeerStatus::Available); } - fn remove_peer(&self, peer_id: &PeerId) { - self.peers.lock().remove(peer_id); + fn remove_peer(&mut self, peer_id: &PeerId) { + self.peers.remove(peer_id); } - fn available_peers(&self) -> Vec { - self.peers - .lock() - .iter() - .filter_map(|(peer_id, status)| status.is_available().then_some(*peer_id)) - .collect() + fn available_peers<'a>(&'a mut self) -> impl Iterator + 'a { + self.peers.iter_mut().filter_map(|(peer_id, status)| { + status.is_available().then_some(AvailablePeer::<'a> { peer_id, status }) + }) } - fn try_reserve_peer(&self, peer_id: &PeerId) -> bool { - match self.peers.lock().get_mut(peer_id) { + fn try_reserve_peer(&mut self, peer_id: &PeerId) -> bool { + match self.peers.get_mut(peer_id) { Some(peer_status) => match peer_status { PeerStatus::Available => { *peer_status = PeerStatus::Reserved; @@ -121,8 +134,8 @@ impl PeerPool { } } - fn free_peer(&self, peer_id: &PeerId) { - match self.peers.lock().get_mut(peer_id) { + fn free_peer(&mut self, peer_id: &PeerId) { + match self.peers.get_mut(peer_id) { Some(peer_status) => match peer_status { PeerStatus::Available => { warn!(target: LOG_TARGET, "Trying to free available peer {peer_id}.") @@ -168,7 +181,7 @@ pub struct SyncingStrategy { warp: Option>, state: Option>, chain_sync: Option>, - peer_pool: PeerPool, + peer_pool: Arc>, peer_best_blocks: HashMap)>, } @@ -192,7 +205,7 @@ where if let SyncMode::Warp = config.mode { let warp_sync_config = warp_sync_config .expect("Warp sync configuration must be supplied in warp sync mode."); - let peer_pool: PeerPool = Default::default(); + let peer_pool = Arc::new(Mutex::new(PeerPool::default())); let warp_sync = WarpSync::new(client.clone(), warp_sync_config, peer_pool.clone()); Ok(Self { config, @@ -204,7 +217,7 @@ where peer_best_blocks: Default::default(), }) } else { - let peer_pool: PeerPool = Default::default(); + let peer_pool = 
Arc::new(Mutex::new(PeerPool::default())); let chain_sync = ChainSync::new( chain_sync_mode(config.mode), client.clone(), @@ -227,7 +240,7 @@ where /// Notify that a new peer has connected. pub fn add_peer(&mut self, peer_id: PeerId, best_hash: B::Hash, best_number: NumberFor) { - self.peer_pool.add_peer(peer_id); + self.peer_pool.lock().add_peer(peer_id); self.peer_best_blocks.insert(peer_id, (best_hash, best_number)); self.warp.iter_mut().for_each(|s| s.add_peer(peer_id, best_hash, best_number)); @@ -243,7 +256,7 @@ where self.state.iter_mut().for_each(|s| s.remove_peer(peer_id)); self.chain_sync.iter_mut().for_each(|s| s.remove_peer(peer_id)); - self.peer_pool.remove_peer(peer_id); + self.peer_pool.lock().remove_peer(peer_id); self.peer_best_blocks.remove(peer_id); } diff --git a/substrate/client/network/sync/src/strategy/chain_sync.rs b/substrate/client/network/sync/src/strategy/chain_sync.rs index 32c1e2b24d1a..da39a1312a0d 100644 --- a/substrate/client/network/sync/src/strategy/chain_sync.rs +++ b/substrate/client/network/sync/src/strategy/chain_sync.rs @@ -44,6 +44,7 @@ use crate::{ use codec::Encode; use libp2p::PeerId; use log::{debug, error, info, trace, warn}; +use parking_lot::Mutex; use prometheus_endpoint::{register, Gauge, GaugeVec, Opts, PrometheusError, Registry, U64}; use sc_client_api::{BlockBackend, ProofProvider}; use sc_consensus::{BlockImportError, BlockImportStatus, IncomingBlock}; @@ -286,7 +287,7 @@ pub struct ChainSync { /// Prometheus metrics. metrics: Option, /// Peer pool to reserve peers for requests from. - peer_pool: PeerPool, + peer_pool: Arc>, } /// All the data we have about a Peer that we are trying to sync with @@ -378,7 +379,7 @@ where max_parallel_downloads: u32, max_blocks_per_request: u32, metrics_registry: Option, - peer_pool: PeerPool, + peer_pool: Arc>, ) -> Result { let mut sync = Self { client, @@ -558,7 +559,7 @@ where (Some(PeerSyncState::Available), None) } else { - if self.peer_pool.try_reserve_peer(&peer_id) { + if self.peer_pool.lock().try_reserve_peer(&peer_id) { let common_best = std::cmp::min(self.best_queued_number, best_number); debug!( @@ -727,7 +728,7 @@ where blocks.reverse() } self.allowed_requests.add(peer_id); - self.peer_pool.free_peer(peer_id); + self.peer_pool.lock().free_peer(peer_id); if let Some(request) = request { match &mut peer.state { PeerSyncState::DownloadingNew(_) => { @@ -982,7 +983,7 @@ where }; self.allowed_requests.add(&peer_id); - self.peer_pool.free_peer(&peer_id); + self.peer_pool.lock().free_peer(&peer_id); if let PeerSyncState::DownloadingJustification(hash) = peer.state { peer.state = PeerSyncState::Available; @@ -1574,11 +1575,12 @@ where let gap_sync = &mut self.gap_sync; self.peer_pool + .lock() .available_peers() - .into_iter() - .filter_map(|peer_id| { - if let Some(peer) = self.peers.get_mut(&peer_id) { - if !peer.state.is_available() || !allowed_requests.contains(&peer_id) { + .filter_map(|mut available_peer| { + let peer_id = available_peer.peer_id(); + if let Some(peer) = self.peers.get_mut(peer_id) { + if !allowed_requests.contains(&peer_id) { return None } @@ -1593,32 +1595,22 @@ where peer.common_number < last_finalized && queue.len() <= MAJOR_SYNC_BLOCKS.into() { - if self.peer_pool.try_reserve_peer(&peer_id) { - trace!( - target: LOG_TARGET, - "Peer {:?} common block {} too far behind of our best {}. 
\ - Starting ancestry search.", - peer_id, - peer.common_number, - best_queued, - ); - let current = std::cmp::min(peer.best_number, best_queued); - peer.state = PeerSyncState::AncestorSearch { - current, - start: best_queued, - state: AncestorSearchState::ExponentialBackoff(One::one()), - }; - Some((peer_id, ancestry_request::(current))) - } else { - warn!( - target: LOG_TARGET, - "Failed to reserve peer {peer_id} that was just returned as available.", - ); - debug_assert!(false); - // FIXME: this `debug_assert` and others alike only make sense until - // no strategy handlers run in parallel in multitasking context. - None - } + available_peer.reserve(); + trace!( + target: LOG_TARGET, + "Peer {:?} common block {} too far behind of our best {}. \ + Starting ancestry search.", + peer_id, + peer.common_number, + best_queued, + ); + let current = std::cmp::min(peer.best_number, best_queued); + peer.state = PeerSyncState::AncestorSearch { + current, + start: best_queued, + state: AncestorSearchState::ExponentialBackoff(One::one()), + }; + Some((*peer_id, ancestry_request::(current))) } else if let Some((range, req)) = peer_block_request( &peer_id, peer, @@ -1629,25 +1621,17 @@ where last_finalized, best_queued, ) { - if self.peer_pool.try_reserve_peer(&peer_id) { - peer.state = PeerSyncState::DownloadingNew(range.start); - trace!( - target: LOG_TARGET, - "New block request for {}, (best:{}, common:{}) {:?}", - peer_id, - peer.best_number, - peer.common_number, - req, - ); - Some((peer_id, req)) - } else { - warn!( - target: LOG_TARGET, - "Failed to reserve peer {peer_id} that was just returned as available.", - ); - debug_assert!(false); - None - } + available_peer.reserve(); + peer.state = PeerSyncState::DownloadingNew(range.start); + trace!( + target: LOG_TARGET, + "New block request for {}, (best:{}, common:{}) {:?}", + peer_id, + peer.best_number, + peer.common_number, + req, + ); + Some((*peer_id, req)) } else if let Some((hash, req)) = fork_sync_request( &peer_id, fork_targets, @@ -1663,18 +1647,10 @@ where }, max_blocks_per_request, ) { - if self.peer_pool.try_reserve_peer(&peer_id) { - trace!(target: LOG_TARGET, "Downloading fork {hash:?} from {peer_id}"); - peer.state = PeerSyncState::DownloadingStale(hash); - Some((peer_id, req)) - } else { - warn!( - target: LOG_TARGET, - "Failed to reserve peer {peer_id} that was just returned as available.", - ); - debug_assert!(false); - None - } + available_peer.reserve(); + trace!(target: LOG_TARGET, "Downloading fork {hash:?} from {peer_id}"); + peer.state = PeerSyncState::DownloadingStale(hash); + Some((*peer_id, req)) } else if let Some((range, req)) = gap_sync.as_mut().and_then(|sync| { peer_gap_block_request( &peer_id, @@ -1686,25 +1662,17 @@ where max_blocks_per_request, ) }) { - if self.peer_pool.try_reserve_peer(&peer_id) { - peer.state = PeerSyncState::DownloadingGap(range.start); - trace!( - target: LOG_TARGET, - "New gap block request for {}, (best:{}, common:{}) {:?}", - peer_id, - peer.best_number, - peer.common_number, - req, - ); - Some((peer_id, req)) - } else { - warn!( - target: LOG_TARGET, - "Failed to reserve peer {peer_id} that was just returned as available.", - ); - debug_assert!(false); - None - } + available_peer.reserve(); + peer.state = PeerSyncState::DownloadingGap(range.start); + trace!( + target: LOG_TARGET, + "New gap block request for {}, (best:{}, common:{}) {:?}", + peer_id, + peer.best_number, + peer.common_number, + req, + ); + Some((*peer_id, req)) } else { None } @@ -1737,26 +1705,19 @@ where return None } 
- for peer_id in self.peer_pool.available_peers() { + for mut available_peer in self.peer_pool.lock().available_peers() { + let peer_id = available_peer.peer_id(); if let Some(peer) = self.peers.get_mut(&peer_id) { if peer.state.is_available() && peer.common_number >= sync.target_number() { - if self.peer_pool.try_reserve_peer(&peer_id) { - peer.state = PeerSyncState::DownloadingState; - let request = sync.next_request(); - trace!( - target: LOG_TARGET, - "New StateRequest for {peer_id}: {request:?}", - ); - self.allowed_requests.clear(); - return Some((peer_id, OpaqueStateRequest(Box::new(request)))) - } else { - warn!( - target: LOG_TARGET, - "Failed to reserve peer {peer_id} in the peer pool that was just \ - returned as available (`ChainSync`).", - ); - debug_assert!(false); - } + available_peer.reserve(); + peer.state = PeerSyncState::DownloadingState; + let request = sync.next_request(); + trace!( + target: LOG_TARGET, + "New StateRequest for {peer_id}: {request:?}", + ); + self.allowed_requests.clear(); + return Some((*peer_id, OpaqueStateRequest(Box::new(request)))) } } else { warn!( @@ -1790,7 +1751,7 @@ where if let PeerSyncState::DownloadingState = peer.state { peer.state = PeerSyncState::Available; self.allowed_requests.set_all(); - self.peer_pool.free_peer(peer_id); + self.peer_pool.lock().free_peer(peer_id); } } let import_result = if let Some(sync) = &mut self.state_sync { diff --git a/substrate/client/network/sync/src/strategy/state.rs b/substrate/client/network/sync/src/strategy/state.rs index f6e01f48b4a9..c15f6c4c4557 100644 --- a/substrate/client/network/sync/src/strategy/state.rs +++ b/substrate/client/network/sync/src/strategy/state.rs @@ -27,6 +27,7 @@ use crate::{ }; use libp2p::PeerId; use log::{debug, error, trace, warn}; +use parking_lot::Mutex; use sc_client_api::ProofProvider; use sc_consensus::{BlockImportError, BlockImportStatus, IncomingBlock}; use sc_network_common::sync::message::BlockAnnounce; @@ -64,12 +65,6 @@ enum PeerState { DownloadingState, } -impl PeerState { - fn is_available(&self) -> bool { - matches!(self, PeerState::Available) - } -} - struct Peer { best_number: NumberFor, state: PeerState, @@ -81,7 +76,7 @@ pub struct StateStrategy { peers: HashMap>, actions: Vec>, succeded: bool, - peer_pool: PeerPool, + peer_pool: Arc>, } impl StateStrategy { @@ -93,7 +88,7 @@ impl StateStrategy { target_justifications: Option, skip_proof: bool, initial_peers: impl Iterator)>, - peer_pool: PeerPool, + peer_pool: Arc>, ) -> Self where Client: ProofProvider + Send + Sync + 'static, @@ -124,7 +119,7 @@ impl StateStrategy { fn new_with_provider( state_sync_provider: Box>, initial_peers: impl Iterator)>, - peer_pool: PeerPool, + peer_pool: Arc>, ) -> Self { Self { state_sync: state_sync_provider, @@ -185,7 +180,7 @@ impl StateStrategy { if let Some(peer) = self.peers.get_mut(&peer_id) { peer.state = PeerState::Available; } - self.peer_pool.free_peer(&peer_id); + self.peer_pool.lock().free_peer(&peer_id); let response: Box = response.0.downcast().map_err(|_error| { error!( @@ -311,20 +306,13 @@ impl StateStrategy { let threshold = std::cmp::max(median, min_best_number); // Find a random peer that is synced as much as peer majority and is above // `min_best_number`. 
- for peer_id in self.peer_pool.available_peers() { - if let Some(peer) = self.peers.get_mut(&peer_id) { - if peer.state.is_available() && peer.best_number >= threshold { - if self.peer_pool.try_reserve_peer(&peer_id) { - peer.state = new_state; - return Some(peer_id) - } else { - warn!( - target: LOG_TARGET, - "Failed to reserve peer {peer_id} in the peer pool that was \ - just returned as available (`StateStrategy`).", - ); - debug_assert!(false); - } + for mut available_peer in self.peer_pool.lock().available_peers() { + let peer_id = available_peer.peer_id(); + if let Some(peer) = self.peers.get_mut(peer_id) { + if peer.best_number >= threshold { + available_peer.reserve(); + peer.state = new_state; + return Some(*peer_id) } } else { warn!( diff --git a/substrate/client/network/sync/src/strategy/warp.rs b/substrate/client/network/sync/src/strategy/warp.rs index 15118ba40213..782e209f59c6 100644 --- a/substrate/client/network/sync/src/strategy/warp.rs +++ b/substrate/client/network/sync/src/strategy/warp.rs @@ -30,6 +30,7 @@ use codec::{Decode, Encode}; use futures::channel::oneshot; use libp2p::PeerId; use log::{debug, error, trace, warn}; +use parking_lot::Mutex; use sc_network_common::sync::message::{ BlockAnnounce, BlockAttributes, BlockData, BlockRequest, Direction, FromBlock, }; @@ -205,12 +206,6 @@ enum PeerState { DownloadingTargetBlock, } -impl PeerState { - fn is_available(&self) -> bool { - matches!(self, PeerState::Available) - } -} - struct Peer { best_number: NumberFor, state: PeerState, @@ -243,7 +238,7 @@ pub struct WarpSync { peers: HashMap>, actions: Vec>, result: Option>, - peer_pool: PeerPool, + peer_pool: Arc>, } impl WarpSync @@ -257,7 +252,7 @@ where pub fn new( client: Arc, warp_sync_config: WarpSyncConfig, - peer_pool: PeerPool, + peer_pool: Arc>, ) -> Self { if client.info().finalized_state.is_some() { error!( @@ -363,7 +358,7 @@ where if let Some(peer) = self.peers.get_mut(peer_id) { peer.state = PeerState::Available; } - self.peer_pool.free_peer(&peer_id); + self.peer_pool.lock().free_peer(&peer_id); let Phase::WarpProof { set_id, authorities, last_hash, warp_sync_provider } = &mut self.phase @@ -422,7 +417,7 @@ where if let Some(peer) = self.peers.get_mut(&peer_id) { peer.state = PeerState::Available; } - self.peer_pool.free_peer(&peer_id); + self.peer_pool.lock().free_peer(&peer_id); let Phase::TargetBlock(header) = &mut self.phase else { debug!(target: LOG_TARGET, "Unexpected target block response from {peer_id}"); @@ -499,20 +494,13 @@ where let threshold = std::cmp::max(median, min_best_number.unwrap_or(Zero::zero())); // Find a random available peer that is synced as much as peer majority and is above // `min_best_number`. 
- for peer_id in self.peer_pool.available_peers() { - if let Some(peer) = self.peers.get_mut(&peer_id) { - if peer.state.is_available() && peer.best_number >= threshold { - if self.peer_pool.try_reserve_peer(&peer_id) { - peer.state = new_state; - return Some(peer_id) - } else { - warn!( - target: LOG_TARGET, - "Failed to reserve peer {peer_id} in the peer pool that was \ - just returned as available (`WarpSync`).", - ); - debug_assert!(false); - } + for mut available_peer in self.peer_pool.lock().available_peers() { + let peer_id = available_peer.peer_id(); + if let Some(peer) = self.peers.get_mut(peer_id) { + if peer.best_number >= threshold { + available_peer.reserve(); + peer.state = new_state; + return Some(*peer_id) } } else { warn!( From fd53c4403e5f3704359ad01f1b2f962f425f04a0 Mon Sep 17 00:00:00 2001 From: Dmitry Markin Date: Fri, 19 Jan 2024 14:47:42 +0200 Subject: [PATCH 13/31] Update `WarpStrategy` and `StateStrategy` tests --- .../client/network/sync/src/strategy/state.rs | 54 +++++++------- .../client/network/sync/src/strategy/warp.rs | 71 ++++++++++--------- 2 files changed, 66 insertions(+), 59 deletions(-) diff --git a/substrate/client/network/sync/src/strategy/state.rs b/substrate/client/network/sync/src/strategy/state.rs index c15f6c4c4557..d833262ec0b8 100644 --- a/substrate/client/network/sync/src/strategy/state.rs +++ b/substrate/client/network/sync/src/strategy/state.rs @@ -437,8 +437,8 @@ mod test { .map(|best_number| (PeerId::random(), best_number)) .collect::>(); let initial_peers = peers.iter().map(|(p, n)| (*p, *n)); - let peer_pool: PeerPool = Default::default(); - peers.iter().for_each(|(peer_id, _)| peer_pool.add_peer(*peer_id)); + let peer_pool = Arc::new(Mutex::new(PeerPool::default())); + peers.iter().for_each(|(peer_id, _)| peer_pool.lock().add_peer(*peer_id)); let mut state_strategy = StateStrategy::new( client.clone(), @@ -473,8 +473,8 @@ mod test { .map(|best_number| (PeerId::random(), best_number)) .collect::>(); let initial_peers = peers.iter().map(|(p, n)| (*p, *n)); - let peer_pool: PeerPool = Default::default(); - peers.iter().for_each(|(peer_id, _)| peer_pool.add_peer(*peer_id)); + let peer_pool = Arc::new(Mutex::new(PeerPool::default())); + peers.iter().for_each(|(peer_id, _)| peer_pool.lock().add_peer(*peer_id)); let mut state_strategy = StateStrategy::new( client.clone(), @@ -505,8 +505,10 @@ mod test { let initial_peers = (1..=10).map(|best_number| (PeerId::random(), best_number)).collect::>(); - let peer_pool: PeerPool = Default::default(); - initial_peers.iter().for_each(|(peer_id, _)| peer_pool.add_peer(*peer_id)); + let peer_pool = Arc::new(Mutex::new(PeerPool::default())); + initial_peers + .iter() + .for_each(|(peer_id, _)| peer_pool.lock().add_peer(*peer_id)); let mut state_strategy = StateStrategy::new( client.clone(), @@ -539,8 +541,10 @@ mod test { let initial_peers = (1..=10).map(|best_number| (PeerId::random(), best_number)).collect::>(); - let peer_pool: PeerPool = Default::default(); - initial_peers.iter().for_each(|(peer_id, _)| peer_pool.add_peer(*peer_id)); + let peer_pool = Arc::new(Mutex::new(PeerPool::default())); + initial_peers + .iter() + .for_each(|(peer_id, _)| peer_pool.lock().add_peer(*peer_id)); let mut state_strategy = StateStrategy::new( client.clone(), @@ -565,23 +569,23 @@ mod test { state_sync_provider.expect_import().return_once(|_| ImportResult::Continue); let peer_id = PeerId::random(); let initial_peers = std::iter::once((peer_id, 10)); - let peer_pool: PeerPool = Default::default(); - 
peer_pool.add_peer(peer_id); + let peer_pool = Arc::new(Mutex::new(PeerPool::default())); + peer_pool.lock().add_peer(peer_id); let mut state_strategy = StateStrategy::new_with_provider( Box::new(state_sync_provider), initial_peers, peer_pool.clone(), ); // Manually set the peer's state. - assert!(peer_pool.try_reserve_peer(&peer_id)); + assert!(peer_pool.lock().try_reserve_peer(&peer_id)); state_strategy.peers.get_mut(&peer_id).unwrap().state = PeerState::DownloadingState; let dummy_response = OpaqueStateResponse(Box::new(StateResponse::default())); state_strategy.on_state_response(peer_id, dummy_response); - assert!(state_strategy.peers.get(&peer_id).unwrap().state.is_available()); + assert!(matches!(state_strategy.peers.get(&peer_id).unwrap().state, PeerState::Available)); // Peer is also available in `PeerPool`. - assert!(peer_pool.try_reserve_peer(&peer_id)); + assert!(peer_pool.lock().try_reserve_peer(&peer_id)); } #[test] @@ -591,15 +595,15 @@ mod test { state_sync_provider.expect_import().return_once(|_| ImportResult::BadResponse); let peer_id = PeerId::random(); let initial_peers = std::iter::once((peer_id, 10)); - let peer_pool: PeerPool = Default::default(); - peer_pool.add_peer(peer_id); + let peer_pool = Arc::new(Mutex::new(PeerPool::default())); + peer_pool.lock().add_peer(peer_id); let mut state_strategy = StateStrategy::new_with_provider( Box::new(state_sync_provider), initial_peers, peer_pool.clone(), ); // Manually set the peer's state. - assert!(peer_pool.try_reserve_peer(&peer_id)); + assert!(peer_pool.lock().try_reserve_peer(&peer_id)); state_strategy.peers.get_mut(&peer_id).unwrap().state = PeerState::DownloadingState; // Receiving a response drops the peer. @@ -617,15 +621,15 @@ mod test { state_sync_provider.expect_import().return_once(|_| ImportResult::Continue); let peer_id = PeerId::random(); let initial_peers = std::iter::once((peer_id, 10)); - let peer_pool: PeerPool = Default::default(); - peer_pool.add_peer(peer_id); + let peer_pool = Arc::new(Mutex::new(PeerPool::default())); + peer_pool.lock().add_peer(peer_id); let mut state_strategy = StateStrategy::new_with_provider( Box::new(state_sync_provider), initial_peers, peer_pool.clone(), ); // Manually set the peer's state . - assert!(peer_pool.try_reserve_peer(&peer_id)); + assert!(peer_pool.lock().try_reserve_peer(&peer_id)); state_strategy.peers.get_mut(&peer_id).unwrap().state = PeerState::DownloadingState; let dummy_response = OpaqueStateResponse(Box::new(StateResponse::default())); @@ -682,15 +686,15 @@ mod test { // Prepare `StateStrategy`. let peer_id = PeerId::random(); let initial_peers = std::iter::once((peer_id, 10)); - let peer_pool: PeerPool = Default::default(); - peer_pool.add_peer(peer_id); + let peer_pool = Arc::new(Mutex::new(PeerPool::default())); + peer_pool.lock().add_peer(peer_id); let mut state_strategy = StateStrategy::new_with_provider( Box::new(state_sync_provider), initial_peers, peer_pool.clone(), ); // Manually set the peer's state . - assert!(peer_pool.try_reserve_peer(&peer_id)); + assert!(peer_pool.lock().try_reserve_peer(&peer_id)); state_strategy.peers.get_mut(&peer_id).unwrap().state = PeerState::DownloadingState; // Receive response. @@ -796,8 +800,10 @@ mod test { // Get enough peers for possible spurious requests. 
let initial_peers = (1..=10).map(|best_number| (PeerId::random(), best_number)).collect::>(); - let peer_pool: PeerPool = Default::default(); - initial_peers.iter().for_each(|(peer_id, _)| peer_pool.add_peer(*peer_id)); + let peer_pool = Arc::new(Mutex::new(PeerPool::default())); + initial_peers + .iter() + .for_each(|(peer_id, _)| peer_pool.lock().add_peer(*peer_id)); let mut state_strategy = StateStrategy::new_with_provider( Box::new(state_sync_provider), diff --git a/substrate/client/network/sync/src/strategy/warp.rs b/substrate/client/network/sync/src/strategy/warp.rs index 782e209f59c6..774e0e0f4310 100644 --- a/substrate/client/network/sync/src/strategy/warp.rs +++ b/substrate/client/network/sync/src/strategy/warp.rs @@ -663,6 +663,7 @@ where #[cfg(test)] mod test { use super::*; + use parking_lot::Mutex; use sc_block_builder::BlockBuilderBuilder; use sp_blockchain::{BlockStatus, Error as BlockchainError, HeaderBackend, Info}; use sp_consensus_grandpa::{AuthorityList, SetId}; @@ -802,20 +803,20 @@ mod test { .once() .return_const(AuthorityList::default()); let config = WarpSyncConfig::WithProvider(Arc::new(provider)); - let peer_pool: PeerPool = Default::default(); + let peer_pool = Arc::new(Mutex::new(Default::default())); let mut warp_sync = WarpSync::new(Arc::new(client), config, peer_pool.clone()); // Warp sync is not started when there is not enough peers. for _ in 0..(MIN_PEERS_TO_START_WARP_SYNC - 1) { let peer_id = PeerId::random(); - peer_pool.add_peer(peer_id); + peer_pool.lock().add_peer(peer_id); warp_sync.add_peer(peer_id, Hash::random(), 10); assert!(matches!(warp_sync.phase, Phase::WaitingForPeers { .. })) } // Now we have enough peers and warp sync is started. let peer_id = PeerId::random(); - peer_pool.add_peer(peer_id); + peer_pool.lock().add_peer(peer_id); warp_sync.add_peer(peer_id, Hash::random(), 10); assert!(matches!(warp_sync.phase, Phase::WarpProof { .. })) } @@ -849,12 +850,12 @@ mod test { .once() .return_const(AuthorityList::default()); let config = WarpSyncConfig::WithProvider(Arc::new(provider)); - let peer_pool: PeerPool = Default::default(); + let peer_pool = Arc::new(Mutex::new(Default::default())); let mut warp_sync = WarpSync::new(Arc::new(client), config, peer_pool.clone()); for best_number in 1..11 { let peer_id = PeerId::random(); - peer_pool.add_peer(peer_id); + peer_pool.lock().add_peer(peer_id); warp_sync.add_peer(peer_id, Hash::random(), best_number); } @@ -873,12 +874,12 @@ mod test { .once() .return_const(AuthorityList::default()); let config = WarpSyncConfig::WithProvider(Arc::new(provider)); - let peer_pool: PeerPool = Default::default(); + let peer_pool = Arc::new(Mutex::new(Default::default())); let mut warp_sync = WarpSync::new(Arc::new(client), config, peer_pool.clone()); for best_number in 1..11 { let peer_id = PeerId::random(); - peer_pool.add_peer(peer_id); + peer_pool.lock().add_peer(peer_id); warp_sync.add_peer(peer_id, Hash::random(), best_number); } @@ -896,13 +897,13 @@ mod test { .once() .return_const(AuthorityList::default()); let config = WarpSyncConfig::WithProvider(Arc::new(provider)); - let peer_pool: PeerPool = Default::default(); + let peer_pool = Arc::new(Mutex::new(Default::default())); let mut warp_sync = WarpSync::new(Arc::new(client), config, peer_pool.clone()); // Make sure we have enough peers to make a request. 
for best_number in 1..11 { let peer_id = PeerId::random(); - peer_pool.add_peer(peer_id); + peer_pool.lock().add_peer(peer_id); warp_sync.add_peer(peer_id, Hash::random(), best_number); } @@ -922,13 +923,13 @@ mod test { .once() .return_const(AuthorityList::default()); let config = WarpSyncConfig::WithProvider(Arc::new(provider)); - let peer_pool: PeerPool = Default::default(); + let peer_pool = Arc::new(Mutex::new(Default::default())); let mut warp_sync = WarpSync::new(Arc::new(client), config, peer_pool.clone()); // Make sure we have enough peers to make a request. for best_number in 1..11 { let peer_id = PeerId::random(); - peer_pool.add_peer(peer_id); + peer_pool.lock().add_peer(peer_id); warp_sync.add_peer(peer_id, Hash::random(), best_number); } assert!(matches!(warp_sync.phase, Phase::WarpProof { .. })); @@ -956,13 +957,13 @@ mod test { .once() .return_const(AuthorityList::default()); let config = WarpSyncConfig::WithProvider(Arc::new(provider)); - let peer_pool: PeerPool = Default::default(); + let peer_pool = Arc::new(Mutex::new(Default::default())); let mut warp_sync = WarpSync::new(Arc::new(client), config, peer_pool.clone()); // Make sure we have enough peers to make requests. for best_number in 1..11 { let peer_id = PeerId::random(); - peer_pool.add_peer(peer_id); + peer_pool.lock().add_peer(peer_id); warp_sync.add_peer(peer_id, Hash::random(), best_number); } assert!(matches!(warp_sync.phase, Phase::WarpProof { .. })); @@ -986,13 +987,13 @@ mod test { Err(Box::new(std::io::Error::new(ErrorKind::Other, "test-verification-failure"))) }); let config = WarpSyncConfig::WithProvider(Arc::new(provider)); - let peer_pool: PeerPool = Default::default(); + let peer_pool = Arc::new(Mutex::new(Default::default())); let mut warp_sync = WarpSync::new(Arc::new(client), config, peer_pool.clone()); // Make sure we have enough peers to make a request. for best_number in 1..11 { let peer_id = PeerId::random(); - peer_pool.add_peer(peer_id); + peer_pool.lock().add_peer(peer_id); warp_sync.add_peer(peer_id, Hash::random(), best_number); } assert!(matches!(warp_sync.phase, Phase::WarpProof { .. })); @@ -1030,13 +1031,13 @@ mod test { Ok(VerificationResult::Partial(set_id, authorities, Hash::random())) }); let config = WarpSyncConfig::WithProvider(Arc::new(provider)); - let peer_pool: PeerPool = Default::default(); + let peer_pool = Arc::new(Mutex::new(Default::default())); let mut warp_sync = WarpSync::new(Arc::new(client), config, peer_pool.clone()); // Make sure we have enough peers to make a request. for best_number in 1..11 { let peer_id = PeerId::random(); - peer_pool.add_peer(peer_id); + peer_pool.lock().add_peer(peer_id); warp_sync.add_peer(peer_id, Hash::random(), best_number); } assert!(matches!(warp_sync.phase, Phase::WarpProof { .. })); @@ -1077,13 +1078,13 @@ mod test { Ok(VerificationResult::Complete(set_id, authorities, target_header)) }); let config = WarpSyncConfig::WithProvider(Arc::new(provider)); - let peer_pool: PeerPool = Default::default(); + let peer_pool = Arc::new(Mutex::new(Default::default())); let mut warp_sync = WarpSync::new(client, config, peer_pool.clone()); // Make sure we have enough peers to make a request. for best_number in 1..11 { let peer_id = PeerId::random(); - peer_pool.add_peer(peer_id); + peer_pool.lock().add_peer(peer_id); warp_sync.add_peer(peer_id, Hash::random(), best_number); } assert!(matches!(warp_sync.phase, Phase::WarpProof { .. 
})); @@ -1113,13 +1114,13 @@ mod test { .once() .return_const(AuthorityList::default()); let config = WarpSyncConfig::WithProvider(Arc::new(provider)); - let peer_pool: PeerPool = Default::default(); + let peer_pool = Arc::new(Mutex::new(Default::default())); let mut warp_sync = WarpSync::new(Arc::new(client), config, peer_pool.clone()); // Make sure we have enough peers to make a request. for best_number in 1..11 { let peer_id = PeerId::random(); - peer_pool.add_peer(peer_id); + peer_pool.lock().add_peer(peer_id); warp_sync.add_peer(peer_id, Hash::random(), best_number); } // We are not in `Phase::TargetBlock` @@ -1151,13 +1152,13 @@ mod test { Ok(VerificationResult::Complete(set_id, authorities, target_header)) }); let config = WarpSyncConfig::WithProvider(Arc::new(provider)); - let peer_pool: PeerPool = Default::default(); + let peer_pool = Arc::new(Mutex::new(Default::default())); let mut warp_sync = WarpSync::new(client, config, peer_pool.clone()); // Make sure we have enough peers to make a request. for best_number in 1..11 { let peer_id = PeerId::random(); - peer_pool.add_peer(peer_id); + peer_pool.lock().add_peer(peer_id); warp_sync.add_peer(peer_id, Hash::random(), best_number); } @@ -1186,13 +1187,13 @@ mod test { .block; let target_header = target_block.header().clone(); let config = WarpSyncConfig::WaitForTarget; - let peer_pool: PeerPool = Default::default(); + let peer_pool = Arc::new(Mutex::new(Default::default())); let mut warp_sync = WarpSync::new(client, config, peer_pool.clone()); // Make sure we have enough peers to make a request. for best_number in 1..11 { let peer_id = PeerId::random(); - peer_pool.add_peer(peer_id); + peer_pool.lock().add_peer(peer_id); warp_sync.add_peer(peer_id, Hash::random(), best_number); } @@ -1233,13 +1234,13 @@ mod test { Ok(VerificationResult::Complete(set_id, authorities, target_header)) }); let config = WarpSyncConfig::WithProvider(Arc::new(provider)); - let peer_pool: PeerPool = Default::default(); + let peer_pool = Arc::new(Mutex::new(Default::default())); let mut warp_sync = WarpSync::new(client, config, peer_pool.clone()); // Make sure we have enough peers to make a request. for best_number in 1..11 { let peer_id = PeerId::random(); - peer_pool.add_peer(peer_id); + peer_pool.lock().add_peer(peer_id); warp_sync.add_peer(peer_id, Hash::random(), best_number); } @@ -1274,13 +1275,13 @@ mod test { Ok(VerificationResult::Complete(set_id, authorities, target_header)) }); let config = WarpSyncConfig::WithProvider(Arc::new(provider)); - let peer_pool: PeerPool = Default::default(); + let peer_pool = Arc::new(Mutex::new(Default::default())); let mut warp_sync = WarpSync::new(client, config, peer_pool.clone()); // Make sure we have enough peers to make a request. for best_number in 1..11 { let peer_id = PeerId::random(); - peer_pool.add_peer(peer_id); + peer_pool.lock().add_peer(peer_id); warp_sync.add_peer(peer_id, Hash::random(), best_number); } @@ -1331,13 +1332,13 @@ mod test { Ok(VerificationResult::Complete(set_id, authorities, target_header)) }); let config = WarpSyncConfig::WithProvider(Arc::new(provider)); - let peer_pool: PeerPool = Default::default(); + let peer_pool = Arc::new(Mutex::new(Default::default())); let mut warp_sync = WarpSync::new(client, config, peer_pool.clone()); // Make sure we have enough peers to make a request. 
for best_number in 1..11 { let peer_id = PeerId::random(); - peer_pool.add_peer(peer_id); + peer_pool.lock().add_peer(peer_id); warp_sync.add_peer(peer_id, Hash::random(), best_number); } @@ -1411,13 +1412,13 @@ mod test { Ok(VerificationResult::Complete(set_id, authorities, target_header)) }); let config = WarpSyncConfig::WithProvider(Arc::new(provider)); - let peer_pool: PeerPool = Default::default(); + let peer_pool = Arc::new(Mutex::new(Default::default())); let mut warp_sync = WarpSync::new(client, config, peer_pool.clone()); // Make sure we have enough peers to make a request. for best_number in 1..11 { let peer_id = PeerId::random(); - peer_pool.add_peer(peer_id); + peer_pool.lock().add_peer(peer_id); warp_sync.add_peer(peer_id, Hash::random(), best_number); } @@ -1467,13 +1468,13 @@ mod test { Ok(VerificationResult::Complete(set_id, authorities, target_header)) }); let config = WarpSyncConfig::WithProvider(Arc::new(provider)); - let peer_pool: PeerPool = Default::default(); + let peer_pool = Arc::new(Mutex::new(Default::default())); let mut warp_sync = WarpSync::new(client, config, peer_pool.clone()); // Make sure we have enough peers to make a request. for best_number in 1..11 { let peer_id = PeerId::random(); - peer_pool.add_peer(peer_id); + peer_pool.lock().add_peer(peer_id); warp_sync.add_peer(peer_id, Hash::random(), best_number); } From fa64365fe979c635a01ff0dd581bbac520f7e974 Mon Sep 17 00:00:00 2001 From: Dmitry Markin Date: Fri, 19 Jan 2024 15:12:55 +0200 Subject: [PATCH 14/31] WIP: fix `ChainSync` tests --- substrate/client/network/sync/src/strategy.rs | 1 + .../sync/src/strategy/chain_sync/test.rs | 65 ++++++++++++++++--- 2 files changed, 57 insertions(+), 9 deletions(-) diff --git a/substrate/client/network/sync/src/strategy.rs b/substrate/client/network/sync/src/strategy.rs index 205f1b032eb3..cdf17b8d4ca8 100644 --- a/substrate/client/network/sync/src/strategy.rs +++ b/substrate/client/network/sync/src/strategy.rs @@ -72,6 +72,7 @@ pub struct SyncingConfig { pub metrics_registry: Option, } +#[derive(Debug)] enum PeerStatus { Available, Reserved, diff --git a/substrate/client/network/sync/src/strategy/chain_sync/test.rs b/substrate/client/network/sync/src/strategy/chain_sync/test.rs index c89096bc6c90..fa149d967908 100644 --- a/substrate/client/network/sync/src/strategy/chain_sync/test.rs +++ b/substrate/client/network/sync/src/strategy/chain_sync/test.rs @@ -20,6 +20,7 @@ use super::*; use futures::executor::block_on; +use parking_lot::Mutex; use sc_block_builder::BlockBuilderBuilder; use sc_network_common::sync::message::{BlockAnnounce, BlockData, BlockState, FromBlock}; use sp_blockchain::HeaderBackend; @@ -37,8 +38,11 @@ fn processes_empty_response_on_justification_request_for_unknown_block() { let client = Arc::new(TestClientBuilder::new().build()); let peer_id = PeerId::random(); + let peer_pool = Arc::new(Mutex::new(PeerPool::default())); - let mut sync = ChainSync::new(ChainSyncMode::Full, client.clone(), 1, 64, None).unwrap(); + let mut sync = + ChainSync::new(ChainSyncMode::Full, client.clone(), 1, 64, None, peer_pool.clone()) + .unwrap(); let (a1_hash, a1_number) = { let a1 = BlockBuilderBuilder::new(&*client) @@ -53,6 +57,7 @@ fn processes_empty_response_on_justification_request_for_unknown_block() { }; // add a new peer with the same best block + peer_pool.lock().add_peer(peer_id); sync.add_peer(peer_id, a1_hash, a1_number); // and request a justification for the block @@ -90,8 +95,11 @@ fn 
processes_empty_response_on_justification_request_for_unknown_block() {

 #[test]
 fn restart_doesnt_affect_peers_downloading_finality_data() {
 	let mut client = Arc::new(TestClientBuilder::new().build());
+	let peer_pool = Arc::new(Mutex::new(PeerPool::default()));

-	let mut sync = ChainSync::new(ChainSyncMode::Full, client.clone(), 1, 64, None).unwrap();
+	let mut sync =
+		ChainSync::new(ChainSyncMode::Full, client.clone(), 1, 64, None, peer_pool.clone())
+			.unwrap();

 	let peer_id1 = PeerId::random();
 	let peer_id2 = PeerId::random();
@@ -117,6 +125,8 @@ fn restart_doesnt_affect_peers_downloading_finality_data() {
 	let (b1_hash, b1_number) = new_blocks(50);

 	// add 2 peers at blocks that we don't have locally
+	peer_pool.lock().add_peer(peer_id1);
+	peer_pool.lock().add_peer(peer_id2);
 	sync.add_peer(peer_id1, Hash::random(), 42);
 	sync.add_peer(peer_id2, Hash::random(), 10);

 	// we will send block requests to these peers
 	// for these blocks we don't know about
 	assert!(sync
 		.block_requests()
 		.into_iter()
 		.all(|(p, _)| { p == peer_id1 || p == peer_id2 }));

 	// add a new peer at a known block
+	peer_pool.lock().add_peer(peer_id3);
 	sync.add_peer(peer_id3, b1_hash, b1_number);

 	// we request a justification for a block we have locally
@@ -274,8 +285,11 @@ fn do_ancestor_search_when_common_block_to_best_qeued_gap_is_to_big() {
 	let mut client = Arc::new(TestClientBuilder::new().build());
 	let info = client.info();
+	let peer_pool = Arc::new(Mutex::new(PeerPool::default()));

-	let mut sync = ChainSync::new(ChainSyncMode::Full, client.clone(), 5, 64, None).unwrap();
+	let mut sync =
+		ChainSync::new(ChainSyncMode::Full, client.clone(), 5, 64, None, peer_pool.clone())
+			.unwrap();

 	let peer_id1 = PeerId::random();
 	let peer_id2 = PeerId::random();
@@ -283,8 +297,12 @@ fn do_ancestor_search_when_common_block_to_best_qeued_gap_is_to_big() {
 	let best_block = blocks.last().unwrap().clone();
 	let max_blocks_to_request = sync.max_blocks_per_request;
 	// Connect the node we will sync from
+	peer_pool.lock().add_peer(peer_id1);
+	peer_pool.lock().add_peer(peer_id2);
 	sync.add_peer(peer_id1, best_block.hash(), *best_block.header().number());
 	sync.add_peer(peer_id2, info.best_hash, 0);
+	// Transition the peers into a requestable state
+	let _ = sync.handle_new_peers().collect::<Vec<_>>();

 	let mut best_block_num = 0;
 	while best_block_num < MAX_DOWNLOAD_AHEAD {
@@ -420,8 +438,11 @@ fn can_sync_huge_fork() {
 	};
 	let info = client.info();
+	let peer_pool = Arc::new(Mutex::new(PeerPool::default()));

-	let mut sync = ChainSync::new(ChainSyncMode::Full, client.clone(), 5, 64, None).unwrap();
+	let mut sync =
+		ChainSync::new(ChainSyncMode::Full, client.clone(), 5, 64, None, peer_pool.clone())
+			.unwrap();

 	let finalized_block = blocks[MAX_BLOCKS_TO_LOOK_BACKWARDS as usize * 2 - 1].clone();
 	let just = (*b"TEST", Vec::new());
@@ -432,6 +453,7 @@ fn can_sync_huge_fork() {
 	let common_block = blocks[MAX_BLOCKS_TO_LOOK_BACKWARDS as usize / 2].clone();
 	// Connect the node we will sync from
+	peer_pool.lock().add_peer(peer_id1);
 	sync.add_peer(peer_id1, common_block.hash(), *common_block.header().number());

 	send_block_announce(fork_blocks.last().unwrap().header().clone(), peer_id1, &mut sync);
@@ -553,8 +575,11 @@ fn syncs_fork_without_duplicate_requests() {
 	};
 	let info = client.info();
+	let peer_pool = Arc::new(Mutex::new(PeerPool::default()));

-	let mut sync = ChainSync::new(ChainSyncMode::Full, client.clone(), 5, 64, None).unwrap();
+	let mut sync =
+		ChainSync::new(ChainSyncMode::Full, client.clone(), 5, 64, None, peer_pool.clone())
+			.unwrap();

 	let finalized_block =
blocks[MAX_BLOCKS_TO_LOOK_BACKWARDS as usize * 2 - 1].clone(); let just = (*b"TEST", Vec::new()); @@ -565,6 +590,7 @@ fn syncs_fork_without_duplicate_requests() { let common_block = blocks[MAX_BLOCKS_TO_LOOK_BACKWARDS as usize / 2].clone(); // Connect the node we will sync from + peer_pool.lock().add_peer(peer_id1); sync.add_peer(peer_id1, common_block.hash(), *common_block.header().number()); send_block_announce(fork_blocks.last().unwrap().header().clone(), peer_id1, &mut sync); @@ -688,12 +714,16 @@ fn removes_target_fork_on_disconnect() { sp_tracing::try_init_simple(); let mut client = Arc::new(TestClientBuilder::new().build()); let blocks = (0..3).map(|_| build_block(&mut client, None, false)).collect::>(); + let peer_pool = Arc::new(Mutex::new(PeerPool::default())); - let mut sync = ChainSync::new(ChainSyncMode::Full, client.clone(), 1, 64, None).unwrap(); + let mut sync = + ChainSync::new(ChainSyncMode::Full, client.clone(), 1, 64, None, peer_pool.clone()) + .unwrap(); let peer_id1 = PeerId::random(); let common_block = blocks[1].clone(); // Connect the node we will sync from + peer_pool.lock().add_peer(peer_id1); sync.add_peer(peer_id1, common_block.hash(), *common_block.header().number()); // Create a "new" header and announce it @@ -713,13 +743,19 @@ fn can_import_response_with_missing_blocks() { let blocks = (0..4).map(|_| build_block(&mut client2, None, false)).collect::>(); let empty_client = Arc::new(TestClientBuilder::new().build()); + let peer_pool = Arc::new(Mutex::new(PeerPool::default())); - let mut sync = ChainSync::new(ChainSyncMode::Full, empty_client.clone(), 1, 64, None).unwrap(); + let mut sync = + ChainSync::new(ChainSyncMode::Full, empty_client.clone(), 1, 64, None, peer_pool.clone()) + .unwrap(); let peer_id1 = PeerId::random(); let best_block = blocks[3].clone(); + peer_pool.lock().add_peer(peer_id1); sync.add_peer(peer_id1, best_block.hash(), *best_block.header().number()); + // Manually prepare the peer for a block request. 
+ sync.allowed_requests.add(&peer_id1); sync.peers.get_mut(&peer_id1).unwrap().state = PeerSyncState::Available; sync.peers.get_mut(&peer_id1).unwrap().common_number = 0; @@ -745,7 +781,10 @@ fn ancestor_search_repeat() { #[test] fn sync_restart_removes_block_but_not_justification_requests() { let mut client = Arc::new(TestClientBuilder::new().build()); - let mut sync = ChainSync::new(ChainSyncMode::Full, client.clone(), 1, 64, None).unwrap(); + let peer_pool = Arc::new(Mutex::new(PeerPool::default())); + let mut sync = + ChainSync::new(ChainSyncMode::Full, client.clone(), 1, 64, None, peer_pool.clone()) + .unwrap(); let peers = vec![PeerId::random(), PeerId::random()]; @@ -769,6 +808,7 @@ fn sync_restart_removes_block_but_not_justification_requests() { let (b1_hash, b1_number) = new_blocks(50); // add new peer and request blocks from them + peer_pool.lock().add_peer(peers[0]); sync.add_peer(peers[0], Hash::random(), 42); // we don't actually perform any requests, just keep track of peers waiting for a response @@ -782,6 +822,7 @@ fn sync_restart_removes_block_but_not_justification_requests() { } // add a new peer at a known block + peer_pool.lock().add_peer(peers[1]); sync.add_peer(peers[1], b1_hash, b1_number); // we request a justification for a block we have locally @@ -887,13 +928,19 @@ fn request_across_forks() { fork_blocks }; - let mut sync = ChainSync::new(ChainSyncMode::Full, client.clone(), 5, 64, None).unwrap(); + let peer_pool = Arc::new(Mutex::new(PeerPool::default())); + + let mut sync = + ChainSync::new(ChainSyncMode::Full, client.clone(), 5, 64, None, peer_pool.clone()) + .unwrap(); // Add the peers, all at the common ancestor 100. let common_block = blocks.last().unwrap(); let peer_id1 = PeerId::random(); + peer_pool.lock().add_peer(peer_id1); sync.add_peer(peer_id1, common_block.hash(), *common_block.header().number()); let peer_id2 = PeerId::random(); + peer_pool.lock().add_peer(peer_id2); sync.add_peer(peer_id2, common_block.hash(), *common_block.header().number()); // Peer 1 announces 107 from fork 1, 100-107 get downloaded. From 272fbc669013c439c369b398c6d87ea7dc65188a Mon Sep 17 00:00:00 2001 From: Dmitry Markin Date: Fri, 19 Jan 2024 18:52:24 +0200 Subject: [PATCH 15/31] Fix sync restart and `ChainSync` tests --- .../network/sync/src/strategy/chain_sync.rs | 56 ++++++++++++------- .../sync/src/strategy/chain_sync/test.rs | 53 ++++++++++++------ 2 files changed, 72 insertions(+), 37 deletions(-) diff --git a/substrate/client/network/sync/src/strategy/chain_sync.rs b/substrate/client/network/sync/src/strategy/chain_sync.rs index da39a1312a0d..de75e209f164 100644 --- a/substrate/client/network/sync/src/strategy/chain_sync.rs +++ b/substrate/client/network/sync/src/strategy/chain_sync.rs @@ -1353,28 +1353,39 @@ where ); let old_peers = std::mem::take(&mut self.peers); - old_peers.into_iter().for_each(|(peer_id, mut p)| { - // peers that were downloading justifications - // should be kept in that state. - if let PeerSyncState::DownloadingJustification(_) = p.state { - // We make sure our commmon number is at least something we have. 
-			trace!(
-				target: LOG_TARGET,
-				"Keeping peer {} after restart, updating common number from={} => to={} (our best).",
-				peer_id,
-				p.common_number,
-				self.best_queued_number,
-			);
-			p.common_number = self.best_queued_number;
-			self.peers.insert(peer_id, p);
-			return
+		old_peers.into_iter().for_each(|(peer_id, mut peer_sync)| {
+			match peer_sync.state {
+				PeerSyncState::New | PeerSyncState::Available => {
+					self.add_peer(peer_id, peer_sync.best_hash, peer_sync.best_number);
+				},
+				PeerSyncState::AncestorSearch { .. } |
+				PeerSyncState::DownloadingNew(_) |
+				PeerSyncState::DownloadingStale(_) |
+				PeerSyncState::DownloadingGap(_) => {
+					self.add_peer(peer_id, peer_sync.best_hash, peer_sync.best_number);
+					self.actions.push(ChainSyncAction::CancelBlockRequest { peer_id });
+					self.peer_pool.lock().free_peer(&peer_id);
+				},
+				PeerSyncState::DownloadingJustification(_) => {
+					// Peers that were downloading justifications
+					// should be kept in that state.
+					// We make sure our common number is at least something we have.
+					trace!(
+						target: LOG_TARGET,
+						"Keeping peer {} after restart, updating common number from={} => to={} (our best).",
+						peer_id,
+						peer_sync.common_number,
+						self.best_queued_number,
+					);
+					peer_sync.common_number = self.best_queued_number;
+					self.peers.insert(peer_id, peer_sync);
+				},
+				PeerSyncState::DownloadingState => {
+					self.add_peer(peer_id, peer_sync.best_hash, peer_sync.best_number);
+					self.peer_pool.lock().free_peer(&peer_id);
+					// FIXME: is it safe to not cancel state request here?
+				},
 			}
-
-			// handle peers that were in other states.
-			self.add_peer(peer_id, p.best_hash, p.best_number);
-
-			// since the request is not a justification, remove it from pending responses
-			self.actions.push(ChainSyncAction::CancelBlockRequest { peer_id });
 		});
 	}
@@ -2332,3 +2343,6 @@ pub fn validate_blocks(

 	Ok(blocks.first().and_then(|b| b.header.as_ref()).map(|h| *h.number()))
 }
+
+// TODO: make state requests respect `PeerPool`.
+// TODO: make (extra) justification requests respect `PeerPool`.
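The two TODOs above are picked up by the commits that follow, and they all apply the same reservation protocol that `WarpSync` already follows: a strategy may only talk to a peer it has taken out of the shared `PeerPool`, and must hand the peer back once the response (or a cancellation) has been processed. For orientation, here is a minimal sketch of that round-trip. It is not part of the patch: it assumes the `PeerPool`/`AvailablePeer` API from `strategy.rs` (moved to `peer_pool.rs` later in this series), and `threshold`/`best_number_of` are hypothetical stand-ins for strategy-specific selection logic.

    use std::sync::Arc;

    use libp2p::PeerId;
    use parking_lot::Mutex;

    use crate::strategy::PeerPool; // location as of this commit

    /// Pick a suitable peer and mark it busy in the shared pool.
    fn reserve_peer_for_request(
        peer_pool: &Arc<Mutex<PeerPool>>,
        best_number_of: impl Fn(&PeerId) -> u64, // hypothetical helper
        threshold: u64,                          // hypothetical cutoff
    ) -> Option<PeerId> {
        // The lock guard lives for the whole loop; `available_peers()`
        // yields only peers with no request in flight.
        for mut available_peer in peer_pool.lock().available_peers() {
            let peer_id = *available_peer.peer_id();
            // Only consider peers that can actually serve this request.
            if best_number_of(&peer_id) >= threshold {
                // Mark the peer busy so no other strategy picks it up.
                available_peer.reserve();
                return Some(peer_id)
            }
        }
        None
    }

    // When the response arrives (or the request is cancelled), the
    // strategy returns the peer to the pool:
    //     peer_pool.lock().free_peer(&peer_id);

Because nothing frees a peer automatically, every response and cancellation path has to call `free_peer()` explicitly; the `*_reserves_peer`/`*_frees_peer` tests added later in this series pin down exactly that invariant.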
diff --git a/substrate/client/network/sync/src/strategy/chain_sync/test.rs b/substrate/client/network/sync/src/strategy/chain_sync/test.rs
index fa149d967908..1c645989971c 100644
--- a/substrate/client/network/sync/src/strategy/chain_sync/test.rs
+++ b/substrate/client/network/sync/src/strategy/chain_sync/test.rs
@@ -59,6 +59,8 @@ fn processes_empty_response_on_justification_request_for_unknown_block() {
 	// add a new peer with the same best block
 	peer_pool.lock().add_peer(peer_id);
 	sync.add_peer(peer_id, a1_hash, a1_number);
+	// Transition the peer into a requestable state
+	let _ = sync.handle_new_peers().collect::<Vec<_>>();

 	// and request a justification for the block
 	sync.request_justification(&a1_hash, a1_number);
@@ -97,9 +99,10 @@ fn restart_doesnt_affect_peers_downloading_finality_data() {
 	let mut client = Arc::new(TestClientBuilder::new().build());
 	let peer_pool = Arc::new(Mutex::new(PeerPool::default()));

+	// we request max 8 blocks to always initiate block requests to both peers for the test to be
+	// deterministic
 	let mut sync =
-		ChainSync::new(ChainSyncMode::Full, client.clone(), 1, 64, None, peer_pool.clone())
-			.unwrap();
+		ChainSync::new(ChainSyncMode::Full, client.clone(), 1, 8, None, peer_pool.clone()).unwrap();

 	let peer_id1 = PeerId::random();
 	let peer_id2 = PeerId::random();
@@ -127,19 +130,26 @@ fn restart_doesnt_affect_peers_downloading_finality_data() {
 	// add 2 peers at blocks that we don't have locally
 	peer_pool.lock().add_peer(peer_id1);
 	peer_pool.lock().add_peer(peer_id2);
+	// note peer 2 is at block 10 > 8, so we always have something to request from it, even if the
+	// first request went to peer 1
 	sync.add_peer(peer_id1, Hash::random(), 42);
 	sync.add_peer(peer_id2, Hash::random(), 10);

 	// we will send block requests to these peers
 	// for these blocks we don't know about
-	assert!(sync
-		.block_requests()
-		.into_iter()
-		.all(|(p, _)| { p == peer_id1 || p == peer_id2 }));
+	let actions = sync.actions().collect::<Vec<_>>();
+	assert_eq!(actions.len(), 2);
+	assert!(actions.iter().all(|action| match action {
+		ChainSyncAction::SendBlockRequest { peer_id, .. } =>
+			peer_id == &peer_id1 || peer_id == &peer_id2,
+		_ => false,
+	}));

 	// add a new peer at a known block
 	peer_pool.lock().add_peer(peer_id3);
 	sync.add_peer(peer_id3, b1_hash, b1_number);
+	// transition the peer into a requestable state
+	let _ = sync.handle_new_peers().collect::<Vec<_>>();

 	// we request a justification for a block we have locally
 	sync.request_justification(&b1_hash, b1_number);
@@ -157,22 +167,27 @@ fn restart_doesnt_affect_peers_downloading_finality_data() {
 		PeerSyncState::DownloadingJustification(b1_hash),
 	);

-	// clear old actions
+	// drop old actions
 	let _ = sync.take_actions();

 	// we restart the sync state
 	sync.restart();
-	let actions = sync.take_actions().collect::<Vec<_>>();

-	// which should make us send out block requests to the first two peers
-	assert_eq!(actions.len(), 2);
-	assert!(actions.iter().all(|action| match action {
+	// which should make us cancel and re-send block requests to the first two peers
+	let actions = sync.actions().collect::<Vec<_>>();
+	assert_eq!(actions.len(), 4);
+	assert!(actions.iter().take(2).all(|action| match action {
+		ChainSyncAction::CancelBlockRequest { peer_id, .. } =>
+			peer_id == &peer_id1 || peer_id == &peer_id2,
+		_ => false,
+	}));
+	assert!(actions.iter().skip(2).all(|action| match action {
 		ChainSyncAction::SendBlockRequest { peer_id, ..
} => peer_id == &peer_id1 || peer_id == &peer_id2, _ => false, })); - // peer 3 should be unaffected it was downloading finality data + // peer 3 should be unaffected as it was downloading finality data assert_eq!( sync.peers.get(&peer_id3).unwrap().state, PeerSyncState::DownloadingJustification(b1_hash), @@ -807,12 +822,15 @@ fn sync_restart_removes_block_but_not_justification_requests() { let (b1_hash, b1_number) = new_blocks(50); + // we don't actually perform any requests, just keep track of peers waiting for a response + let mut pending_responses = HashSet::new(); + // add new peer and request blocks from them peer_pool.lock().add_peer(peers[0]); sync.add_peer(peers[0], Hash::random(), 42); - - // we don't actually perform any requests, just keep track of peers waiting for a response - let mut pending_responses = HashSet::new(); + sync.handle_new_peers().for_each(|result| { + pending_responses.insert(result.unwrap().0); + }); // we wil send block requests to these peers // for these blocks we don't know about @@ -824,6 +842,9 @@ fn sync_restart_removes_block_but_not_justification_requests() { // add a new peer at a known block peer_pool.lock().add_peer(peers[1]); sync.add_peer(peers[1], b1_hash, b1_number); + sync.handle_new_peers().for_each(|result| { + pending_responses.insert(result.unwrap().0); + }); // we request a justification for a block we have locally sync.request_justification(&b1_hash, b1_number); @@ -851,7 +872,7 @@ fn sync_restart_removes_block_but_not_justification_requests() { // restart sync sync.restart(); - let actions = sync.take_actions().collect::>(); + let actions = sync.actions().collect::>(); for action in actions.iter() { match action { ChainSyncAction::CancelBlockRequest { peer_id } => { From d8008e6828c78f3ff900da9f65e57bab13d09ce0 Mon Sep 17 00:00:00 2001 From: Dmitry Markin Date: Fri, 26 Jan 2024 16:35:07 +0200 Subject: [PATCH 16/31] Respect `PeerPool` when making extra justification requests --- .../client/network/sync/src/extra_requests.rs | 81 ++++++++++++------- substrate/client/network/sync/src/strategy.rs | 12 +-- .../network/sync/src/strategy/chain_sync.rs | 8 +- 3 files changed, 59 insertions(+), 42 deletions(-) diff --git a/substrate/client/network/sync/src/extra_requests.rs b/substrate/client/network/sync/src/extra_requests.rs index cd3008d270b1..7d83667f5d58 100644 --- a/substrate/client/network/sync/src/extra_requests.rs +++ b/substrate/client/network/sync/src/extra_requests.rs @@ -18,16 +18,18 @@ use crate::{ request_metrics::Metrics, - strategy::chain_sync::{PeerSync, PeerSyncState}, + strategy::{chain_sync::PeerSync, PeerPool}, LOG_TARGET, }; use fork_tree::ForkTree; use libp2p::PeerId; -use log::{debug, trace, warn}; +use log::{debug, error, trace, warn}; +use parking_lot::Mutex; use sp_blockchain::Error as ClientError; use sp_runtime::traits::{Block as BlockT, NumberFor, Zero}; use std::{ collections::{HashMap, HashSet, VecDeque}, + sync::Arc, time::{Duration, Instant}, }; @@ -288,6 +290,7 @@ impl<'a, B: BlockT> Matcher<'a, B> { pub(crate) fn next( &mut self, peers: &HashMap>, + peer_pool: &Arc>, ) -> Option<(PeerId, ExtraRequest)> { if self.remaining == 0 { return None @@ -299,36 +302,41 @@ impl<'a, B: BlockT> Matcher<'a, B> { } while let Some(request) = self.extras.pending_requests.pop_front() { - for (peer, sync) in - peers.iter().filter(|(_, sync)| sync.state == PeerSyncState::Available) - { + for mut available_peer in peer_pool.lock().available_peers() { + let peer_id = available_peer.peer_id(); + let Some(sync) = peers.get(peer_id) 
else { + error!( + target: LOG_TARGET, + "Peer {peer_id} is available, but not known to `ChainSync`", + ); + debug_assert!(false); + continue + }; // only ask peers that have synced at least up to the block number that we're asking // the extra for if sync.best_number < request.1 { continue } - // don't request to any peers that already have pending requests - if self.extras.active_requests.contains_key(peer) { - continue - } // only ask if the same request has not failed for this peer before if self .extras .failed_requests .get(&request) - .map(|rr| rr.iter().any(|i| &i.0 == peer)) + .map(|rr| rr.iter().any(|i| &i.0 == peer_id)) .unwrap_or(false) { continue } - self.extras.active_requests.insert(*peer, request); + + available_peer.reserve(); + self.extras.active_requests.insert(*peer_id, request); trace!(target: LOG_TARGET, "Sending {} request to {:?} for {:?}", - self.extras.request_type_name, peer, request, + self.extras.request_type_name, peer_id, request, ); - return Some((*peer, request)) + return Some((*peer_id, request)) } self.extras.pending_requests.push_back(request); @@ -346,19 +354,26 @@ impl<'a, B: BlockT> Matcher<'a, B> { #[cfg(test)] mod tests { use super::*; - use crate::strategy::chain_sync::PeerSync; + use crate::strategy::{ + chain_sync::{PeerSync, PeerSyncState}, + PeerPool, + }; + use parking_lot::Mutex; use quickcheck::{Arbitrary, Gen, QuickCheck}; use sp_blockchain::Error as ClientError; use sp_test_primitives::{Block, BlockNumber, Hash}; - use std::collections::{HashMap, HashSet}; + use std::{ + collections::{HashMap, HashSet}, + sync::Arc, + }; #[test] fn requests_are_processed_in_order() { - fn property(mut peers: ArbitraryPeers) { + fn property(mut ap: ArbitraryPeers) { let mut requests = ExtraRequests::::new("test"); let num_peers_available = - peers.0.values().filter(|s| s.state == PeerSyncState::Available).count(); + ap.peers.values().filter(|s| s.state == PeerSyncState::Available).count(); for i in 0..num_peers_available { requests.schedule((Hash::random(), i as u64), |a, b| Ok(a[0] >= b[0])) @@ -368,9 +383,9 @@ mod tests { let mut m = requests.matcher(); for p in &pending { - let (peer, r) = m.next(&peers.0).unwrap(); + let (peer, r) = m.next(&ap.peers, &ap.peer_pool).unwrap(); assert_eq!(p, &r); - peers.0.get_mut(&peer).unwrap().state = + ap.peers.get_mut(&peer).unwrap().state = PeerSyncState::DownloadingJustification(r.0); } } @@ -397,19 +412,19 @@ mod tests { #[test] fn disconnecting_implies_rescheduling() { - fn property(mut peers: ArbitraryPeers) -> bool { + fn property(mut ap: ArbitraryPeers) -> bool { let mut requests = ExtraRequests::::new("test"); let num_peers_available = - peers.0.values().filter(|s| s.state == PeerSyncState::Available).count(); + ap.peers.values().filter(|s| s.state == PeerSyncState::Available).count(); for i in 0..num_peers_available { requests.schedule((Hash::random(), i as u64), |a, b| Ok(a[0] >= b[0])) } let mut m = requests.matcher(); - while let Some((peer, r)) = m.next(&peers.0) { - peers.0.get_mut(&peer).unwrap().state = + while let Some((peer, r)) = m.next(&ap.peers, &ap.peer_pool) { + ap.peers.get_mut(&peer).unwrap().state = PeerSyncState::DownloadingJustification(r.0); } @@ -433,19 +448,19 @@ mod tests { #[test] fn no_response_reschedules() { - fn property(mut peers: ArbitraryPeers) { + fn property(mut ap: ArbitraryPeers) { let mut requests = ExtraRequests::::new("test"); let num_peers_available = - peers.0.values().filter(|s| s.state == PeerSyncState::Available).count(); + ap.peers.values().filter(|s| s.state == 
PeerSyncState::Available).count(); for i in 0..num_peers_available { requests.schedule((Hash::random(), i as u64), |a, b| Ok(a[0] >= b[0])) } let mut m = requests.matcher(); - while let Some((peer, r)) = m.next(&peers.0) { - peers.0.get_mut(&peer).unwrap().state = + while let Some((peer, r)) = m.next(&ap.peers, &ap.peer_pool) { + ap.peers.get_mut(&peer).unwrap().state = PeerSyncState::DownloadingJustification(r.0); } @@ -570,16 +585,22 @@ mod tests { } #[derive(Debug, Clone)] - struct ArbitraryPeers(HashMap>); + struct ArbitraryPeers { + peers: HashMap>, + peer_pool: Arc>, + } impl Arbitrary for ArbitraryPeers { fn arbitrary(g: &mut Gen) -> Self { let mut peers = HashMap::with_capacity(g.size()); + let peer_pool = Arc::new(Mutex::new(PeerPool::default())); for _ in 0..g.size() { let ps = ArbitraryPeerSync::arbitrary(g).0; - peers.insert(ps.peer_id, ps); + let peer_id = ps.peer_id; + peers.insert(peer_id, ps); + peer_pool.lock().add_peer(peer_id); } - ArbitraryPeers(peers) + ArbitraryPeers { peers, peer_pool } } } } diff --git a/substrate/client/network/sync/src/strategy.rs b/substrate/client/network/sync/src/strategy.rs index cdf17b8d4ca8..5064d6092997 100644 --- a/substrate/client/network/sync/src/strategy.rs +++ b/substrate/client/network/sync/src/strategy.rs @@ -84,7 +84,7 @@ impl PeerStatus { } } -#[derive(Default)] +#[derive(Default, Debug)] pub struct PeerPool { peers: HashMap, } @@ -105,21 +105,21 @@ impl<'a> AvailablePeer<'a> { } impl PeerPool { - fn add_peer(&mut self, peer_id: PeerId) { + pub fn add_peer(&mut self, peer_id: PeerId) { self.peers.insert(peer_id, PeerStatus::Available); } - fn remove_peer(&mut self, peer_id: &PeerId) { + pub fn remove_peer(&mut self, peer_id: &PeerId) { self.peers.remove(peer_id); } - fn available_peers<'a>(&'a mut self) -> impl Iterator + 'a { + pub fn available_peers<'a>(&'a mut self) -> impl Iterator + 'a { self.peers.iter_mut().filter_map(|(peer_id, status)| { status.is_available().then_some(AvailablePeer::<'a> { peer_id, status }) }) } - fn try_reserve_peer(&mut self, peer_id: &PeerId) -> bool { + pub fn try_reserve_peer(&mut self, peer_id: &PeerId) -> bool { match self.peers.get_mut(peer_id) { Some(peer_status) => match peer_status { PeerStatus::Available => { @@ -135,7 +135,7 @@ impl PeerPool { } } - fn free_peer(&mut self, peer_id: &PeerId) { + pub fn free_peer(&mut self, peer_id: &PeerId) { match self.peers.get_mut(peer_id) { Some(peer_status) => match peer_status { PeerStatus::Available => { diff --git a/substrate/client/network/sync/src/strategy/chain_sync.rs b/substrate/client/network/sync/src/strategy/chain_sync.rs index de75e209f164..df7617082fc6 100644 --- a/substrate/client/network/sync/src/strategy/chain_sync.rs +++ b/substrate/client/network/sync/src/strategy/chain_sync.rs @@ -1534,11 +1534,10 @@ where /// Get justification requests scheduled by sync to be sent out. fn justification_requests(&mut self) -> Vec<(PeerId, BlockRequest)> { let peers = &mut self.peers; - // TODO: filter peers to only include `self.peer_pool.available_peers()`. - // As an option, pass available peers to `matcher()`. + let peer_pool = &self.peer_pool; let mut matcher = self.extra_justifications.matcher(); std::iter::from_fn(move || { - if let Some((peer_id, request)) = matcher.next(peers) { + if let Some((peer_id, request)) = matcher.next(peers, peer_pool) { // TODO: reserve the peer in `PeerPool`. 
peers .get_mut(&peer_id) @@ -2343,6 +2342,3 @@ pub fn validate_blocks( Ok(blocks.first().and_then(|b| b.header.as_ref()).map(|h| *h.number())) } - -// TODO: make state requests respect `PeerPool`. -// TODO: make (extra) justification requests respect `PeerPool`. From 06295e28bf9a3cef2d3cf4931a3090a8361c525c Mon Sep 17 00:00:00 2001 From: Dmitry Markin Date: Fri, 26 Jan 2024 16:44:52 +0200 Subject: [PATCH 17/31] Cancel a stale state request when restarting the sync --- substrate/client/network/sync/src/engine.rs | 2 +- substrate/client/network/sync/src/strategy.rs | 8 ++++---- .../client/network/sync/src/strategy/chain_sync.rs | 14 +++++--------- .../network/sync/src/strategy/chain_sync/test.rs | 4 ++-- 4 files changed, 12 insertions(+), 16 deletions(-) diff --git a/substrate/client/network/sync/src/engine.rs b/substrate/client/network/sync/src/engine.rs index 1787acbe2c07..30c8ee23a6d7 100644 --- a/substrate/client/network/sync/src/engine.rs +++ b/substrate/client/network/sync/src/engine.rs @@ -704,7 +704,7 @@ where removed, ) }, - SyncingAction::CancelBlockRequest { peer_id } => { + SyncingAction::CancelRequest { peer_id } => { let removed = self.pending_responses.remove(&peer_id); trace!( diff --git a/substrate/client/network/sync/src/strategy.rs b/substrate/client/network/sync/src/strategy.rs index 5064d6092997..e8f3802393a5 100644 --- a/substrate/client/network/sync/src/strategy.rs +++ b/substrate/client/network/sync/src/strategy.rs @@ -156,12 +156,12 @@ impl PeerPool { pub enum SyncingAction { /// Send block request to peer. Always implies dropping a stale block request to the same peer. SendBlockRequest { peer_id: PeerId, request: BlockRequest }, - /// Drop stale block request. - CancelBlockRequest { peer_id: PeerId }, /// Send state request to peer. SendStateRequest { peer_id: PeerId, request: OpaqueStateRequest }, /// Send warp proof request to peer. SendWarpProofRequest { peer_id: PeerId, request: WarpProofRequest }, + /// Drop stale request. + CancelRequest { peer_id: PeerId }, /// Peer misbehaved. Disconnect, report it and cancel any requests to it. DropPeer(BadPeer), /// Import blocks. @@ -500,10 +500,10 @@ where .map(|action| match action { ChainSyncAction::SendBlockRequest { peer_id, request } => SyncingAction::SendBlockRequest { peer_id, request }, - ChainSyncAction::CancelBlockRequest { peer_id } => - SyncingAction::CancelBlockRequest { peer_id }, ChainSyncAction::SendStateRequest { peer_id, request } => SyncingAction::SendStateRequest { peer_id, request }, + ChainSyncAction::CancelRequest { peer_id } => + SyncingAction::CancelRequest { peer_id }, ChainSyncAction::DropPeer(bad_peer) => SyncingAction::DropPeer(bad_peer), ChainSyncAction::ImportBlocks { origin, blocks } => SyncingAction::ImportBlocks { origin, blocks }, diff --git a/substrate/client/network/sync/src/strategy/chain_sync.rs b/substrate/client/network/sync/src/strategy/chain_sync.rs index df7617082fc6..9f5dc6bab41d 100644 --- a/substrate/client/network/sync/src/strategy/chain_sync.rs +++ b/substrate/client/network/sync/src/strategy/chain_sync.rs @@ -214,10 +214,10 @@ struct GapSync { pub enum ChainSyncAction { /// Send block request to peer. Always implies dropping a stale block request to the same peer. SendBlockRequest { peer_id: PeerId, request: BlockRequest }, - /// Drop stale block request. - CancelBlockRequest { peer_id: PeerId }, /// Send state request to peer. SendStateRequest { peer_id: PeerId, request: OpaqueStateRequest }, + /// Drop stale request. 
+	CancelRequest { peer_id: PeerId },
 	/// Peer misbehaved. Disconnect, report it and cancel the block request to it.
 	DropPeer(BadPeer),
 	/// Import blocks.
@@ -1361,9 +1361,10 @@ where
 				PeerSyncState::AncestorSearch { .. } |
 				PeerSyncState::DownloadingNew(_) |
 				PeerSyncState::DownloadingStale(_) |
-				PeerSyncState::DownloadingGap(_) => {
+				PeerSyncState::DownloadingGap(_) |
+				PeerSyncState::DownloadingState => {
 					self.add_peer(peer_id, peer_sync.best_hash, peer_sync.best_number);
-					self.actions.push(ChainSyncAction::CancelBlockRequest { peer_id });
+					self.actions.push(ChainSyncAction::CancelRequest { peer_id });
 					self.peer_pool.lock().free_peer(&peer_id);
 				},
 				PeerSyncState::DownloadingJustification(_) => {
@@ -1380,11 +1381,6 @@ where
 					peer_sync.common_number = self.best_queued_number;
 					self.peers.insert(peer_id, peer_sync);
 				},
-				PeerSyncState::DownloadingState => {
-					self.add_peer(peer_id, peer_sync.best_hash, peer_sync.best_number);
-					self.peer_pool.lock().free_peer(&peer_id);
-					// FIXME: is it safe to not cancel state request here?
-				},
 			}
 		});
 	}
diff --git a/substrate/client/network/sync/src/strategy/chain_sync/test.rs b/substrate/client/network/sync/src/strategy/chain_sync/test.rs
index 1c645989971c..934d443b2986 100644
--- a/substrate/client/network/sync/src/strategy/chain_sync/test.rs
+++ b/substrate/client/network/sync/src/strategy/chain_sync/test.rs
@@ -177,7 +177,7 @@ fn restart_doesnt_affect_peers_downloading_finality_data() {
 	let actions = sync.actions().collect::<Vec<_>>();
 	assert_eq!(actions.len(), 4);
 	assert!(actions.iter().take(2).all(|action| match action {
-		ChainSyncAction::CancelBlockRequest { peer_id, .. } =>
+		ChainSyncAction::CancelRequest { peer_id, .. } =>
 			peer_id == &peer_id1 || peer_id == &peer_id2,
 		_ => false,
 	}));
@@ -875,7 +875,7 @@ fn sync_restart_removes_block_but_not_justification_requests() {
 	let actions = sync.actions().collect::<Vec<_>>();
 	for action in actions.iter() {
 		match action {
-			ChainSyncAction::CancelBlockRequest { peer_id } => {
+			ChainSyncAction::CancelRequest { peer_id } => {
 				pending_responses.remove(&peer_id);
 			},
 			ChainSyncAction::SendBlockRequest { peer_id, .. } => {

From cf93e7f39dbf61bcf876ca960d8debad1dbcd39c Mon Sep 17 00:00:00 2001
From: Dmitry Markin
Date: Mon, 29 Jan 2024 14:55:10 +0200
Subject: [PATCH 18/31] Simplify strategy actions conversion into top-level
 `SyncingAction`

---
 substrate/client/network/sync/src/engine.rs   |   2 +
 substrate/client/network/sync/src/strategy.rs | 117 +++++++++---------
 2 files changed, 63 insertions(+), 56 deletions(-)

diff --git a/substrate/client/network/sync/src/engine.rs b/substrate/client/network/sync/src/engine.rs
index 30c8ee23a6d7..e4d8bdcc2b3b 100644
--- a/substrate/client/network/sync/src/engine.rs
+++ b/substrate/client/network/sync/src/engine.rs
@@ -758,6 +758,8 @@ where
 					number,
 				)
 			},
+			// Nothing to do, this is handled internally by `SyncingStrategy`.
+			SyncingAction::Finished => {},
 		}
 	}

diff --git a/substrate/client/network/sync/src/strategy.rs b/substrate/client/network/sync/src/strategy.rs
index 0b59495f4482..7c966084e180 100644
--- a/substrate/client/network/sync/src/strategy.rs
+++ b/substrate/client/network/sync/src/strategy.rs
@@ -173,6 +173,57 @@ pub enum SyncingAction<B: BlockT> {
 		number: NumberFor<B>,
 		justifications: Justifications,
 	},
+	/// Strategy finished. Nothing to do, this is handled by `SyncingStrategy`.
+	Finished,
+}
+
+impl<B: BlockT> SyncingAction<B> {
+	fn is_finished(&self) -> bool {
+		matches!(self, SyncingAction::Finished)
+	}
+}
+
+impl<B: BlockT> From<WarpSyncAction<B>> for SyncingAction<B> {
+	fn from(action: WarpSyncAction<B>) -> Self {
+		match action {
+			WarpSyncAction::SendWarpProofRequest { peer_id, request } =>
+				SyncingAction::SendWarpProofRequest { peer_id, request },
+			WarpSyncAction::SendBlockRequest { peer_id, request } =>
+				SyncingAction::SendBlockRequest { peer_id, request },
+			WarpSyncAction::DropPeer(bad_peer) => SyncingAction::DropPeer(bad_peer),
+			WarpSyncAction::Finished => SyncingAction::Finished,
+		}
+	}
+}
+
+impl<B: BlockT> From<StateStrategyAction<B>> for SyncingAction<B> {
+	fn from(action: StateStrategyAction<B>) -> Self {
+		match action {
+			StateStrategyAction::SendStateRequest { peer_id, request } =>
+				SyncingAction::SendStateRequest { peer_id, request },
+			StateStrategyAction::DropPeer(bad_peer) => SyncingAction::DropPeer(bad_peer),
+			StateStrategyAction::ImportBlocks { origin, blocks } =>
+				SyncingAction::ImportBlocks { origin, blocks },
+			StateStrategyAction::Finished => SyncingAction::Finished,
+		}
+	}
+}
+
+impl<B: BlockT> From<ChainSyncAction<B>> for SyncingAction<B> {
+	fn from(action: ChainSyncAction<B>) -> Self {
+		match action {
+			ChainSyncAction::SendBlockRequest { peer_id, request } =>
+				SyncingAction::SendBlockRequest { peer_id, request },
+			ChainSyncAction::SendStateRequest { peer_id, request } =>
+				SyncingAction::SendStateRequest { peer_id, request },
+			ChainSyncAction::CancelRequest { peer_id } => SyncingAction::CancelRequest { peer_id },
+			ChainSyncAction::DropPeer(bad_peer) => SyncingAction::DropPeer(bad_peer),
+			ChainSyncAction::ImportBlocks { origin, blocks } =>
+				SyncingAction::ImportBlocks { origin, blocks },
+			ChainSyncAction::ImportJustifications { peer_id, hash, number, justifications } =>
+				SyncingAction::ImportJustifications { peer_id, hash, number, justifications },
+		}
+	}
 }

 /// Proxy to specific syncing strategies.
@@ -462,67 +513,21 @@ where
 	pub fn actions(&mut self) -> Result<Vec<SyncingAction<B>>, ClientError> {
 		// This function presumes that strategies are executed serially and must be refactored once
 		// we have parallel strategies.
-		if let Some(ref mut warp) = self.warp {
-			let mut actions = Vec::new();
-			for action in warp.actions() {
-				actions.push(match action {
-					WarpSyncAction::SendWarpProofRequest { peer_id, request } =>
-						SyncingAction::SendWarpProofRequest { peer_id, request },
-					WarpSyncAction::SendBlockRequest { peer_id, request } =>
-						SyncingAction::SendBlockRequest { peer_id, request },
-					WarpSyncAction::DropPeer(bad_peer) => SyncingAction::DropPeer(bad_peer),
-					WarpSyncAction::Finished => {
-						self.proceed_to_next()?;
-						return Ok(actions)
-					},
-				});
-			}
-			Ok(actions)
+		let actions: Vec<_> = if let Some(ref mut warp) = self.warp {
+			warp.actions().map(Into::into).collect()
 		} else if let Some(ref mut state) = self.state {
-			let mut actions = Vec::new();
-			for action in state.actions() {
-				actions.push(match action {
-					StateStrategyAction::SendStateRequest { peer_id, request } =>
-						SyncingAction::SendStateRequest { peer_id, request },
-					StateStrategyAction::DropPeer(bad_peer) => SyncingAction::DropPeer(bad_peer),
-					StateStrategyAction::ImportBlocks { origin, blocks } =>
-						SyncingAction::ImportBlocks { origin, blocks },
-					StateStrategyAction::Finished => {
-						self.proceed_to_next()?;
-						return Ok(actions)
-					},
-				});
-			}
-			Ok(actions)
+			state.actions().map(Into::into).collect()
 		} else if let Some(ref mut chain_sync) = self.chain_sync {
-			Ok(chain_sync
-				.actions()
-				.map(|action| match action {
-					ChainSyncAction::SendBlockRequest { peer_id, request } =>
-						SyncingAction::SendBlockRequest { peer_id, request },
-					ChainSyncAction::SendStateRequest { peer_id, request } =>
-						SyncingAction::SendStateRequest { peer_id, request },
-					ChainSyncAction::CancelRequest { peer_id } =>
-						SyncingAction::CancelRequest { peer_id },
-					ChainSyncAction::DropPeer(bad_peer) => SyncingAction::DropPeer(bad_peer),
-					ChainSyncAction::ImportBlocks { origin, blocks } =>
-						SyncingAction::ImportBlocks { origin, blocks },
-					ChainSyncAction::ImportJustifications {
-						peer_id,
-						hash,
-						number,
-						justifications,
-					} => SyncingAction::ImportJustifications {
-						peer_id,
-						hash,
-						number,
-						justifications,
-					},
-				})
-				.collect())
+			chain_sync.actions().map(Into::into).collect()
 		} else {
 			unreachable!("At least one syncing strategy is always active; qed")
+		};
+
+		if actions.iter().any(SyncingAction::is_finished) {
+			self.proceed_to_next()?;
 		}
+
+		Ok(actions)
 	}

 	/// Proceed with the next strategy if the active one finished.

From 0add17e849c6c27da6b8c6a270c05c3188d7cc8c Mon Sep 17 00:00:00 2001
From: Dmitry Markin
Date: Mon, 29 Jan 2024 15:39:47 +0200
Subject: [PATCH 19/31] Add PRDoc

---
 prdoc/pr_2814.prdoc | 15 +++++++++++++++
 1 file changed, 15 insertions(+)
 create mode 100644 prdoc/pr_2814.prdoc

diff --git a/prdoc/pr_2814.prdoc b/prdoc/pr_2814.prdoc
new file mode 100644
index 000000000000..b1a90d35d78a
--- /dev/null
+++ b/prdoc/pr_2814.prdoc
@@ -0,0 +1,15 @@
+# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0
+# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json
+
+title: Share peers between syncing strategies
+
+doc:
+  - audience: Node Dev
+    description: |
+      New struct `PeerPool` is added to `SyncingStrategy` to allow running syncing strategies
+      in parallel and sharing peers. Available peers (with no active requests) are retrieved by
+      calling `PeerPool::available_peers()`, and a peer is reserved for a request by a syncing
+      strategy using `AvailablePeer::reserve()`.
+ +crates: + - name: sc-network-sync From 9800af85d5ee15251a70c00364ababe80c784e13 Mon Sep 17 00:00:00 2001 From: Dmitry Markin Date: Mon, 29 Jan 2024 16:27:27 +0200 Subject: [PATCH 20/31] Test peers reservation in `WarpSync` --- .../client/network/sync/src/strategy/warp.rs | 170 +++++++++++++++--- 1 file changed, 144 insertions(+), 26 deletions(-) diff --git a/substrate/client/network/sync/src/strategy/warp.rs b/substrate/client/network/sync/src/strategy/warp.rs index 774e0e0f4310..fc0877bb02cc 100644 --- a/substrate/client/network/sync/src/strategy/warp.rs +++ b/substrate/client/network/sync/src/strategy/warp.rs @@ -974,6 +974,72 @@ mod test { assert!(warp_sync.warp_proof_request().is_none()); } + #[test] + fn warp_proof_request_reserves_peer() { + let client = mock_client_without_state(); + let mut provider = MockWarpSyncProvider::::new(); + provider + .expect_current_authorities() + .once() + .return_const(AuthorityList::default()); + let config = WarpSyncConfig::WithProvider(Arc::new(provider)); + let peer_pool = Arc::new(Mutex::new(Default::default())); + let mut warp_sync = WarpSync::new(Arc::new(client), config, peer_pool.clone()); + + // Make sure we have enough peers to make a request. + for best_number in 1..11 { + let peer_id = PeerId::random(); + peer_pool.lock().add_peer(peer_id); + warp_sync.add_peer(peer_id, Hash::random(), best_number); + } + assert!(matches!(warp_sync.phase, Phase::WarpProof { .. })); + + let (peer_id, _request) = warp_sync.warp_proof_request().unwrap(); + + // The peer is reserved. + assert!(!peer_pool.lock().available_peers().any(|p| *p.peer_id() == peer_id)); + } + + #[test] + fn warp_proof_response_frees_peer() { + let client = mock_client_without_state(); + let mut provider = MockWarpSyncProvider::::new(); + provider + .expect_current_authorities() + .once() + .return_const(AuthorityList::default()); + // It doesn't matter in this test if the warp proof verification succeeds. + provider.expect_verify().return_once(|_proof, _set_id, _authorities| { + Err(Box::new(std::io::Error::new(ErrorKind::Other, "test-verification-failure"))) + }); + let config = WarpSyncConfig::WithProvider(Arc::new(provider)); + let peer_pool = Arc::new(Mutex::new(Default::default())); + let mut warp_sync = WarpSync::new(Arc::new(client), config, peer_pool.clone()); + + // Make sure we have enough peers to make a request. + for best_number in 1..11 { + let peer_id = PeerId::random(); + peer_pool.lock().add_peer(peer_id); + warp_sync.add_peer(peer_id, Hash::random(), best_number); + } + assert!(matches!(warp_sync.phase, Phase::WarpProof { .. })); + + // Consume `SendWarpProofRequest` action. + let actions = warp_sync.actions().collect::>(); + assert_eq!(actions.len(), 1); + let WarpSyncAction::SendWarpProofRequest { peer_id: request_peer_id, .. } = actions[0] + else { + panic!("Invalid action"); + }; + // Peer we sent request to is now reserved. + assert!(!peer_pool.lock().available_peers().any(|p| *p.peer_id() == request_peer_id)); + + warp_sync.on_warp_proof_response(&request_peer_id, EncodedProof(Vec::new())); + + // The peer we sent a request to and received a response from is now available. + assert!(peer_pool.lock().available_peers().any(|p| *p.peer_id() == request_peer_id)); + } + #[test] fn bad_warp_proof_response_drops_peer() { let client = mock_client_without_state(); @@ -1228,11 +1294,6 @@ mod test { .build() .unwrap() .block; - let target_header = target_block.header().clone(); - // Warp proof is complete. 
- provider.expect_verify().return_once(move |_proof, set_id, authorities| { - Ok(VerificationResult::Complete(set_id, authorities, target_header)) - }); let config = WarpSyncConfig::WithProvider(Arc::new(provider)); let peer_pool = Arc::new(Mutex::new(Default::default())); let mut warp_sync = WarpSync::new(client, config, peer_pool.clone()); @@ -1269,11 +1330,6 @@ mod test { .build() .unwrap() .block; - let target_header = target_block.header().clone(); - // Warp proof is complete. - provider.expect_verify().return_once(move |_proof, set_id, authorities| { - Ok(VerificationResult::Complete(set_id, authorities, target_header)) - }); let config = WarpSyncConfig::WithProvider(Arc::new(provider)); let peer_pool = Arc::new(Mutex::new(Default::default())); let mut warp_sync = WarpSync::new(client, config, peer_pool.clone()); @@ -1326,11 +1382,6 @@ mod test { .unwrap(); let extra_block = extra_block_builder.build().unwrap().block; - let target_header = target_block.header().clone(); - // Warp proof is complete. - provider.expect_verify().return_once(move |_proof, set_id, authorities| { - Ok(VerificationResult::Complete(set_id, authorities, target_header)) - }); let config = WarpSyncConfig::WithProvider(Arc::new(provider)); let peer_pool = Arc::new(Mutex::new(Default::default())); let mut warp_sync = WarpSync::new(client, config, peer_pool.clone()); @@ -1406,11 +1457,6 @@ mod test { .unwrap(); let wrong_block = wrong_block_builder.build().unwrap().block; - let target_header = target_block.header().clone(); - // Warp proof is complete. - provider.expect_verify().return_once(move |_proof, set_id, authorities| { - Ok(VerificationResult::Complete(set_id, authorities, target_header)) - }); let config = WarpSyncConfig::WithProvider(Arc::new(provider)); let peer_pool = Arc::new(Mutex::new(Default::default())); let mut warp_sync = WarpSync::new(client, config, peer_pool.clone()); @@ -1462,11 +1508,6 @@ mod test { .push_storage_change(vec![1, 2, 3], Some(vec![4, 5, 6])) .unwrap(); let target_block = target_block_builder.build().unwrap().block; - let target_header = target_block.header().clone(); - // Warp proof is complete. - provider.expect_verify().return_once(move |_proof, set_id, authorities| { - Ok(VerificationResult::Complete(set_id, authorities, target_header)) - }); let config = WarpSyncConfig::WithProvider(Arc::new(provider)); let peer_pool = Arc::new(Mutex::new(Default::default())); let mut warp_sync = WarpSync::new(client, config, peer_pool.clone()); @@ -1511,5 +1552,82 @@ mod test { assert_eq!(result.target_justifications, justifications); } - // TODO: test that peers are freed from `PeerPool` when responses are received. + #[test] + fn target_block_request_reserves_peer() { + let client = Arc::new(TestClientBuilder::new().set_no_genesis().build()); + let target_block = BlockBuilderBuilder::new(&*client) + .on_parent_block(client.chain_info().best_hash) + .with_parent_block_number(client.chain_info().best_number) + .build() + .unwrap() + .build() + .unwrap() + .block; + let target_header = target_block.header().clone(); + let config = WarpSyncConfig::WaitForTarget; + let peer_pool = Arc::new(Mutex::new(Default::default())); + let mut warp_sync = WarpSync::new(client, config, peer_pool.clone()); + + // Make sure we have enough peers to make a request. + for best_number in 1..11 { + let peer_id = PeerId::random(); + peer_pool.lock().add_peer(peer_id); + warp_sync.add_peer(peer_id, Hash::random(), best_number); + } + + // No actions generated so far. 
+ assert_eq!(warp_sync.actions().count(), 0); + + warp_sync.set_target_block(target_header); + assert!(matches!(warp_sync.phase, Phase::TargetBlock(_))); + + let (peer_id, _request) = warp_sync.target_block_request().unwrap(); + + // The peer requested is reserved. + assert!(!peer_pool.lock().available_peers().any(|p| *p.peer_id() == peer_id)); + } + + #[test] + fn target_block_response_frees_peer() { + let client = Arc::new(TestClientBuilder::new().set_no_genesis().build()); + let mut provider = MockWarpSyncProvider::::new(); + provider + .expect_current_authorities() + .once() + .return_const(AuthorityList::default()); + let target_block = BlockBuilderBuilder::new(&*client) + .on_parent_block(client.chain_info().best_hash) + .with_parent_block_number(client.chain_info().best_number) + .build() + .unwrap() + .build() + .unwrap() + .block; + let config = WarpSyncConfig::WithProvider(Arc::new(provider)); + let peer_pool = Arc::new(Mutex::new(Default::default())); + let mut warp_sync = WarpSync::new(client, config, peer_pool.clone()); + + // Make sure we have enough peers to make a request. + for best_number in 1..11 { + let peer_id = PeerId::random(); + peer_pool.lock().add_peer(peer_id); + warp_sync.add_peer(peer_id, Hash::random(), best_number); + } + + // Manually set `TargetBlock` phase. + warp_sync.phase = Phase::TargetBlock(target_block.header().clone()); + + let (peer_id, request) = warp_sync.target_block_request().unwrap(); + + // The peer requested is reserved. + assert!(!peer_pool.lock().available_peers().any(|p| *p.peer_id() == peer_id)); + + // Dummy (empty) block response received. + let response = Vec::new(); + + let _ = warp_sync.on_block_response_inner(peer_id, request, response); + + // The peer we sent a request to and received a response from is now available. + assert!(peer_pool.lock().available_peers().any(|p| *p.peer_id() == peer_id)); + } } From 56f92a8df58e619413d28c325d67c606f569ccc7 Mon Sep 17 00:00:00 2001 From: Dmitry Markin Date: Mon, 29 Jan 2024 16:36:57 +0200 Subject: [PATCH 21/31] Test peers reservation by `StateStrategy` --- .../client/network/sync/src/strategy/state.rs | 42 ++++++++++++++++++- 1 file changed, 41 insertions(+), 1 deletion(-) diff --git a/substrate/client/network/sync/src/strategy/state.rs b/substrate/client/network/sync/src/strategy/state.rs index d833262ec0b8..41ab6e4f79ba 100644 --- a/substrate/client/network/sync/src/strategy/state.rs +++ b/substrate/client/network/sync/src/strategy/state.rs @@ -564,7 +564,47 @@ mod test { } #[test] - fn received_state_response_makes_peer_available_again() { + fn state_request_reserves_peer() { + let client = Arc::new(TestClientBuilder::new().set_no_genesis().build()); + let target_block = BlockBuilderBuilder::new(&*client) + .on_parent_block(client.chain_info().best_hash) + .with_parent_block_number(client.chain_info().best_number) + .build() + .unwrap() + .build() + .unwrap() + .block; + + let initial_peers = + (1..=10).map(|best_number| (PeerId::random(), best_number)).collect::>(); + let peer_pool = Arc::new(Mutex::new(PeerPool::default())); + initial_peers + .iter() + .for_each(|(peer_id, _)| peer_pool.lock().add_peer(*peer_id)); + + let mut state_strategy = StateStrategy::new( + client.clone(), + target_block.header().clone(), + None, + None, + false, + initial_peers.into_iter(), + peer_pool.clone(), + ); + + let (peer_id, _opaque_request) = state_strategy.state_request().unwrap(); + + // The peer requested is reserved. 
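// Aside: a minimal sketch of the reserve-on-request / free-on-response contract these
// tests exercise. `PeerPool`, `try_reserve_peer` and `free_peer` are the types from this
// series; the two wrapper functions below are illustrative only, not part of the patch.

use libp2p::PeerId;
use parking_lot::Mutex;

fn begin_request(pool: &Mutex<PeerPool>, peer_id: &PeerId) -> bool {
    // A strategy must win the reservation before it may send a request to the peer.
    pool.lock().try_reserve_peer(peer_id)
}

fn finish_request(pool: &Mutex<PeerPool>, peer_id: &PeerId) {
    // Receiving the response, even a failing one, makes the peer schedulable again.
    pool.lock().free_peer(peer_id);
}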
+ assert!(matches!( + state_strategy.peers.get(&peer_id).unwrap().state, + PeerState::DownloadingState, + )); + // Also in `PeerPool`. + assert!(!peer_pool.lock().available_peers().any(|p| *p.peer_id() == peer_id)); + } + + #[test] + fn state_response_makes_peer_available_again() { let mut state_sync_provider = MockStateSync::::new(); state_sync_provider.expect_import().return_once(|_| ImportResult::Continue); let peer_id = PeerId::random(); From cd6cfe3df3b8eea06bdce25b7800bdf402a93621 Mon Sep 17 00:00:00 2001 From: Dmitry Markin Date: Mon, 29 Jan 2024 17:19:12 +0200 Subject: [PATCH 22/31] Test `PeerPool` --- .../client/network/sync/src/extra_requests.rs | 10 +- substrate/client/network/sync/src/lib.rs | 1 + .../client/network/sync/src/peer_pool.rs | 213 ++++++++++++++++++ substrate/client/network/sync/src/strategy.rs | 83 +------ .../client/network/sync/src/strategy/state.rs | 2 +- .../client/network/sync/src/strategy/warp.rs | 2 +- 6 files changed, 222 insertions(+), 89 deletions(-) create mode 100644 substrate/client/network/sync/src/peer_pool.rs diff --git a/substrate/client/network/sync/src/extra_requests.rs b/substrate/client/network/sync/src/extra_requests.rs index 7d83667f5d58..4b52f04b434a 100644 --- a/substrate/client/network/sync/src/extra_requests.rs +++ b/substrate/client/network/sync/src/extra_requests.rs @@ -17,9 +17,7 @@ // along with this program. If not, see . use crate::{ - request_metrics::Metrics, - strategy::{chain_sync::PeerSync, PeerPool}, - LOG_TARGET, + peer_pool::PeerPool, request_metrics::Metrics, strategy::chain_sync::PeerSync, LOG_TARGET, }; use fork_tree::ForkTree; use libp2p::PeerId; @@ -354,9 +352,9 @@ impl<'a, B: BlockT> Matcher<'a, B> { #[cfg(test)] mod tests { use super::*; - use crate::strategy::{ - chain_sync::{PeerSync, PeerSyncState}, - PeerPool, + use crate::{ + peer_pool::PeerPool, + strategy::chain_sync::{PeerSync, PeerSyncState}, }; use parking_lot::Mutex; use quickcheck::{Arbitrary, Gen, QuickCheck}; diff --git a/substrate/client/network/sync/src/lib.rs b/substrate/client/network/sync/src/lib.rs index 494e3b87aa95..ce5a7642300e 100644 --- a/substrate/client/network/sync/src/lib.rs +++ b/substrate/client/network/sync/src/lib.rs @@ -25,6 +25,7 @@ pub use types::{SyncEvent, SyncEventStream, SyncState, SyncStatus, SyncStatusPro mod block_announce_validator; mod extra_requests; mod futures_stream; +mod peer_pool; mod pending_responses; mod request_metrics; mod schema; diff --git a/substrate/client/network/sync/src/peer_pool.rs b/substrate/client/network/sync/src/peer_pool.rs new file mode 100644 index 000000000000..3914a5e62e68 --- /dev/null +++ b/substrate/client/network/sync/src/peer_pool.rs @@ -0,0 +1,213 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +//! 
[`PeerPool`] manages the peers available for requests by syncing strategies. + +use crate::LOG_TARGET; +use libp2p::PeerId; +use log::warn; +use std::collections::HashMap; + +#[derive(Debug)] +enum PeerStatus { + Available, + Reserved, +} + +impl PeerStatus { + fn is_available(&self) -> bool { + matches!(self, PeerStatus::Available) + } +} + +#[derive(Default, Debug)] +pub struct PeerPool { + peers: HashMap, +} + +pub struct AvailablePeer<'a> { + peer_id: &'a PeerId, + status: &'a mut PeerStatus, +} + +impl<'a> AvailablePeer<'a> { + pub fn peer_id(&self) -> &'a PeerId { + self.peer_id + } + + pub fn reserve(&mut self) { + *self.status = PeerStatus::Reserved; + } +} + +impl PeerPool { + pub fn add_peer(&mut self, peer_id: PeerId) { + self.peers.insert(peer_id, PeerStatus::Available); + } + + pub fn remove_peer(&mut self, peer_id: &PeerId) { + self.peers.remove(peer_id); + } + + pub fn available_peers<'a>(&'a mut self) -> impl Iterator + 'a { + self.peers.iter_mut().filter_map(|(peer_id, status)| { + status.is_available().then_some(AvailablePeer::<'a> { peer_id, status }) + }) + } + + pub fn try_reserve_peer(&mut self, peer_id: &PeerId) -> bool { + match self.peers.get_mut(peer_id) { + Some(peer_status) => match peer_status { + PeerStatus::Available => { + *peer_status = PeerStatus::Reserved; + true + }, + PeerStatus::Reserved => false, + }, + None => { + warn!(target: LOG_TARGET, "Trying to reserve unknown peer {peer_id}."); + false + }, + } + } + + pub fn free_peer(&mut self, peer_id: &PeerId) { + match self.peers.get_mut(peer_id) { + Some(peer_status) => match peer_status { + PeerStatus::Available => { + warn!(target: LOG_TARGET, "Trying to free available peer {peer_id}.") + }, + PeerStatus::Reserved => { + *peer_status = PeerStatus::Available; + }, + }, + None => { + warn!(target: LOG_TARGET, "Trying to free unknown peer {peer_id}."); + }, + } + } +} + +#[cfg(test)] +mod test { + use super::*; + + #[test] + fn adding_peer() { + let mut peer_pool = PeerPool::default(); + assert_eq!(peer_pool.available_peers().count(), 0); + + // Add peer. + let peer_id = PeerId::random(); + peer_pool.add_peer(peer_id); + + // Peer is available. + assert_eq!(peer_pool.available_peers().count(), 1); + assert!(peer_pool.available_peers().any(|p| *p.peer_id() == peer_id)); + } + + #[test] + fn removing_peer() { + let mut peer_pool = PeerPool::default(); + assert_eq!(peer_pool.available_peers().count(), 0); + + // Add peer. + let peer_id = PeerId::random(); + peer_pool.add_peer(peer_id); + assert_eq!(peer_pool.available_peers().count(), 1); + assert!(peer_pool.available_peers().any(|p| *p.peer_id() == peer_id)); + + // Remove peer. + peer_pool.remove_peer(&peer_id); + assert_eq!(peer_pool.available_peers().count(), 0); + } + + #[test] + fn reserving_peer_via_available_peers() { + let mut peer_pool = PeerPool::default(); + assert_eq!(peer_pool.available_peers().count(), 0); + + let peer_id = PeerId::random(); + peer_pool.add_peer(peer_id); + assert_eq!(peer_pool.available_peers().count(), 1); + assert!(peer_pool.available_peers().any(|p| *p.peer_id() == peer_id)); + + // Reserve via `available_peers()`. + peer_pool.available_peers().for_each(|mut available_peer| { + assert_eq!(*available_peer.peer_id(), peer_id); + available_peer.reserve(); + }); + + // Peer is reserved. 
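// Aside: `AvailablePeer` hands out a mutable borrow of the pool entry, so a caller can
// pick a peer and reserve it in a single pass over `available_peers()`, with no second
// `HashMap` lookup. A minimal sketch of that selection loop; the helper itself is
// illustrative and assumes `use libp2p::PeerId;`.

fn reserve_first_available(pool: &mut PeerPool) -> Option<PeerId> {
    pool.available_peers().next().map(|mut available| {
        let peer_id = *available.peer_id();
        // Flips the entry to `PeerStatus::Reserved` through the mutable borrow.
        available.reserve();
        peer_id
    })
}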
+ assert_eq!(peer_pool.available_peers().count(), 0); + assert!(!peer_pool.try_reserve_peer(&peer_id)); + } + + #[test] + fn reserving_peer_via_try_reserve() { + let mut peer_pool = PeerPool::default(); + assert_eq!(peer_pool.available_peers().count(), 0); + + let peer_id = PeerId::random(); + peer_pool.add_peer(peer_id); + assert_eq!(peer_pool.available_peers().count(), 1); + assert!(peer_pool.available_peers().any(|p| *p.peer_id() == peer_id)); + + // Reserve via `try_reserve_peer()`. + assert!(peer_pool.try_reserve_peer(&peer_id)); + + // Peer is reserved. + assert_eq!(peer_pool.available_peers().count(), 0); + assert!(!peer_pool.try_reserve_peer(&peer_id)); + } + + #[test] + fn freeing_peer() { + let mut peer_pool = PeerPool::default(); + assert_eq!(peer_pool.available_peers().count(), 0); + + let peer_id = PeerId::random(); + peer_pool.add_peer(peer_id); + assert_eq!(peer_pool.available_peers().count(), 1); + assert!(peer_pool.available_peers().any(|p| *p.peer_id() == peer_id)); + + // Reserve via `try_reserve_peer()`. + assert!(peer_pool.try_reserve_peer(&peer_id)); + assert_eq!(peer_pool.available_peers().count(), 0); + assert!(!peer_pool.try_reserve_peer(&peer_id)); + + // Free peer. + peer_pool.free_peer(&peer_id); + + // Peer is available. + assert_eq!(peer_pool.available_peers().count(), 1); + assert!(peer_pool.available_peers().any(|p| *p.peer_id() == peer_id)); + + // And can be reserved again. + assert!(peer_pool.try_reserve_peer(&peer_id)); + } + + #[test] + fn reserving_unknown_peer_fails() { + let mut peer_pool = PeerPool::default(); + assert_eq!(peer_pool.available_peers().count(), 0); + + let peer_id = PeerId::random(); + assert!(!peer_pool.try_reserve_peer(&peer_id)); + } +} diff --git a/substrate/client/network/sync/src/strategy.rs b/substrate/client/network/sync/src/strategy.rs index 7c966084e180..aa4364d18430 100644 --- a/substrate/client/network/sync/src/strategy.rs +++ b/substrate/client/network/sync/src/strategy.rs @@ -25,12 +25,13 @@ pub mod state_sync; pub mod warp; use crate::{ + peer_pool::PeerPool, types::{BadPeer, OpaqueStateRequest, OpaqueStateResponse, SyncStatus}, LOG_TARGET, }; use chain_sync::{ChainSync, ChainSyncAction, ChainSyncMode}; use libp2p::PeerId; -use log::{error, info, warn}; +use log::{error, info}; use parking_lot::Mutex; use prometheus_endpoint::Registry; use sc_client_api::{BlockBackend, ProofProvider}; @@ -72,86 +73,6 @@ pub struct SyncingConfig { pub metrics_registry: Option, } -#[derive(Debug)] -enum PeerStatus { - Available, - Reserved, -} - -impl PeerStatus { - fn is_available(&self) -> bool { - matches!(self, PeerStatus::Available) - } -} - -#[derive(Default, Debug)] -pub struct PeerPool { - peers: HashMap, -} - -pub struct AvailablePeer<'a> { - peer_id: &'a PeerId, - status: &'a mut PeerStatus, -} - -impl<'a> AvailablePeer<'a> { - pub fn peer_id(&self) -> &'a PeerId { - self.peer_id - } - - pub fn reserve(&mut self) { - *self.status = PeerStatus::Reserved; - } -} - -impl PeerPool { - pub fn add_peer(&mut self, peer_id: PeerId) { - self.peers.insert(peer_id, PeerStatus::Available); - } - - pub fn remove_peer(&mut self, peer_id: &PeerId) { - self.peers.remove(peer_id); - } - - pub fn available_peers<'a>(&'a mut self) -> impl Iterator + 'a { - self.peers.iter_mut().filter_map(|(peer_id, status)| { - status.is_available().then_some(AvailablePeer::<'a> { peer_id, status }) - }) - } - - pub fn try_reserve_peer(&mut self, peer_id: &PeerId) -> bool { - match self.peers.get_mut(peer_id) { - Some(peer_status) => match peer_status { - 
PeerStatus::Available => { - *peer_status = PeerStatus::Reserved; - true - }, - PeerStatus::Reserved => false, - }, - None => { - warn!(target: LOG_TARGET, "Trying to reserve unknown peer {peer_id}."); - false - }, - } - } - - pub fn free_peer(&mut self, peer_id: &PeerId) { - match self.peers.get_mut(peer_id) { - Some(peer_status) => match peer_status { - PeerStatus::Available => { - warn!(target: LOG_TARGET, "Trying to free available peer {peer_id}.") - }, - PeerStatus::Reserved => { - *peer_status = PeerStatus::Available; - }, - }, - None => { - warn!(target: LOG_TARGET, "Trying to free unknown peer {peer_id}."); - }, - } - } -} - #[derive(Debug)] pub enum SyncingAction { /// Send block request to peer. Always implies dropping a stale block request to the same peer. diff --git a/substrate/client/network/sync/src/strategy/state.rs b/substrate/client/network/sync/src/strategy/state.rs index 41ab6e4f79ba..087505447761 100644 --- a/substrate/client/network/sync/src/strategy/state.rs +++ b/substrate/client/network/sync/src/strategy/state.rs @@ -18,8 +18,8 @@ //! State sync strategy. -use super::PeerPool; use crate::{ + peer_pool::PeerPool, schema::v1::StateResponse, strategy::state_sync::{ImportResult, StateSync, StateSyncProvider}, types::{BadPeer, OpaqueStateRequest, OpaqueStateResponse, SyncState, SyncStatus}, diff --git a/substrate/client/network/sync/src/strategy/warp.rs b/substrate/client/network/sync/src/strategy/warp.rs index fc0877bb02cc..c0da5abfc747 100644 --- a/substrate/client/network/sync/src/strategy/warp.rs +++ b/substrate/client/network/sync/src/strategy/warp.rs @@ -20,8 +20,8 @@ pub use sp_consensus_grandpa::{AuthorityList, SetId}; -use super::PeerPool; use crate::{ + peer_pool::PeerPool, strategy::chain_sync::validate_blocks, types::{BadPeer, SyncState, SyncStatus}, LOG_TARGET, From 7d7f936fc706bf8a8c5c47b37081163bc1a9d2b1 Mon Sep 17 00:00:00 2001 From: Dmitry Markin Date: Mon, 29 Jan 2024 18:40:48 +0200 Subject: [PATCH 23/31] WIP: test peer reservation by `ChainSync` --- .../sync/src/strategy/chain_sync/test.rs | 95 +++++++++++++++++++ 1 file changed, 95 insertions(+) diff --git a/substrate/client/network/sync/src/strategy/chain_sync/test.rs b/substrate/client/network/sync/src/strategy/chain_sync/test.rs index 934d443b2986..916837029b3e 100644 --- a/substrate/client/network/sync/src/strategy/chain_sync/test.rs +++ b/substrate/client/network/sync/src/strategy/chain_sync/test.rs @@ -1033,3 +1033,98 @@ fn request_across_forks() { assert!(sync.is_known(&block.header.parent_hash())); } } + +#[test] +fn block_request_at_genesis_reserves_peer_in_peer_pool() { + let client = Arc::new(TestClientBuilder::new().build()); + let peer_pool = Arc::new(Mutex::new(PeerPool::default())); + let mut sync = + ChainSync::new(ChainSyncMode::Full, client.clone(), 1, 64, None, peer_pool.clone()) + .unwrap(); + + // Add a peer. + let peer_id = PeerId::random(); + peer_pool.lock().add_peer(peer_id); + sync.add_peer(peer_id, Hash::random(), 10); + + // A request is sent to our peer. + let actions = sync.actions().collect::>(); + assert_eq!(actions.len(), 1); + assert!(actions.iter().all(|action| match action { + ChainSyncAction::SendBlockRequest { peer_id: requested_peer_id, .. } => + requested_peer_id == &peer_id, + _ => false, + })); + + // The peer is reserved in `PeerPool`. 
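// Aside: the reservation checks in these tests all reduce to one predicate: a peer that
// was added to the pool is reserved exactly when it no longer shows up among the
// available peers. A small helper sketch, illustrative and not part of the patch:

use libp2p::PeerId;
use parking_lot::Mutex;

fn is_reserved(pool: &Mutex<PeerPool>, peer_id: &PeerId) -> bool {
    !pool.lock().available_peers().any(|p| *p.peer_id() == *peer_id)
}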
+ assert_eq!(peer_pool.lock().available_peers().count(), 0); +} + +#[test] +fn block_response_frees_peer_in_peer_pool() { + let client = Arc::new(TestClientBuilder::new().build()); + let peer_pool = Arc::new(Mutex::new(PeerPool::default())); + let mut sync = + ChainSync::new(ChainSyncMode::Full, client.clone(), 1, 64, None, peer_pool.clone()) + .unwrap(); + + // Create blocks. + let blocks = { + let mut client = Arc::new(TestClientBuilder::new().build()); + (0..10).map(|_| build_block(&mut client, None, false)).collect::>() + }; + let best_block = blocks.last().unwrap().clone(); + + // Add a peer. + let peer_id = PeerId::random(); + peer_pool.lock().add_peer(peer_id); + sync.add_peer(peer_id, best_block.hash(), *best_block.header().number()); + + // Transit peers into requestable state + let _ = sync.handle_new_peers().collect::>(); + + // A request is sent to our peer. + let request = get_block_request(&mut sync, FromBlock::Hash(best_block.hash()), 10, &peer_id); + + // The peer is reserved in `PeerPool`. + assert_eq!(peer_pool.lock().available_peers().count(), 0); + + // Receive response. + let mut response_blocks = blocks.clone(); + response_blocks.reverse(); + let response = create_block_response(response_blocks); + sync.on_block_data(&peer_id, Some(request), response).unwrap(); + + // The peer is now available again. + assert!(peer_pool.lock().available_peers().any(|p| *p.peer_id() == peer_id)); +} + +#[test] +fn new_peer_ancestry_search_reserves_peer_in_peer_pool() { + todo!(); +} + +#[test] +fn forced_ancestry_search_reserves_peer_in_peer_pool() { + todo!(); +} + +#[test] +fn peer_block_request_reserves_peer_in_peer_pool() { + todo!(); +} + +#[test] +fn fork_sync_request_reserves_peer_in_peer_pool() { + todo!(); +} + +#[test] +fn justification_request_reserves_peer_in_peer_pool() { + todo!(); +} + +#[test] +fn justification_response_frees_peer_in_peer_pool() { + todo!(); +} From 2605f2ab9a2d60cb958e685183753e4a10e2773a Mon Sep 17 00:00:00 2001 From: Dmitry Markin Date: Tue, 30 Jan 2024 16:18:20 +0200 Subject: [PATCH 24/31] Test peer reservation in `PeerPool` by `ChainSync` --- .../sync/src/strategy/chain_sync/test.rs | 228 +++++++++++++++++- 1 file changed, 221 insertions(+), 7 deletions(-) diff --git a/substrate/client/network/sync/src/strategy/chain_sync/test.rs b/substrate/client/network/sync/src/strategy/chain_sync/test.rs index 916837029b3e..3382cddef3c1 100644 --- a/substrate/client/network/sync/src/strategy/chain_sync/test.rs +++ b/substrate/client/network/sync/src/strategy/chain_sync/test.rs @@ -288,7 +288,7 @@ fn unwrap_from_block_number(from: FromBlock) -> u64 { /// announcement from this node in its sync process. Meaning our common number didn't change. It /// is now expected that we start an ancestor search to find the common number. #[test] -fn do_ancestor_search_when_common_block_to_best_qeued_gap_is_to_big() { +fn do_ancestor_search_when_common_block_to_best_queued_gap_is_to_big() { sp_tracing::try_init_simple(); let blocks = { @@ -1101,30 +1101,244 @@ fn block_response_frees_peer_in_peer_pool() { #[test] fn new_peer_ancestry_search_reserves_peer_in_peer_pool() { - todo!(); + let mut client = Arc::new(TestClientBuilder::new().build()); + + // Import blocks to make sure we are not at genesis to kick in ancestry search. 
+ let block = BlockBuilderBuilder::new(&*client) + .on_parent_block(client.chain_info().best_hash) + .with_parent_block_number(client.chain_info().best_number) + .build() + .unwrap() + .build() + .unwrap() + .block; + block_on(client.import(BlockOrigin::Own, block.clone())).unwrap(); + + let peer_pool = Arc::new(Mutex::new(PeerPool::default())); + let mut sync = + ChainSync::new(ChainSyncMode::Full, client.clone(), 1, 64, None, peer_pool.clone()) + .unwrap(); + + // Check that we are not at genesis, so ancestry search will be started. + assert!(!sync.best_queued_number.is_zero()); + + // Add a peer. + let peer_id = PeerId::random(); + peer_pool.lock().add_peer(peer_id); + assert!(peer_pool.lock().available_peers().any(|p| *p.peer_id() == peer_id)); + sync.add_peer(peer_id, Hash::random(), 10); + + // Initiate ancestry search request to our peer. + let _ = sync.handle_new_peers().collect::>(); + + // Make sure we are in fact doing ancestry search. + assert!(matches!( + sync.peers.get(&peer_id).unwrap(), + PeerSync { state, .. } if matches!(state, PeerSyncState::AncestorSearch { .. }))); + + // The peer is reserved in `PeerPool`. + assert_eq!(peer_pool.lock().available_peers().count(), 0); } #[test] fn forced_ancestry_search_reserves_peer_in_peer_pool() { - todo!(); + let mut client = Arc::new(TestClientBuilder::new().build()); + + let peer_pool = Arc::new(Mutex::new(PeerPool::default())); + let mut sync = + ChainSync::new(ChainSyncMode::Full, client.clone(), 1, 64, None, peer_pool.clone()) + .unwrap(); + + // Add a peer that will be higher than our queued number (see below). + let peer_id = PeerId::random(); + peer_pool.lock().add_peer(peer_id); + assert!(peer_pool.lock().available_peers().any(|p| *p.peer_id() == peer_id)); + sync.add_peer(peer_id, Hash::random(), (MAX_BLOCKS_TO_LOOK_BACKWARDS + 2).into()); + + // Make sure adding peer doesn't generate ancestry request in `handle_new_peers`. + assert_eq!(sync.handle_new_peers().count(), 0); + assert!(matches!( + sync.peers.get(&peer_id).unwrap(), + PeerSync { state, .. } if matches!(state, PeerSyncState::Available))); + + // To trigger desired branch in `ChainSync` we must be more than `MAX_BLOCKS_TO_LOOK_BACKWARDS` + // above the common number (genesis), the peer best number should be higher than our best queud, + // and the common number must be smaller than the last finalized number. + let blocks = (0..MAX_BLOCKS_TO_LOOK_BACKWARDS + 1) + .map(|_| build_block(&mut client, None, false)) + .collect::>(); + + // Manually update best queued. + let info = client.info(); + sync.best_queued_hash = info.best_hash; + sync.best_queued_number = info.best_number; + + // Finalize block 1. + let just = (*b"TEST", Vec::new()); + client.finalize_block(blocks[0].hash(), Some(just)).unwrap(); + + // Now the desired branch in `block_requests` should be triggered. + let _ = sync.block_requests(); + + // Make sure we are in fact doing ancestry search. + assert!(matches!( + sync.peers.get(&peer_id).unwrap(), + PeerSync { state, .. } if matches!(state, PeerSyncState::AncestorSearch { .. }))); + + // The peer is reserved in `PeerPool`. 
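// Aside: the setup above arranges the three conditions that force an ancestor search for
// an `Available` peer. Written out as a predicate; the function and its parameters are
// illustrative, with block numbers as `u64` as in these tests.

fn forces_ancestor_search(
    common_number: u64,
    best_queued_number: u64,
    peer_best_number: u64,
    last_finalized_number: u64,
    max_blocks_to_look_backwards: u64,
) -> bool {
    // The common block is too far behind our best queued block,
    let gap_too_large =
        best_queued_number.saturating_sub(common_number) > max_blocks_to_look_backwards;
    // the peer claims a better chain than ours,
    let peer_is_ahead = peer_best_number > best_queued_number;
    // and the common block sits below the last finalized block, so it is suspect.
    let common_below_finalized = common_number < last_finalized_number;
    gap_too_large && peer_is_ahead && common_below_finalized
}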
+ assert_eq!(peer_pool.lock().available_peers().count(), 0); } #[test] fn peer_block_request_reserves_peer_in_peer_pool() { - todo!(); + let client = Arc::new(TestClientBuilder::new().build()); + + let peer_pool = Arc::new(Mutex::new(PeerPool::default())); + let mut sync = + ChainSync::new(ChainSyncMode::Full, client.clone(), 1, 64, None, peer_pool.clone()) + .unwrap(); + + // Add a peer that will be higher than our queued number (see below). + let peer_id = PeerId::random(); + peer_pool.lock().add_peer(peer_id); + assert!(peer_pool.lock().available_peers().any(|p| *p.peer_id() == peer_id)); + sync.add_peer(peer_id, Hash::random(), 10); + + // Make sure adding peer doesn't generate fork request in `handle_new_peers`. + assert_eq!(sync.handle_new_peers().count(), 0); + assert!(matches!( + sync.peers.get(&peer_id).unwrap(), + PeerSync { state, .. } if matches!(state, PeerSyncState::Available))); + + // Now we should be downloading new blocks. + let _ = sync.block_requests(); + assert!(matches!( + sync.peers.get(&peer_id).unwrap(), + PeerSync { state, .. } if matches!(state, PeerSyncState::DownloadingNew(_)))); + + // The peer is reserved in `PeerPool`. + assert_eq!(peer_pool.lock().available_peers().count(), 0); } #[test] fn fork_sync_request_reserves_peer_in_peer_pool() { - todo!(); + let client = Arc::new(TestClientBuilder::new().build()); + + let peer_pool = Arc::new(Mutex::new(PeerPool::default())); + let mut sync = + ChainSync::new(ChainSyncMode::Full, client.clone(), 1, 64, None, peer_pool.clone()) + .unwrap(); + + // Add a peer that will be higher than our queued number (see below). + let peer_id = PeerId::random(); + peer_pool.lock().add_peer(peer_id); + assert!(peer_pool.lock().available_peers().any(|p| *p.peer_id() == peer_id)); + let fork_hash = Hash::random(); + let fork_number = 10; + sync.add_peer(peer_id, fork_hash, fork_number); + + // Make sure adding peer doesn't generate fork request in `handle_new_peers`. + assert_eq!(sync.handle_new_peers().count(), 0); + assert!(matches!( + sync.peers.get(&peer_id).unwrap(), + PeerSync { state, .. } if matches!(state, PeerSyncState::Available))); + + // Add a fork target. + sync.set_sync_fork_request(vec![peer_id], &fork_hash, fork_number); + + // Manually patch `best_queued` to be > `peer.best_number` to trigger fork request. + sync.best_queued_number = 20; + + // Now we should be downloading an old fork. + let _ = sync.block_requests(); + assert!(matches!( + sync.peers.get(&peer_id).unwrap(), + PeerSync { state, .. } if matches!(state, PeerSyncState::DownloadingStale(_)))); + + // The peer is reserved in `PeerPool`. + assert_eq!(peer_pool.lock().available_peers().count(), 0); } #[test] fn justification_request_reserves_peer_in_peer_pool() { - todo!(); + let mut client = Arc::new(TestClientBuilder::new().build()); + + let peer_pool = Arc::new(Mutex::new(PeerPool::default())); + let mut sync = + ChainSync::new(ChainSyncMode::Full, client.clone(), 1, 64, None, peer_pool.clone()) + .unwrap(); + + // Add a peer that will be higher than our queued number (see below). + let peer_id = PeerId::random(); + peer_pool.lock().add_peer(peer_id); + assert!(peer_pool.lock().available_peers().any(|p| *p.peer_id() == peer_id)); + sync.add_peer(peer_id, Hash::random(), (MAX_BLOCKS_TO_LOOK_BACKWARDS + 2).into()); + + // Make sure adding peer doesn't generate ancestry request in `handle_new_peers`. + assert_eq!(sync.handle_new_peers().count(), 0); + assert!(matches!( + sync.peers.get(&peer_id).unwrap(), + PeerSync { state, .. 
} if matches!(state, PeerSyncState::Available))); + + // Import some blocks to request justifications for. + let blocks = (0..10).map(|_| build_block(&mut client, None, false)).collect::>(); + + let best_block = blocks.last().unwrap(); + + // Request an extra justification for block 10. + sync.request_justification(&best_block.hash(), *best_block.header().number()); + + // Generate request. + let _ = sync.justification_requests(); + assert!(matches!( + sync.peers.get(&peer_id).unwrap(), + PeerSync { state, .. } if matches!(state, PeerSyncState::DownloadingJustification(_)))); + + // The peer is reserved in `PeerPool`. + assert_eq!(peer_pool.lock().available_peers().count(), 0); } #[test] fn justification_response_frees_peer_in_peer_pool() { - todo!(); + let mut client = Arc::new(TestClientBuilder::new().build()); + + let peer_pool = Arc::new(Mutex::new(PeerPool::default())); + let mut sync = + ChainSync::new(ChainSyncMode::Full, client.clone(), 1, 64, None, peer_pool.clone()) + .unwrap(); + + // Add a peer that will be higher than our queued number (see below). + let peer_id = PeerId::random(); + peer_pool.lock().add_peer(peer_id); + assert!(peer_pool.lock().available_peers().any(|p| *p.peer_id() == peer_id)); + sync.add_peer(peer_id, Hash::random(), (MAX_BLOCKS_TO_LOOK_BACKWARDS + 2).into()); + + // Make sure adding peer doesn't generate ancestry request in `handle_new_peers`. + assert_eq!(sync.handle_new_peers().count(), 0); + assert!(matches!( + sync.peers.get(&peer_id).unwrap(), + PeerSync { state, .. } if matches!(state, PeerSyncState::Available))); + + // Import some blocks to request justifications for. + let blocks = (0..10).map(|_| build_block(&mut client, None, false)).collect::>(); + + let best_block = blocks.last().unwrap(); + + // Request an extra justification for block 10. + sync.request_justification(&best_block.hash(), *best_block.header().number()); + + // Generate request. + let (_peer_id, request) = sync.justification_requests().pop().unwrap(); + assert!(matches!( + sync.peers.get(&peer_id).unwrap(), + PeerSync { state, .. } if matches!(state, PeerSyncState::DownloadingJustification(_)))); + + // The peer is reserved in `PeerPool`. + assert_eq!(peer_pool.lock().available_peers().count(), 0); + + // We receive a justification response (dummy payload doesn't matter). + sync.on_block_response(peer_id, request, Vec::new()); + + // The peer is freed in `PeerPool`. 
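// Aside: `free_peer` logs a warning when asked to free a peer that is not reserved, so a
// response handler should free a peer only if a request to it was actually in flight. A
// sketch of that guard, where the `in_flight` set is a hypothetical stand-in for the
// strategy's per-peer request state:

use libp2p::PeerId;
use parking_lot::Mutex;
use std::collections::HashSet;

fn on_response_received(
    in_flight: &mut HashSet<PeerId>,
    pool: &Mutex<PeerPool>,
    peer_id: PeerId,
) {
    if in_flight.remove(&peer_id) {
        // Only a peer we actually reserved for this request goes back into the pool.
        pool.lock().free_peer(&peer_id);
    }
}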
+ assert!(peer_pool.lock().available_peers().any(|p| *p.peer_id() == peer_id)); } From acc7a765b45b8d598d64e3d80ef2781b03895af9 Mon Sep 17 00:00:00 2001 From: Dmitry Markin Date: Wed, 31 Jan 2024 10:14:24 +0200 Subject: [PATCH 25/31] Apply suggestions from code review Co-authored-by: Aaro Altonen <48052676+altonen@users.noreply.github.com> --- substrate/client/network/sync/src/strategy.rs | 2 ++ 1 file changed, 2 insertions(+) diff --git a/substrate/client/network/sync/src/strategy.rs b/substrate/client/network/sync/src/strategy.rs index aa4364d18430..4ece277dd25a 100644 --- a/substrate/client/network/sync/src/strategy.rs +++ b/substrate/client/network/sync/src/strategy.rs @@ -251,11 +251,13 @@ where } else { Some((announce.header.hash(), *announce.header.number())) }; + if let Some(new_best) = new_best { if let Some(best) = self.peer_best_blocks.get_mut(&peer_id) { *best = new_best; } } + new_best } From da9dd82734206d0149905c8d003b9306df194e64 Mon Sep 17 00:00:00 2001 From: Dmitry Markin Date: Wed, 31 Jan 2024 10:15:19 +0200 Subject: [PATCH 26/31] Apply suggestions from code review --- substrate/client/network/sync/src/strategy/state.rs | 2 -- 1 file changed, 2 deletions(-) diff --git a/substrate/client/network/sync/src/strategy/state.rs b/substrate/client/network/sync/src/strategy/state.rs index 087505447761..921c014066c7 100644 --- a/substrate/client/network/sync/src/strategy/state.rs +++ b/substrate/client/network/sync/src/strategy/state.rs @@ -868,6 +868,4 @@ mod test { // No more actions generated. assert_eq!(state_strategy.actions().count(), 0); } - - // TODO: test that peers are freed from `PeerPool` when responses are received. } From 94a07a6d00d3a2266dbc402536b40468438c4885 Mon Sep 17 00:00:00 2001 From: Dmitry Markin Date: Wed, 31 Jan 2024 15:51:39 +0200 Subject: [PATCH 27/31] Apply review suggestions --- substrate/client/network/sync/src/strategy.rs | 59 ++--- .../network/sync/src/strategy/chain_sync.rs | 16 +- .../sync/src/strategy/chain_sync/test.rs | 220 ++++++++++++++---- 3 files changed, 216 insertions(+), 79 deletions(-) diff --git a/substrate/client/network/sync/src/strategy.rs b/substrate/client/network/sync/src/strategy.rs index 4ece277dd25a..31a288b54093 100644 --- a/substrate/client/network/sync/src/strategy.rs +++ b/substrate/client/network/sync/src/strategy.rs @@ -198,6 +198,7 @@ where config.max_blocks_per_request, config.metrics_registry.clone(), peer_pool.clone(), + std::iter::empty(), )?; Ok(Self { config, @@ -216,18 +217,16 @@ where self.peer_pool.lock().add_peer(peer_id); self.peer_best_blocks.insert(peer_id, (best_hash, best_number)); - self.warp.iter_mut().for_each(|s| s.add_peer(peer_id, best_hash, best_number)); - self.state.iter_mut().for_each(|s| s.add_peer(peer_id, best_hash, best_number)); - self.chain_sync - .iter_mut() - .for_each(|s| s.add_peer(peer_id, best_hash, best_number)); + self.warp.as_mut().map(|s| s.add_peer(peer_id, best_hash, best_number)); + self.state.as_mut().map(|s| s.add_peer(peer_id, best_hash, best_number)); + self.chain_sync.as_mut().map(|s| s.add_peer(peer_id, best_hash, best_number)); } /// Notify that a peer has disconnected. 
pub fn remove_peer(&mut self, peer_id: &PeerId) { - self.warp.iter_mut().for_each(|s| s.remove_peer(peer_id)); - self.state.iter_mut().for_each(|s| s.remove_peer(peer_id)); - self.chain_sync.iter_mut().for_each(|s| s.remove_peer(peer_id)); + self.warp.as_mut().map(|s| s.remove_peer(peer_id)); + self.state.as_mut().map(|s| s.remove_peer(peer_id)); + self.chain_sync.as_mut().map(|s| s.remove_peer(peer_id)); self.peer_pool.lock().remove_peer(peer_id); self.peer_best_blocks.remove(peer_id); @@ -418,16 +417,19 @@ where &mut self, target_header: B::Header, ) -> Result<(), ()> { - if let Some(ref mut warp) = self.warp { - warp.set_target_block(target_header); - Ok(()) - } else { - error!( - target: LOG_TARGET, - "Cannot set warp sync target block: no warp sync strategy is active." - ); - debug_assert!(false); - Err(()) + match self.warp { + Some(ref mut warp) => { + warp.set_target_block(target_header); + Ok(()) + }, + None => { + error!( + target: LOG_TARGET, + "Cannot set warp sync target block: no warp sync strategy is active." + ); + debug_assert!(false); + Err(()) + }, } } @@ -477,6 +479,7 @@ where self.warp = None; self.state = Some(state_sync); + Ok(()) }, None => { @@ -484,13 +487,16 @@ where target: LOG_TARGET, "Warp sync failed. Falling back to full sync.", ); - let mut chain_sync = match ChainSync::new( + let chain_sync = match ChainSync::new( chain_sync_mode(self.config.mode), self.client.clone(), self.config.max_parallel_downloads, self.config.max_blocks_per_request, self.config.metrics_registry.clone(), self.peer_pool.clone(), + self.peer_best_blocks.iter().map(|(peer_id, (best_hash, best_number))| { + (*peer_id, *best_hash, *best_number) + }), ) { Ok(chain_sync) => chain_sync, Err(e) => { @@ -498,13 +504,10 @@ where return Err(e) }, }; - // Let `ChainSync` know about connected peers. - self.peer_best_blocks.iter().for_each(|(peer_id, (best_hash, best_number))| { - chain_sync.add_peer(*peer_id, *best_hash, *best_number) - }); self.warp = None; self.chain_sync = Some(chain_sync); + Ok(()) }, } @@ -514,13 +517,16 @@ where } else { error!(target: LOG_TARGET, "State sync failed. Falling back to full sync."); } - let mut chain_sync = match ChainSync::new( + let chain_sync = match ChainSync::new( chain_sync_mode(self.config.mode), self.client.clone(), self.config.max_parallel_downloads, self.config.max_blocks_per_request, self.config.metrics_registry.clone(), self.peer_pool.clone(), + self.peer_best_blocks.iter().map(|(peer_id, (best_hash, best_number))| { + (*peer_id, *best_hash, *best_number) + }), ) { Ok(chain_sync) => chain_sync, Err(e) => { @@ -528,13 +534,10 @@ where return Err(e); }, }; - // Let `ChainSync` know about connected peers. 
- self.peer_best_blocks.iter().for_each(|(peer_id, (best_hash, best_number))| { - chain_sync.add_peer(*peer_id, *best_hash, *best_number) - }); self.state = None; self.chain_sync = Some(chain_sync); + Ok(()) } else { unreachable!("Only warp & state strategies can finish; qed") diff --git a/substrate/client/network/sync/src/strategy/chain_sync.rs b/substrate/client/network/sync/src/strategy/chain_sync.rs index 9f5dc6bab41d..0e6056725d92 100644 --- a/substrate/client/network/sync/src/strategy/chain_sync.rs +++ b/substrate/client/network/sync/src/strategy/chain_sync.rs @@ -380,10 +380,24 @@ where max_blocks_per_request: u32, metrics_registry: Option, peer_pool: Arc>, + initial_peers: impl Iterator)>, ) -> Result { let mut sync = Self { client, - peers: HashMap::new(), + peers: initial_peers + .map(|(peer_id, best_hash, best_number)| { + ( + peer_id, + PeerSync { + peer_id, + common_number: Zero::zero(), + best_hash, + best_number, + state: PeerSyncState::New, + }, + ) + }) + .collect(), blocks: BlockCollection::new(), best_queued_hash: Default::default(), best_queued_number: Zero::zero(), diff --git a/substrate/client/network/sync/src/strategy/chain_sync/test.rs b/substrate/client/network/sync/src/strategy/chain_sync/test.rs index 3382cddef3c1..f9c522816da9 100644 --- a/substrate/client/network/sync/src/strategy/chain_sync/test.rs +++ b/substrate/client/network/sync/src/strategy/chain_sync/test.rs @@ -40,9 +40,16 @@ fn processes_empty_response_on_justification_request_for_unknown_block() { let peer_id = PeerId::random(); let peer_pool = Arc::new(Mutex::new(PeerPool::default())); - let mut sync = - ChainSync::new(ChainSyncMode::Full, client.clone(), 1, 64, None, peer_pool.clone()) - .unwrap(); + let mut sync = ChainSync::new( + ChainSyncMode::Full, + client.clone(), + 1, + 64, + None, + peer_pool.clone(), + std::iter::empty(), + ) + .unwrap(); let (a1_hash, a1_number) = { let a1 = BlockBuilderBuilder::new(&*client) @@ -101,8 +108,16 @@ fn restart_doesnt_affect_peers_downloading_finality_data() { // we request max 8 blocks to always initiate block requests to both peers for the test to be // deterministic - let mut sync = - ChainSync::new(ChainSyncMode::Full, client.clone(), 1, 8, None, peer_pool.clone()).unwrap(); + let mut sync = ChainSync::new( + ChainSyncMode::Full, + client.clone(), + 1, + 8, + None, + peer_pool.clone(), + std::iter::empty(), + ) + .unwrap(); let peer_id1 = PeerId::random(); let peer_id2 = PeerId::random(); @@ -302,9 +317,16 @@ fn do_ancestor_search_when_common_block_to_best_queued_gap_is_to_big() { let info = client.info(); let peer_pool = Arc::new(Mutex::new(PeerPool::default())); - let mut sync = - ChainSync::new(ChainSyncMode::Full, client.clone(), 5, 64, None, peer_pool.clone()) - .unwrap(); + let mut sync = ChainSync::new( + ChainSyncMode::Full, + client.clone(), + 5, + 64, + None, + peer_pool.clone(), + std::iter::empty(), + ) + .unwrap(); let peer_id1 = PeerId::random(); let peer_id2 = PeerId::random(); @@ -455,9 +477,16 @@ fn can_sync_huge_fork() { let info = client.info(); let peer_pool = Arc::new(Mutex::new(PeerPool::default())); - let mut sync = - ChainSync::new(ChainSyncMode::Full, client.clone(), 5, 64, None, peer_pool.clone()) - .unwrap(); + let mut sync = ChainSync::new( + ChainSyncMode::Full, + client.clone(), + 5, + 64, + None, + peer_pool.clone(), + std::iter::empty(), + ) + .unwrap(); let finalized_block = blocks[MAX_BLOCKS_TO_LOOK_BACKWARDS as usize * 2 - 1].clone(); let just = (*b"TEST", Vec::new()); @@ -592,9 +621,16 @@ fn 
syncs_fork_without_duplicate_requests() { let info = client.info(); let peer_pool = Arc::new(Mutex::new(PeerPool::default())); - let mut sync = - ChainSync::new(ChainSyncMode::Full, client.clone(), 5, 64, None, peer_pool.clone()) - .unwrap(); + let mut sync = ChainSync::new( + ChainSyncMode::Full, + client.clone(), + 5, + 64, + None, + peer_pool.clone(), + std::iter::empty(), + ) + .unwrap(); let finalized_block = blocks[MAX_BLOCKS_TO_LOOK_BACKWARDS as usize * 2 - 1].clone(); let just = (*b"TEST", Vec::new()); @@ -731,9 +767,16 @@ fn removes_target_fork_on_disconnect() { let blocks = (0..3).map(|_| build_block(&mut client, None, false)).collect::>(); let peer_pool = Arc::new(Mutex::new(PeerPool::default())); - let mut sync = - ChainSync::new(ChainSyncMode::Full, client.clone(), 1, 64, None, peer_pool.clone()) - .unwrap(); + let mut sync = ChainSync::new( + ChainSyncMode::Full, + client.clone(), + 1, + 64, + None, + peer_pool.clone(), + std::iter::empty(), + ) + .unwrap(); let peer_id1 = PeerId::random(); let common_block = blocks[1].clone(); @@ -760,9 +803,16 @@ fn can_import_response_with_missing_blocks() { let empty_client = Arc::new(TestClientBuilder::new().build()); let peer_pool = Arc::new(Mutex::new(PeerPool::default())); - let mut sync = - ChainSync::new(ChainSyncMode::Full, empty_client.clone(), 1, 64, None, peer_pool.clone()) - .unwrap(); + let mut sync = ChainSync::new( + ChainSyncMode::Full, + empty_client.clone(), + 1, + 64, + None, + peer_pool.clone(), + std::iter::empty(), + ) + .unwrap(); let peer_id1 = PeerId::random(); let best_block = blocks[3].clone(); @@ -797,9 +847,16 @@ fn ancestor_search_repeat() { fn sync_restart_removes_block_but_not_justification_requests() { let mut client = Arc::new(TestClientBuilder::new().build()); let peer_pool = Arc::new(Mutex::new(PeerPool::default())); - let mut sync = - ChainSync::new(ChainSyncMode::Full, client.clone(), 1, 64, None, peer_pool.clone()) - .unwrap(); + let mut sync = ChainSync::new( + ChainSyncMode::Full, + client.clone(), + 1, + 64, + None, + peer_pool.clone(), + std::iter::empty(), + ) + .unwrap(); let peers = vec![PeerId::random(), PeerId::random()]; @@ -951,9 +1008,16 @@ fn request_across_forks() { let peer_pool = Arc::new(Mutex::new(PeerPool::default())); - let mut sync = - ChainSync::new(ChainSyncMode::Full, client.clone(), 5, 64, None, peer_pool.clone()) - .unwrap(); + let mut sync = ChainSync::new( + ChainSyncMode::Full, + client.clone(), + 5, + 64, + None, + peer_pool.clone(), + std::iter::empty(), + ) + .unwrap(); // Add the peers, all at the common ancestor 100. let common_block = blocks.last().unwrap(); @@ -1038,9 +1102,16 @@ fn request_across_forks() { fn block_request_at_genesis_reserves_peer_in_peer_pool() { let client = Arc::new(TestClientBuilder::new().build()); let peer_pool = Arc::new(Mutex::new(PeerPool::default())); - let mut sync = - ChainSync::new(ChainSyncMode::Full, client.clone(), 1, 64, None, peer_pool.clone()) - .unwrap(); + let mut sync = ChainSync::new( + ChainSyncMode::Full, + client.clone(), + 1, + 64, + None, + peer_pool.clone(), + std::iter::empty(), + ) + .unwrap(); // Add a peer. 
let peer_id = PeerId::random(); @@ -1064,9 +1135,16 @@ fn block_request_at_genesis_reserves_peer_in_peer_pool() { fn block_response_frees_peer_in_peer_pool() { let client = Arc::new(TestClientBuilder::new().build()); let peer_pool = Arc::new(Mutex::new(PeerPool::default())); - let mut sync = - ChainSync::new(ChainSyncMode::Full, client.clone(), 1, 64, None, peer_pool.clone()) - .unwrap(); + let mut sync = ChainSync::new( + ChainSyncMode::Full, + client.clone(), + 1, + 64, + None, + peer_pool.clone(), + std::iter::empty(), + ) + .unwrap(); // Create blocks. let blocks = { @@ -1115,9 +1193,16 @@ fn new_peer_ancestry_search_reserves_peer_in_peer_pool() { block_on(client.import(BlockOrigin::Own, block.clone())).unwrap(); let peer_pool = Arc::new(Mutex::new(PeerPool::default())); - let mut sync = - ChainSync::new(ChainSyncMode::Full, client.clone(), 1, 64, None, peer_pool.clone()) - .unwrap(); + let mut sync = ChainSync::new( + ChainSyncMode::Full, + client.clone(), + 1, + 64, + None, + peer_pool.clone(), + std::iter::empty(), + ) + .unwrap(); // Check that we are not at genesis, so ancestry search will be started. assert!(!sync.best_queued_number.is_zero()); @@ -1145,9 +1230,16 @@ fn forced_ancestry_search_reserves_peer_in_peer_pool() { let mut client = Arc::new(TestClientBuilder::new().build()); let peer_pool = Arc::new(Mutex::new(PeerPool::default())); - let mut sync = - ChainSync::new(ChainSyncMode::Full, client.clone(), 1, 64, None, peer_pool.clone()) - .unwrap(); + let mut sync = ChainSync::new( + ChainSyncMode::Full, + client.clone(), + 1, + 64, + None, + peer_pool.clone(), + std::iter::empty(), + ) + .unwrap(); // Add a peer that will be higher than our queued number (see below). let peer_id = PeerId::random(); @@ -1194,9 +1286,16 @@ fn peer_block_request_reserves_peer_in_peer_pool() { let client = Arc::new(TestClientBuilder::new().build()); let peer_pool = Arc::new(Mutex::new(PeerPool::default())); - let mut sync = - ChainSync::new(ChainSyncMode::Full, client.clone(), 1, 64, None, peer_pool.clone()) - .unwrap(); + let mut sync = ChainSync::new( + ChainSyncMode::Full, + client.clone(), + 1, + 64, + None, + peer_pool.clone(), + std::iter::empty(), + ) + .unwrap(); // Add a peer that will be higher than our queued number (see below). let peer_id = PeerId::random(); @@ -1225,9 +1324,16 @@ fn fork_sync_request_reserves_peer_in_peer_pool() { let client = Arc::new(TestClientBuilder::new().build()); let peer_pool = Arc::new(Mutex::new(PeerPool::default())); - let mut sync = - ChainSync::new(ChainSyncMode::Full, client.clone(), 1, 64, None, peer_pool.clone()) - .unwrap(); + let mut sync = ChainSync::new( + ChainSyncMode::Full, + client.clone(), + 1, + 64, + None, + peer_pool.clone(), + std::iter::empty(), + ) + .unwrap(); // Add a peer that will be higher than our queued number (see below). let peer_id = PeerId::random(); @@ -1264,9 +1370,16 @@ fn justification_request_reserves_peer_in_peer_pool() { let mut client = Arc::new(TestClientBuilder::new().build()); let peer_pool = Arc::new(Mutex::new(PeerPool::default())); - let mut sync = - ChainSync::new(ChainSyncMode::Full, client.clone(), 1, 64, None, peer_pool.clone()) - .unwrap(); + let mut sync = ChainSync::new( + ChainSyncMode::Full, + client.clone(), + 1, + 64, + None, + peer_pool.clone(), + std::iter::empty(), + ) + .unwrap(); // Add a peer that will be higher than our queued number (see below). 
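// Aside: with the new `initial_peers` constructor argument, already-connected peers can
// be handed to `ChainSync::new` up front instead of via `add_peer` afterwards; either
// way they start in `PeerSyncState::New`. A sketch mirroring the test setup above (the
// single peer tuple is illustrative):
//
//     let initial_peers = vec![(PeerId::random(), Hash::random(), 10)].into_iter();
//     let mut sync = ChainSync::new(
//         ChainSyncMode::Full,
//         client.clone(),
//         1,
//         64,
//         None,
//         peer_pool.clone(),
//         initial_peers,
//     )
//     .unwrap();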
let peer_id = PeerId::random(); @@ -1303,9 +1416,16 @@ fn justification_response_frees_peer_in_peer_pool() { let mut client = Arc::new(TestClientBuilder::new().build()); let peer_pool = Arc::new(Mutex::new(PeerPool::default())); - let mut sync = - ChainSync::new(ChainSyncMode::Full, client.clone(), 1, 64, None, peer_pool.clone()) - .unwrap(); + let mut sync = ChainSync::new( + ChainSyncMode::Full, + client.clone(), + 1, + 64, + None, + peer_pool.clone(), + std::iter::empty(), + ) + .unwrap(); // Add a peer that will be higher than our queued number (see below). let peer_id = PeerId::random(); From 7aa59a3bda97e7651b2e49f61776c6d770cfcde5 Mon Sep 17 00:00:00 2001 From: Dmitry Markin Date: Thu, 1 Feb 2024 10:09:44 +0200 Subject: [PATCH 28/31] minor: log message --- substrate/client/network/sync/src/strategy/chain_sync.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/substrate/client/network/sync/src/strategy/chain_sync.rs b/substrate/client/network/sync/src/strategy/chain_sync.rs index 0e6056725d92..e2bc37b21252 100644 --- a/substrate/client/network/sync/src/strategy/chain_sync.rs +++ b/substrate/client/network/sync/src/strategy/chain_sync.rs @@ -1700,7 +1700,7 @@ where warn!( target: LOG_TARGET, "State inconsistency: peer {peer_id} is in the pool of connected peers, \ - but not known to `WarpSync`.", + but not known to `ChainSync`.", ); debug_assert!(false); None From 8d6a6d7a2a8b16cd4fc66c2a0cadbb13cc52b5e9 Mon Sep 17 00:00:00 2001 From: Dmitry Markin Date: Thu, 1 Feb 2024 11:08:15 +0200 Subject: [PATCH 29/31] Rename `extra_requests.rs` -> `justification_requests.rs`, add docs --- .../sync/src/{extra_requests.rs => justification_requests.rs} | 4 ++++ substrate/client/network/sync/src/lib.rs | 2 +- substrate/client/network/sync/src/strategy/chain_sync.rs | 2 +- 3 files changed, 6 insertions(+), 2 deletions(-) rename substrate/client/network/sync/src/{extra_requests.rs => justification_requests.rs} (98%) diff --git a/substrate/client/network/sync/src/extra_requests.rs b/substrate/client/network/sync/src/justification_requests.rs similarity index 98% rename from substrate/client/network/sync/src/extra_requests.rs rename to substrate/client/network/sync/src/justification_requests.rs index 4b52f04b434a..c596e3760b09 100644 --- a/substrate/client/network/sync/src/extra_requests.rs +++ b/substrate/client/network/sync/src/justification_requests.rs @@ -16,6 +16,10 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . +//! Justification requests scheduling. [`ExtraRequests`] manages requesting justifications +//! from peers taking into account forks and their finalization (dropping pending requests +//! that don't make sense after one of the forks is finalized). 
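// Aside: a sketch of the pruning rule the module docs above describe. When a block is
// finalized, a pending justification request can be dropped if it targets a block at or
// below the finalized height that is not on the finalized chain. Illustrative only; the
// real implementation tracks pending requests in a `fork_tree::ForkTree`.

fn drop_on_finality(
    request_number: u64,
    finalized_number: u64,
    is_ancestor_of_finalized: bool,
) -> bool {
    request_number <= finalized_number && !is_ancestor_of_finalized
}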
+ use crate::{ peer_pool::PeerPool, request_metrics::Metrics, strategy::chain_sync::PeerSync, LOG_TARGET, }; diff --git a/substrate/client/network/sync/src/lib.rs b/substrate/client/network/sync/src/lib.rs index ce5a7642300e..b51da98e7533 100644 --- a/substrate/client/network/sync/src/lib.rs +++ b/substrate/client/network/sync/src/lib.rs @@ -23,8 +23,8 @@ pub use strategy::warp::{WarpSyncParams, WarpSyncPhase, WarpSyncProgress}; pub use types::{SyncEvent, SyncEventStream, SyncState, SyncStatus, SyncStatusProvider}; mod block_announce_validator; -mod extra_requests; mod futures_stream; +mod justification_requests; mod peer_pool; mod pending_responses; mod request_metrics; diff --git a/substrate/client/network/sync/src/strategy/chain_sync.rs b/substrate/client/network/sync/src/strategy/chain_sync.rs index e2bc37b21252..b5a84feeecce 100644 --- a/substrate/client/network/sync/src/strategy/chain_sync.rs +++ b/substrate/client/network/sync/src/strategy/chain_sync.rs @@ -31,7 +31,7 @@ use super::PeerPool; use crate::{ blocks::BlockCollection, - extra_requests::ExtraRequests, + justification_requests::ExtraRequests, schema::v1::StateResponse, strategy::{ state_sync::{ImportResult, StateSync, StateSyncProvider}, From 02546809750353f95a3789f64f7d3272de208419 Mon Sep 17 00:00:00 2001 From: Dmitry Markin Date: Thu, 1 Feb 2024 11:59:05 +0200 Subject: [PATCH 30/31] Hide mutex behind `PeerPool` interface --- .../sync/src/justification_requests.rs | 16 +-- .../client/network/sync/src/peer_pool.rs | 98 +++++++++++-------- substrate/client/network/sync/src/strategy.rs | 11 +-- .../network/sync/src/strategy/chain_sync.rs | 15 ++- .../sync/src/strategy/chain_sync/test.rs | 79 ++++++++------- .../client/network/sync/src/strategy/state.rs | 67 ++++++------- .../client/network/sync/src/strategy/warp.rs | 96 +++++++++--------- 7 files changed, 187 insertions(+), 195 deletions(-) diff --git a/substrate/client/network/sync/src/justification_requests.rs b/substrate/client/network/sync/src/justification_requests.rs index c596e3760b09..3fbcb40ed5d5 100644 --- a/substrate/client/network/sync/src/justification_requests.rs +++ b/substrate/client/network/sync/src/justification_requests.rs @@ -26,12 +26,10 @@ use crate::{ use fork_tree::ForkTree; use libp2p::PeerId; use log::{debug, error, trace, warn}; -use parking_lot::Mutex; use sp_blockchain::Error as ClientError; use sp_runtime::traits::{Block as BlockT, NumberFor, Zero}; use std::{ collections::{HashMap, HashSet, VecDeque}, - sync::Arc, time::{Duration, Instant}, }; @@ -292,7 +290,7 @@ impl<'a, B: BlockT> Matcher<'a, B> { pub(crate) fn next( &mut self, peers: &HashMap>, - peer_pool: &Arc>, + peer_pool: &PeerPool, ) -> Option<(PeerId, ExtraRequest)> { if self.remaining == 0 { return None @@ -360,14 +358,10 @@ mod tests { peer_pool::PeerPool, strategy::chain_sync::{PeerSync, PeerSyncState}, }; - use parking_lot::Mutex; use quickcheck::{Arbitrary, Gen, QuickCheck}; use sp_blockchain::Error as ClientError; use sp_test_primitives::{Block, BlockNumber, Hash}; - use std::{ - collections::{HashMap, HashSet}, - sync::Arc, - }; + use std::collections::{HashMap, HashSet}; #[test] fn requests_are_processed_in_order() { @@ -589,18 +583,18 @@ mod tests { #[derive(Debug, Clone)] struct ArbitraryPeers { peers: HashMap>, - peer_pool: Arc>, + peer_pool: PeerPool, } impl Arbitrary for ArbitraryPeers { fn arbitrary(g: &mut Gen) -> Self { let mut peers = HashMap::with_capacity(g.size()); - let peer_pool = Arc::new(Mutex::new(PeerPool::default())); + let peer_pool = PeerPool::default(); 
for _ in 0..g.size() { let ps = ArbitraryPeerSync::arbitrary(g).0; let peer_id = ps.peer_id; peers.insert(peer_id, ps); - peer_pool.lock().add_peer(peer_id); + peer_pool.add_peer(peer_id); } ArbitraryPeers { peers, peer_pool } } diff --git a/substrate/client/network/sync/src/peer_pool.rs b/substrate/client/network/sync/src/peer_pool.rs index 3914a5e62e68..41db903c4b63 100644 --- a/substrate/client/network/sync/src/peer_pool.rs +++ b/substrate/client/network/sync/src/peer_pool.rs @@ -21,7 +21,8 @@ use crate::LOG_TARGET; use libp2p::PeerId; use log::warn; -use std::collections::HashMap; +use parking_lot::{Mutex, MutexGuard}; +use std::{collections::HashMap, sync::Arc}; #[derive(Debug)] enum PeerStatus { @@ -35,8 +36,13 @@ impl PeerStatus { } } -#[derive(Default, Debug)] +#[derive(Default, Debug, Clone)] pub struct PeerPool { + inner: Arc>, +} + +#[derive(Default, Debug)] +pub struct PeerPoolInner { peers: HashMap, } @@ -55,23 +61,29 @@ impl<'a> AvailablePeer<'a> { } } +impl PeerPoolInner { + pub fn available_peers<'a>(&'a mut self) -> impl Iterator + 'a { + self.peers.iter_mut().filter_map(|(peer_id, status)| { + status.is_available().then_some(AvailablePeer::<'a> { peer_id, status }) + }) + } +} + impl PeerPool { - pub fn add_peer(&mut self, peer_id: PeerId) { - self.peers.insert(peer_id, PeerStatus::Available); + pub fn lock<'a>(&'a self) -> MutexGuard<'a, PeerPoolInner> { + self.inner.lock() } - pub fn remove_peer(&mut self, peer_id: &PeerId) { - self.peers.remove(peer_id); + pub fn add_peer(&self, peer_id: PeerId) { + self.inner.lock().peers.insert(peer_id, PeerStatus::Available); } - pub fn available_peers<'a>(&'a mut self) -> impl Iterator + 'a { - self.peers.iter_mut().filter_map(|(peer_id, status)| { - status.is_available().then_some(AvailablePeer::<'a> { peer_id, status }) - }) + pub fn remove_peer(&self, peer_id: &PeerId) { + self.inner.lock().peers.remove(peer_id); } - pub fn try_reserve_peer(&mut self, peer_id: &PeerId) -> bool { - match self.peers.get_mut(peer_id) { + pub fn try_reserve_peer(&self, peer_id: &PeerId) -> bool { + match self.inner.lock().peers.get_mut(peer_id) { Some(peer_status) => match peer_status { PeerStatus::Available => { *peer_status = PeerStatus::Reserved; @@ -86,8 +98,8 @@ impl PeerPool { } } - pub fn free_peer(&mut self, peer_id: &PeerId) { - match self.peers.get_mut(peer_id) { + pub fn free_peer(&self, peer_id: &PeerId) { + match self.inner.lock().peers.get_mut(peer_id) { Some(peer_status) => match peer_status { PeerStatus::Available => { warn!(target: LOG_TARGET, "Trying to free available peer {peer_id}.") @@ -109,94 +121,94 @@ mod test { #[test] fn adding_peer() { - let mut peer_pool = PeerPool::default(); - assert_eq!(peer_pool.available_peers().count(), 0); + let peer_pool = PeerPool::default(); + assert_eq!(peer_pool.lock().available_peers().count(), 0); // Add peer. let peer_id = PeerId::random(); peer_pool.add_peer(peer_id); // Peer is available. - assert_eq!(peer_pool.available_peers().count(), 1); - assert!(peer_pool.available_peers().any(|p| *p.peer_id() == peer_id)); + assert_eq!(peer_pool.lock().available_peers().count(), 1); + assert!(peer_pool.lock().available_peers().any(|p| *p.peer_id() == peer_id)); } #[test] fn removing_peer() { - let mut peer_pool = PeerPool::default(); - assert_eq!(peer_pool.available_peers().count(), 0); + let peer_pool = PeerPool::default(); + assert_eq!(peer_pool.lock().available_peers().count(), 0); // Add peer. 
let peer_id = PeerId::random(); peer_pool.add_peer(peer_id); - assert_eq!(peer_pool.available_peers().count(), 1); - assert!(peer_pool.available_peers().any(|p| *p.peer_id() == peer_id)); + assert_eq!(peer_pool.lock().available_peers().count(), 1); + assert!(peer_pool.lock().available_peers().any(|p| *p.peer_id() == peer_id)); // Remove peer. peer_pool.remove_peer(&peer_id); - assert_eq!(peer_pool.available_peers().count(), 0); + assert_eq!(peer_pool.lock().available_peers().count(), 0); } #[test] fn reserving_peer_via_available_peers() { - let mut peer_pool = PeerPool::default(); - assert_eq!(peer_pool.available_peers().count(), 0); + let peer_pool = PeerPool::default(); + assert_eq!(peer_pool.lock().available_peers().count(), 0); let peer_id = PeerId::random(); peer_pool.add_peer(peer_id); - assert_eq!(peer_pool.available_peers().count(), 1); - assert!(peer_pool.available_peers().any(|p| *p.peer_id() == peer_id)); + assert_eq!(peer_pool.lock().available_peers().count(), 1); + assert!(peer_pool.lock().available_peers().any(|p| *p.peer_id() == peer_id)); // Reserve via `available_peers()`. - peer_pool.available_peers().for_each(|mut available_peer| { + peer_pool.lock().available_peers().for_each(|mut available_peer| { assert_eq!(*available_peer.peer_id(), peer_id); available_peer.reserve(); }); // Peer is reserved. - assert_eq!(peer_pool.available_peers().count(), 0); + assert_eq!(peer_pool.lock().available_peers().count(), 0); assert!(!peer_pool.try_reserve_peer(&peer_id)); } #[test] fn reserving_peer_via_try_reserve() { - let mut peer_pool = PeerPool::default(); - assert_eq!(peer_pool.available_peers().count(), 0); + let peer_pool = PeerPool::default(); + assert_eq!(peer_pool.lock().available_peers().count(), 0); let peer_id = PeerId::random(); peer_pool.add_peer(peer_id); - assert_eq!(peer_pool.available_peers().count(), 1); - assert!(peer_pool.available_peers().any(|p| *p.peer_id() == peer_id)); + assert_eq!(peer_pool.lock().available_peers().count(), 1); + assert!(peer_pool.lock().available_peers().any(|p| *p.peer_id() == peer_id)); // Reserve via `try_reserve_peer()`. assert!(peer_pool.try_reserve_peer(&peer_id)); // Peer is reserved. - assert_eq!(peer_pool.available_peers().count(), 0); + assert_eq!(peer_pool.lock().available_peers().count(), 0); assert!(!peer_pool.try_reserve_peer(&peer_id)); } #[test] fn freeing_peer() { - let mut peer_pool = PeerPool::default(); - assert_eq!(peer_pool.available_peers().count(), 0); + let peer_pool = PeerPool::default(); + assert_eq!(peer_pool.lock().available_peers().count(), 0); let peer_id = PeerId::random(); peer_pool.add_peer(peer_id); - assert_eq!(peer_pool.available_peers().count(), 1); - assert!(peer_pool.available_peers().any(|p| *p.peer_id() == peer_id)); + assert_eq!(peer_pool.lock().available_peers().count(), 1); + assert!(peer_pool.lock().available_peers().any(|p| *p.peer_id() == peer_id)); // Reserve via `try_reserve_peer()`. assert!(peer_pool.try_reserve_peer(&peer_id)); - assert_eq!(peer_pool.available_peers().count(), 0); + assert_eq!(peer_pool.lock().available_peers().count(), 0); assert!(!peer_pool.try_reserve_peer(&peer_id)); // Free peer. peer_pool.free_peer(&peer_id); // Peer is available. - assert_eq!(peer_pool.available_peers().count(), 1); - assert!(peer_pool.available_peers().any(|p| *p.peer_id() == peer_id)); + assert_eq!(peer_pool.lock().available_peers().count(), 1); + assert!(peer_pool.lock().available_peers().any(|p| *p.peer_id() == peer_id)); // And can be reserved again. 
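// Aside: after this change `PeerPool` is a cheap, clonable handle over
// `Arc<Mutex<PeerPoolInner>>`, so each strategy can own its own clone while all of them
// share one pool. A minimal sketch of that sharing, written as an extra test-style
// snippet (not part of the patch):

#[test]
fn clones_share_one_pool() {
    let pool = PeerPool::default();
    let handle = pool.clone(); // points at the same inner state

    handle.add_peer(PeerId::random());

    // The peer added through the clone is visible through the original handle.
    assert_eq!(pool.lock().available_peers().count(), 1);
}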
assert!(peer_pool.try_reserve_peer(&peer_id)); @@ -204,8 +216,8 @@ mod test { #[test] fn reserving_unknown_peer_fails() { - let mut peer_pool = PeerPool::default(); - assert_eq!(peer_pool.available_peers().count(), 0); + let peer_pool = PeerPool::default(); + assert_eq!(peer_pool.lock().available_peers().count(), 0); let peer_id = PeerId::random(); assert!(!peer_pool.try_reserve_peer(&peer_id)); diff --git a/substrate/client/network/sync/src/strategy.rs b/substrate/client/network/sync/src/strategy.rs index 31a288b54093..b5c95f8e34c3 100644 --- a/substrate/client/network/sync/src/strategy.rs +++ b/substrate/client/network/sync/src/strategy.rs @@ -32,7 +32,6 @@ use crate::{ use chain_sync::{ChainSync, ChainSyncAction, ChainSyncMode}; use libp2p::PeerId; use log::{error, info}; -use parking_lot::Mutex; use prometheus_endpoint::Registry; use sc_client_api::{BlockBackend, ProofProvider}; use sc_consensus::{BlockImportError, BlockImportStatus, IncomingBlock}; @@ -154,7 +153,7 @@ pub struct SyncingStrategy { warp: Option>, state: Option>, chain_sync: Option>, - peer_pool: Arc>, + peer_pool: PeerPool, peer_best_blocks: HashMap)>, } @@ -178,7 +177,7 @@ where if let SyncMode::Warp = config.mode { let warp_sync_config = warp_sync_config .expect("Warp sync configuration must be supplied in warp sync mode."); - let peer_pool = Arc::new(Mutex::new(PeerPool::default())); + let peer_pool = PeerPool::default(); let warp_sync = WarpSync::new(client.clone(), warp_sync_config, peer_pool.clone()); Ok(Self { config, @@ -190,7 +189,7 @@ where peer_best_blocks: Default::default(), }) } else { - let peer_pool = Arc::new(Mutex::new(PeerPool::default())); + let peer_pool = PeerPool::default(); let chain_sync = ChainSync::new( chain_sync_mode(config.mode), client.clone(), @@ -214,7 +213,7 @@ where /// Notify that a new peer has connected. pub fn add_peer(&mut self, peer_id: PeerId, best_hash: B::Hash, best_number: NumberFor) { - self.peer_pool.lock().add_peer(peer_id); + self.peer_pool.add_peer(peer_id); self.peer_best_blocks.insert(peer_id, (best_hash, best_number)); self.warp.as_mut().map(|s| s.add_peer(peer_id, best_hash, best_number)); @@ -228,7 +227,7 @@ where self.state.as_mut().map(|s| s.remove_peer(peer_id)); self.chain_sync.as_mut().map(|s| s.remove_peer(peer_id)); - self.peer_pool.lock().remove_peer(peer_id); + self.peer_pool.remove_peer(peer_id); self.peer_best_blocks.remove(peer_id); } diff --git a/substrate/client/network/sync/src/strategy/chain_sync.rs b/substrate/client/network/sync/src/strategy/chain_sync.rs index b5a84feeecce..0326f6dd1d92 100644 --- a/substrate/client/network/sync/src/strategy/chain_sync.rs +++ b/substrate/client/network/sync/src/strategy/chain_sync.rs @@ -44,7 +44,6 @@ use crate::{ use codec::Encode; use libp2p::PeerId; use log::{debug, error, info, trace, warn}; -use parking_lot::Mutex; use prometheus_endpoint::{register, Gauge, GaugeVec, Opts, PrometheusError, Registry, U64}; use sc_client_api::{BlockBackend, ProofProvider}; use sc_consensus::{BlockImportError, BlockImportStatus, IncomingBlock}; @@ -287,7 +286,7 @@ pub struct ChainSync { /// Prometheus metrics. metrics: Option, /// Peer pool to reserve peers for requests from. 
- peer_pool: Arc>, + peer_pool: PeerPool, } /// All the data we have about a Peer that we are trying to sync with @@ -379,7 +378,7 @@ where max_parallel_downloads: u32, max_blocks_per_request: u32, metrics_registry: Option, - peer_pool: Arc>, + peer_pool: PeerPool, initial_peers: impl Iterator)>, ) -> Result { let mut sync = Self { @@ -573,7 +572,7 @@ where (Some(PeerSyncState::Available), None) } else { - if self.peer_pool.lock().try_reserve_peer(&peer_id) { + if self.peer_pool.try_reserve_peer(&peer_id) { let common_best = std::cmp::min(self.best_queued_number, best_number); debug!( @@ -742,7 +741,7 @@ where blocks.reverse() } self.allowed_requests.add(peer_id); - self.peer_pool.lock().free_peer(peer_id); + self.peer_pool.free_peer(peer_id); if let Some(request) = request { match &mut peer.state { PeerSyncState::DownloadingNew(_) => { @@ -997,7 +996,7 @@ where }; self.allowed_requests.add(&peer_id); - self.peer_pool.lock().free_peer(&peer_id); + self.peer_pool.free_peer(&peer_id); if let PeerSyncState::DownloadingJustification(hash) = peer.state { peer.state = PeerSyncState::Available; @@ -1379,7 +1378,7 @@ where PeerSyncState::DownloadingState => { self.add_peer(peer_id, peer_sync.best_hash, peer_sync.best_number); self.actions.push(ChainSyncAction::CancelRequest { peer_id }); - self.peer_pool.lock().free_peer(&peer_id); + self.peer_pool.free_peer(&peer_id); }, PeerSyncState::DownloadingJustification(_) => { // Peers that were downloading justifications @@ -1771,7 +1770,7 @@ where if let PeerSyncState::DownloadingState = peer.state { peer.state = PeerSyncState::Available; self.allowed_requests.set_all(); - self.peer_pool.lock().free_peer(peer_id); + self.peer_pool.free_peer(peer_id); } } let import_result = if let Some(sync) = &mut self.state_sync { diff --git a/substrate/client/network/sync/src/strategy/chain_sync/test.rs b/substrate/client/network/sync/src/strategy/chain_sync/test.rs index f9c522816da9..395502f8b233 100644 --- a/substrate/client/network/sync/src/strategy/chain_sync/test.rs +++ b/substrate/client/network/sync/src/strategy/chain_sync/test.rs @@ -20,7 +20,6 @@ use super::*; use futures::executor::block_on; -use parking_lot::Mutex; use sc_block_builder::BlockBuilderBuilder; use sc_network_common::sync::message::{BlockAnnounce, BlockData, BlockState, FromBlock}; use sp_blockchain::HeaderBackend; @@ -38,7 +37,7 @@ fn processes_empty_response_on_justification_request_for_unknown_block() { let client = Arc::new(TestClientBuilder::new().build()); let peer_id = PeerId::random(); - let peer_pool = Arc::new(Mutex::new(PeerPool::default())); + let peer_pool = PeerPool::default(); let mut sync = ChainSync::new( ChainSyncMode::Full, @@ -64,7 +63,7 @@ fn processes_empty_response_on_justification_request_for_unknown_block() { }; // add a new peer with the same best block - peer_pool.lock().add_peer(peer_id); + peer_pool.add_peer(peer_id); sync.add_peer(peer_id, a1_hash, a1_number); // Transit the peer into a requestable state let _ = sync.handle_new_peers().collect::>(); @@ -104,7 +103,7 @@ fn processes_empty_response_on_justification_request_for_unknown_block() { #[test] fn restart_doesnt_affect_peers_downloading_finality_data() { let mut client = Arc::new(TestClientBuilder::new().build()); - let peer_pool = Arc::new(Mutex::new(PeerPool::default())); + let peer_pool = PeerPool::default(); // we request max 8 blocks to always initiate block requests to both peers for the test to be // deterministic @@ -143,8 +142,8 @@ fn restart_doesnt_affect_peers_downloading_finality_data() { let 
(b1_hash, b1_number) = new_blocks(50); // add 2 peers at blocks that we don't have locally - peer_pool.lock().add_peer(peer_id1); - peer_pool.lock().add_peer(peer_id2); + peer_pool.add_peer(peer_id1); + peer_pool.add_peer(peer_id2); // note peer 2 is at block 10 > 8, so we always have something to request from it, even if the // first request went to peer 1 sync.add_peer(peer_id1, Hash::random(), 42); @@ -161,7 +160,7 @@ fn restart_doesnt_affect_peers_downloading_finality_data() { })); // add a new peer at a known block - peer_pool.lock().add_peer(peer_id3); + peer_pool.add_peer(peer_id3); sync.add_peer(peer_id3, b1_hash, b1_number); // transit the peer into a requestable state let _ = sync.handle_new_peers().collect::>(); @@ -315,7 +314,7 @@ fn do_ancestor_search_when_common_block_to_best_queued_gap_is_to_big() { let mut client = Arc::new(TestClientBuilder::new().build()); let info = client.info(); - let peer_pool = Arc::new(Mutex::new(PeerPool::default())); + let peer_pool = PeerPool::default(); let mut sync = ChainSync::new( ChainSyncMode::Full, @@ -334,8 +333,8 @@ fn do_ancestor_search_when_common_block_to_best_queued_gap_is_to_big() { let best_block = blocks.last().unwrap().clone(); let max_blocks_to_request = sync.max_blocks_per_request; // Connect the node we will sync from - peer_pool.lock().add_peer(peer_id1); - peer_pool.lock().add_peer(peer_id2); + peer_pool.add_peer(peer_id1); + peer_pool.add_peer(peer_id2); sync.add_peer(peer_id1, best_block.hash(), *best_block.header().number()); sync.add_peer(peer_id2, info.best_hash, 0); // Transit peers into requestable state @@ -475,7 +474,7 @@ fn can_sync_huge_fork() { }; let info = client.info(); - let peer_pool = Arc::new(Mutex::new(PeerPool::default())); + let peer_pool = PeerPool::default(); let mut sync = ChainSync::new( ChainSyncMode::Full, @@ -497,7 +496,7 @@ fn can_sync_huge_fork() { let common_block = blocks[MAX_BLOCKS_TO_LOOK_BACKWARDS as usize / 2].clone(); // Connect the node we will sync from - peer_pool.lock().add_peer(peer_id1); + peer_pool.add_peer(peer_id1); sync.add_peer(peer_id1, common_block.hash(), *common_block.header().number()); send_block_announce(fork_blocks.last().unwrap().header().clone(), peer_id1, &mut sync); @@ -619,7 +618,7 @@ fn syncs_fork_without_duplicate_requests() { }; let info = client.info(); - let peer_pool = Arc::new(Mutex::new(PeerPool::default())); + let peer_pool = PeerPool::default(); let mut sync = ChainSync::new( ChainSyncMode::Full, @@ -641,7 +640,7 @@ fn syncs_fork_without_duplicate_requests() { let common_block = blocks[MAX_BLOCKS_TO_LOOK_BACKWARDS as usize / 2].clone(); // Connect the node we will sync from - peer_pool.lock().add_peer(peer_id1); + peer_pool.add_peer(peer_id1); sync.add_peer(peer_id1, common_block.hash(), *common_block.header().number()); send_block_announce(fork_blocks.last().unwrap().header().clone(), peer_id1, &mut sync); @@ -765,7 +764,7 @@ fn removes_target_fork_on_disconnect() { sp_tracing::try_init_simple(); let mut client = Arc::new(TestClientBuilder::new().build()); let blocks = (0..3).map(|_| build_block(&mut client, None, false)).collect::>(); - let peer_pool = Arc::new(Mutex::new(PeerPool::default())); + let peer_pool = PeerPool::default(); let mut sync = ChainSync::new( ChainSyncMode::Full, @@ -781,7 +780,7 @@ fn removes_target_fork_on_disconnect() { let peer_id1 = PeerId::random(); let common_block = blocks[1].clone(); // Connect the node we will sync from - peer_pool.lock().add_peer(peer_id1); + peer_pool.add_peer(peer_id1); sync.add_peer(peer_id1, 
common_block.hash(), *common_block.header().number()); // Create a "new" header and announce it @@ -801,7 +800,7 @@ fn can_import_response_with_missing_blocks() { let blocks = (0..4).map(|_| build_block(&mut client2, None, false)).collect::>(); let empty_client = Arc::new(TestClientBuilder::new().build()); - let peer_pool = Arc::new(Mutex::new(PeerPool::default())); + let peer_pool = PeerPool::default(); let mut sync = ChainSync::new( ChainSyncMode::Full, @@ -816,7 +815,7 @@ fn can_import_response_with_missing_blocks() { let peer_id1 = PeerId::random(); let best_block = blocks[3].clone(); - peer_pool.lock().add_peer(peer_id1); + peer_pool.add_peer(peer_id1); sync.add_peer(peer_id1, best_block.hash(), *best_block.header().number()); // Manually prepare the peer for a block request. @@ -846,7 +845,7 @@ fn ancestor_search_repeat() { #[test] fn sync_restart_removes_block_but_not_justification_requests() { let mut client = Arc::new(TestClientBuilder::new().build()); - let peer_pool = Arc::new(Mutex::new(PeerPool::default())); + let peer_pool = PeerPool::default(); let mut sync = ChainSync::new( ChainSyncMode::Full, client.clone(), @@ -883,7 +882,7 @@ fn sync_restart_removes_block_but_not_justification_requests() { let mut pending_responses = HashSet::new(); // add new peer and request blocks from them - peer_pool.lock().add_peer(peers[0]); + peer_pool.add_peer(peers[0]); sync.add_peer(peers[0], Hash::random(), 42); sync.handle_new_peers().for_each(|result| { pending_responses.insert(result.unwrap().0); @@ -897,7 +896,7 @@ fn sync_restart_removes_block_but_not_justification_requests() { } // add a new peer at a known block - peer_pool.lock().add_peer(peers[1]); + peer_pool.add_peer(peers[1]); sync.add_peer(peers[1], b1_hash, b1_number); sync.handle_new_peers().for_each(|result| { pending_responses.insert(result.unwrap().0); @@ -1006,7 +1005,7 @@ fn request_across_forks() { fork_blocks }; - let peer_pool = Arc::new(Mutex::new(PeerPool::default())); + let peer_pool = PeerPool::default(); let mut sync = ChainSync::new( ChainSyncMode::Full, @@ -1022,10 +1021,10 @@ fn request_across_forks() { // Add the peers, all at the common ancestor 100. let common_block = blocks.last().unwrap(); let peer_id1 = PeerId::random(); - peer_pool.lock().add_peer(peer_id1); + peer_pool.add_peer(peer_id1); sync.add_peer(peer_id1, common_block.hash(), *common_block.header().number()); let peer_id2 = PeerId::random(); - peer_pool.lock().add_peer(peer_id2); + peer_pool.add_peer(peer_id2); sync.add_peer(peer_id2, common_block.hash(), *common_block.header().number()); // Peer 1 announces 107 from fork 1, 100-107 get downloaded. @@ -1101,7 +1100,7 @@ fn request_across_forks() { #[test] fn block_request_at_genesis_reserves_peer_in_peer_pool() { let client = Arc::new(TestClientBuilder::new().build()); - let peer_pool = Arc::new(Mutex::new(PeerPool::default())); + let peer_pool = PeerPool::default(); let mut sync = ChainSync::new( ChainSyncMode::Full, client.clone(), @@ -1115,7 +1114,7 @@ fn block_request_at_genesis_reserves_peer_in_peer_pool() { // Add a peer. let peer_id = PeerId::random(); - peer_pool.lock().add_peer(peer_id); + peer_pool.add_peer(peer_id); sync.add_peer(peer_id, Hash::random(), 10); // A request is sent to our peer. 
@@ -1134,7 +1133,7 @@ fn block_request_at_genesis_reserves_peer_in_peer_pool() { #[test] fn block_response_frees_peer_in_peer_pool() { let client = Arc::new(TestClientBuilder::new().build()); - let peer_pool = Arc::new(Mutex::new(PeerPool::default())); + let peer_pool = PeerPool::default(); let mut sync = ChainSync::new( ChainSyncMode::Full, client.clone(), @@ -1155,7 +1154,7 @@ fn block_response_frees_peer_in_peer_pool() { // Add a peer. let peer_id = PeerId::random(); - peer_pool.lock().add_peer(peer_id); + peer_pool.add_peer(peer_id); sync.add_peer(peer_id, best_block.hash(), *best_block.header().number()); // Transit peers into requestable state @@ -1192,7 +1191,7 @@ fn new_peer_ancestry_search_reserves_peer_in_peer_pool() { .block; block_on(client.import(BlockOrigin::Own, block.clone())).unwrap(); - let peer_pool = Arc::new(Mutex::new(PeerPool::default())); + let peer_pool = PeerPool::default(); let mut sync = ChainSync::new( ChainSyncMode::Full, client.clone(), @@ -1209,7 +1208,7 @@ fn new_peer_ancestry_search_reserves_peer_in_peer_pool() { // Add a peer. let peer_id = PeerId::random(); - peer_pool.lock().add_peer(peer_id); + peer_pool.add_peer(peer_id); assert!(peer_pool.lock().available_peers().any(|p| *p.peer_id() == peer_id)); sync.add_peer(peer_id, Hash::random(), 10); @@ -1229,7 +1228,7 @@ fn new_peer_ancestry_search_reserves_peer_in_peer_pool() { fn forced_ancestry_search_reserves_peer_in_peer_pool() { let mut client = Arc::new(TestClientBuilder::new().build()); - let peer_pool = Arc::new(Mutex::new(PeerPool::default())); + let peer_pool = PeerPool::default(); let mut sync = ChainSync::new( ChainSyncMode::Full, client.clone(), @@ -1243,7 +1242,7 @@ fn forced_ancestry_search_reserves_peer_in_peer_pool() { // Add a peer that will be higher than our queued number (see below). let peer_id = PeerId::random(); - peer_pool.lock().add_peer(peer_id); + peer_pool.add_peer(peer_id); assert!(peer_pool.lock().available_peers().any(|p| *p.peer_id() == peer_id)); sync.add_peer(peer_id, Hash::random(), (MAX_BLOCKS_TO_LOOK_BACKWARDS + 2).into()); @@ -1285,7 +1284,7 @@ fn forced_ancestry_search_reserves_peer_in_peer_pool() { fn peer_block_request_reserves_peer_in_peer_pool() { let client = Arc::new(TestClientBuilder::new().build()); - let peer_pool = Arc::new(Mutex::new(PeerPool::default())); + let peer_pool = PeerPool::default(); let mut sync = ChainSync::new( ChainSyncMode::Full, client.clone(), @@ -1299,7 +1298,7 @@ fn peer_block_request_reserves_peer_in_peer_pool() { // Add a peer that will be higher than our queued number (see below). let peer_id = PeerId::random(); - peer_pool.lock().add_peer(peer_id); + peer_pool.add_peer(peer_id); assert!(peer_pool.lock().available_peers().any(|p| *p.peer_id() == peer_id)); sync.add_peer(peer_id, Hash::random(), 10); @@ -1323,7 +1322,7 @@ fn peer_block_request_reserves_peer_in_peer_pool() { fn fork_sync_request_reserves_peer_in_peer_pool() { let client = Arc::new(TestClientBuilder::new().build()); - let peer_pool = Arc::new(Mutex::new(PeerPool::default())); + let peer_pool = PeerPool::default(); let mut sync = ChainSync::new( ChainSyncMode::Full, client.clone(), @@ -1337,7 +1336,7 @@ fn fork_sync_request_reserves_peer_in_peer_pool() { // Add a peer that will be higher than our queued number (see below). 
let peer_id = PeerId::random(); - peer_pool.lock().add_peer(peer_id); + peer_pool.add_peer(peer_id); assert!(peer_pool.lock().available_peers().any(|p| *p.peer_id() == peer_id)); let fork_hash = Hash::random(); let fork_number = 10; @@ -1369,7 +1368,7 @@ fn fork_sync_request_reserves_peer_in_peer_pool() { fn justification_request_reserves_peer_in_peer_pool() { let mut client = Arc::new(TestClientBuilder::new().build()); - let peer_pool = Arc::new(Mutex::new(PeerPool::default())); + let peer_pool = PeerPool::default(); let mut sync = ChainSync::new( ChainSyncMode::Full, client.clone(), @@ -1383,7 +1382,7 @@ fn justification_request_reserves_peer_in_peer_pool() { // Add a peer that will be higher than our queued number (see below). let peer_id = PeerId::random(); - peer_pool.lock().add_peer(peer_id); + peer_pool.add_peer(peer_id); assert!(peer_pool.lock().available_peers().any(|p| *p.peer_id() == peer_id)); sync.add_peer(peer_id, Hash::random(), (MAX_BLOCKS_TO_LOOK_BACKWARDS + 2).into()); @@ -1415,7 +1414,7 @@ fn justification_request_reserves_peer_in_peer_pool() { fn justification_response_frees_peer_in_peer_pool() { let mut client = Arc::new(TestClientBuilder::new().build()); - let peer_pool = Arc::new(Mutex::new(PeerPool::default())); + let peer_pool = PeerPool::default(); let mut sync = ChainSync::new( ChainSyncMode::Full, client.clone(), @@ -1429,7 +1428,7 @@ fn justification_response_frees_peer_in_peer_pool() { // Add a peer that will be higher than our queued number (see below). let peer_id = PeerId::random(); - peer_pool.lock().add_peer(peer_id); + peer_pool.add_peer(peer_id); assert!(peer_pool.lock().available_peers().any(|p| *p.peer_id() == peer_id)); sync.add_peer(peer_id, Hash::random(), (MAX_BLOCKS_TO_LOOK_BACKWARDS + 2).into()); diff --git a/substrate/client/network/sync/src/strategy/state.rs b/substrate/client/network/sync/src/strategy/state.rs index 921c014066c7..c56f2e242504 100644 --- a/substrate/client/network/sync/src/strategy/state.rs +++ b/substrate/client/network/sync/src/strategy/state.rs @@ -27,7 +27,6 @@ use crate::{ }; use libp2p::PeerId; use log::{debug, error, trace, warn}; -use parking_lot::Mutex; use sc_client_api::ProofProvider; use sc_consensus::{BlockImportError, BlockImportStatus, IncomingBlock}; use sc_network_common::sync::message::BlockAnnounce; @@ -76,7 +75,7 @@ pub struct StateStrategy { peers: HashMap>, actions: Vec>, succeded: bool, - peer_pool: Arc>, + peer_pool: PeerPool, } impl StateStrategy { @@ -88,7 +87,7 @@ impl StateStrategy { target_justifications: Option, skip_proof: bool, initial_peers: impl Iterator)>, - peer_pool: Arc>, + peer_pool: PeerPool, ) -> Self where Client: ProofProvider + Send + Sync + 'static, @@ -119,7 +118,7 @@ impl StateStrategy { fn new_with_provider( state_sync_provider: Box>, initial_peers: impl Iterator)>, - peer_pool: Arc>, + peer_pool: PeerPool, ) -> Self { Self { state_sync: state_sync_provider, @@ -180,7 +179,7 @@ impl StateStrategy { if let Some(peer) = self.peers.get_mut(&peer_id) { peer.state = PeerState::Available; } - self.peer_pool.lock().free_peer(&peer_id); + self.peer_pool.free_peer(&peer_id); let response: Box = response.0.downcast().map_err(|_error| { error!( @@ -437,8 +436,8 @@ mod test { .map(|best_number| (PeerId::random(), best_number)) .collect::>(); let initial_peers = peers.iter().map(|(p, n)| (*p, *n)); - let peer_pool = Arc::new(Mutex::new(PeerPool::default())); - peers.iter().for_each(|(peer_id, _)| peer_pool.lock().add_peer(*peer_id)); + let peer_pool = PeerPool::default(); + 
peers.iter().for_each(|(peer_id, _)| peer_pool.add_peer(*peer_id)); let mut state_strategy = StateStrategy::new( client.clone(), @@ -473,8 +472,8 @@ mod test { .map(|best_number| (PeerId::random(), best_number)) .collect::>(); let initial_peers = peers.iter().map(|(p, n)| (*p, *n)); - let peer_pool = Arc::new(Mutex::new(PeerPool::default())); - peers.iter().for_each(|(peer_id, _)| peer_pool.lock().add_peer(*peer_id)); + let peer_pool = PeerPool::default(); + peers.iter().for_each(|(peer_id, _)| peer_pool.add_peer(*peer_id)); let mut state_strategy = StateStrategy::new( client.clone(), @@ -505,10 +504,8 @@ mod test { let initial_peers = (1..=10).map(|best_number| (PeerId::random(), best_number)).collect::>(); - let peer_pool = Arc::new(Mutex::new(PeerPool::default())); - initial_peers - .iter() - .for_each(|(peer_id, _)| peer_pool.lock().add_peer(*peer_id)); + let peer_pool = PeerPool::default(); + initial_peers.iter().for_each(|(peer_id, _)| peer_pool.add_peer(*peer_id)); let mut state_strategy = StateStrategy::new( client.clone(), @@ -541,10 +538,8 @@ mod test { let initial_peers = (1..=10).map(|best_number| (PeerId::random(), best_number)).collect::>(); - let peer_pool = Arc::new(Mutex::new(PeerPool::default())); - initial_peers - .iter() - .for_each(|(peer_id, _)| peer_pool.lock().add_peer(*peer_id)); + let peer_pool = PeerPool::default(); + initial_peers.iter().for_each(|(peer_id, _)| peer_pool.add_peer(*peer_id)); let mut state_strategy = StateStrategy::new( client.clone(), @@ -577,10 +572,8 @@ mod test { let initial_peers = (1..=10).map(|best_number| (PeerId::random(), best_number)).collect::>(); - let peer_pool = Arc::new(Mutex::new(PeerPool::default())); - initial_peers - .iter() - .for_each(|(peer_id, _)| peer_pool.lock().add_peer(*peer_id)); + let peer_pool = PeerPool::default(); + initial_peers.iter().for_each(|(peer_id, _)| peer_pool.add_peer(*peer_id)); let mut state_strategy = StateStrategy::new( client.clone(), @@ -609,15 +602,15 @@ mod test { state_sync_provider.expect_import().return_once(|_| ImportResult::Continue); let peer_id = PeerId::random(); let initial_peers = std::iter::once((peer_id, 10)); - let peer_pool = Arc::new(Mutex::new(PeerPool::default())); - peer_pool.lock().add_peer(peer_id); + let peer_pool = PeerPool::default(); + peer_pool.add_peer(peer_id); let mut state_strategy = StateStrategy::new_with_provider( Box::new(state_sync_provider), initial_peers, peer_pool.clone(), ); // Manually set the peer's state. - assert!(peer_pool.lock().try_reserve_peer(&peer_id)); + assert!(peer_pool.try_reserve_peer(&peer_id)); state_strategy.peers.get_mut(&peer_id).unwrap().state = PeerState::DownloadingState; let dummy_response = OpaqueStateResponse(Box::new(StateResponse::default())); @@ -625,7 +618,7 @@ mod test { assert!(matches!(state_strategy.peers.get(&peer_id).unwrap().state, PeerState::Available)); // Peer is also available in `PeerPool`. 
- assert!(peer_pool.lock().try_reserve_peer(&peer_id)); + assert!(peer_pool.try_reserve_peer(&peer_id)); } #[test] @@ -635,15 +628,15 @@ mod test { state_sync_provider.expect_import().return_once(|_| ImportResult::BadResponse); let peer_id = PeerId::random(); let initial_peers = std::iter::once((peer_id, 10)); - let peer_pool = Arc::new(Mutex::new(PeerPool::default())); - peer_pool.lock().add_peer(peer_id); + let peer_pool = PeerPool::default(); + peer_pool.add_peer(peer_id); let mut state_strategy = StateStrategy::new_with_provider( Box::new(state_sync_provider), initial_peers, peer_pool.clone(), ); // Manually set the peer's state. - assert!(peer_pool.lock().try_reserve_peer(&peer_id)); + assert!(peer_pool.try_reserve_peer(&peer_id)); state_strategy.peers.get_mut(&peer_id).unwrap().state = PeerState::DownloadingState; // Receiving a response drops the peer. @@ -661,15 +654,15 @@ mod test { state_sync_provider.expect_import().return_once(|_| ImportResult::Continue); let peer_id = PeerId::random(); let initial_peers = std::iter::once((peer_id, 10)); - let peer_pool = Arc::new(Mutex::new(PeerPool::default())); - peer_pool.lock().add_peer(peer_id); + let peer_pool = PeerPool::default(); + peer_pool.add_peer(peer_id); let mut state_strategy = StateStrategy::new_with_provider( Box::new(state_sync_provider), initial_peers, peer_pool.clone(), ); // Manually set the peer's state . - assert!(peer_pool.lock().try_reserve_peer(&peer_id)); + assert!(peer_pool.try_reserve_peer(&peer_id)); state_strategy.peers.get_mut(&peer_id).unwrap().state = PeerState::DownloadingState; let dummy_response = OpaqueStateResponse(Box::new(StateResponse::default())); @@ -726,15 +719,15 @@ mod test { // Prepare `StateStrategy`. let peer_id = PeerId::random(); let initial_peers = std::iter::once((peer_id, 10)); - let peer_pool = Arc::new(Mutex::new(PeerPool::default())); - peer_pool.lock().add_peer(peer_id); + let peer_pool = PeerPool::default(); + peer_pool.add_peer(peer_id); let mut state_strategy = StateStrategy::new_with_provider( Box::new(state_sync_provider), initial_peers, peer_pool.clone(), ); // Manually set the peer's state . - assert!(peer_pool.lock().try_reserve_peer(&peer_id)); + assert!(peer_pool.try_reserve_peer(&peer_id)); state_strategy.peers.get_mut(&peer_id).unwrap().state = PeerState::DownloadingState; // Receive response. @@ -840,10 +833,8 @@ mod test { // Get enough peers for possible spurious requests. 
let initial_peers = (1..=10).map(|best_number| (PeerId::random(), best_number)).collect::>(); - let peer_pool = Arc::new(Mutex::new(PeerPool::default())); - initial_peers - .iter() - .for_each(|(peer_id, _)| peer_pool.lock().add_peer(*peer_id)); + let peer_pool = PeerPool::default(); + initial_peers.iter().for_each(|(peer_id, _)| peer_pool.add_peer(*peer_id)); let mut state_strategy = StateStrategy::new_with_provider( Box::new(state_sync_provider), diff --git a/substrate/client/network/sync/src/strategy/warp.rs b/substrate/client/network/sync/src/strategy/warp.rs index c0da5abfc747..c78b7106d9f9 100644 --- a/substrate/client/network/sync/src/strategy/warp.rs +++ b/substrate/client/network/sync/src/strategy/warp.rs @@ -30,7 +30,6 @@ use codec::{Decode, Encode}; use futures::channel::oneshot; use libp2p::PeerId; use log::{debug, error, trace, warn}; -use parking_lot::Mutex; use sc_network_common::sync::message::{ BlockAnnounce, BlockAttributes, BlockData, BlockRequest, Direction, FromBlock, }; @@ -238,7 +237,7 @@ pub struct WarpSync { peers: HashMap>, actions: Vec>, result: Option>, - peer_pool: Arc>, + peer_pool: PeerPool, } impl WarpSync @@ -252,7 +251,7 @@ where pub fn new( client: Arc, warp_sync_config: WarpSyncConfig, - peer_pool: Arc>, + peer_pool: PeerPool, ) -> Self { if client.info().finalized_state.is_some() { error!( @@ -358,7 +357,7 @@ where if let Some(peer) = self.peers.get_mut(peer_id) { peer.state = PeerState::Available; } - self.peer_pool.lock().free_peer(&peer_id); + self.peer_pool.free_peer(&peer_id); let Phase::WarpProof { set_id, authorities, last_hash, warp_sync_provider } = &mut self.phase @@ -417,7 +416,7 @@ where if let Some(peer) = self.peers.get_mut(&peer_id) { peer.state = PeerState::Available; } - self.peer_pool.lock().free_peer(&peer_id); + self.peer_pool.free_peer(&peer_id); let Phase::TargetBlock(header) = &mut self.phase else { debug!(target: LOG_TARGET, "Unexpected target block response from {peer_id}"); @@ -663,7 +662,6 @@ where #[cfg(test)] mod test { use super::*; - use parking_lot::Mutex; use sc_block_builder::BlockBuilderBuilder; use sp_blockchain::{BlockStatus, Error as BlockchainError, HeaderBackend, Info}; use sp_consensus_grandpa::{AuthorityList, SetId}; @@ -803,20 +801,20 @@ mod test { .once() .return_const(AuthorityList::default()); let config = WarpSyncConfig::WithProvider(Arc::new(provider)); - let peer_pool = Arc::new(Mutex::new(Default::default())); + let peer_pool = PeerPool::default(); let mut warp_sync = WarpSync::new(Arc::new(client), config, peer_pool.clone()); // Warp sync is not started when there is not enough peers. for _ in 0..(MIN_PEERS_TO_START_WARP_SYNC - 1) { let peer_id = PeerId::random(); - peer_pool.lock().add_peer(peer_id); + peer_pool.add_peer(peer_id); warp_sync.add_peer(peer_id, Hash::random(), 10); assert!(matches!(warp_sync.phase, Phase::WaitingForPeers { .. })) } // Now we have enough peers and warp sync is started. let peer_id = PeerId::random(); - peer_pool.lock().add_peer(peer_id); + peer_pool.add_peer(peer_id); warp_sync.add_peer(peer_id, Hash::random(), 10); assert!(matches!(warp_sync.phase, Phase::WarpProof { .. 
})) } @@ -850,12 +848,12 @@ mod test { .once() .return_const(AuthorityList::default()); let config = WarpSyncConfig::WithProvider(Arc::new(provider)); - let peer_pool = Arc::new(Mutex::new(Default::default())); + let peer_pool = PeerPool::default(); let mut warp_sync = WarpSync::new(Arc::new(client), config, peer_pool.clone()); for best_number in 1..11 { let peer_id = PeerId::random(); - peer_pool.lock().add_peer(peer_id); + peer_pool.add_peer(peer_id); warp_sync.add_peer(peer_id, Hash::random(), best_number); } @@ -874,12 +872,12 @@ mod test { .once() .return_const(AuthorityList::default()); let config = WarpSyncConfig::WithProvider(Arc::new(provider)); - let peer_pool = Arc::new(Mutex::new(Default::default())); + let peer_pool = PeerPool::default(); let mut warp_sync = WarpSync::new(Arc::new(client), config, peer_pool.clone()); for best_number in 1..11 { let peer_id = PeerId::random(); - peer_pool.lock().add_peer(peer_id); + peer_pool.add_peer(peer_id); warp_sync.add_peer(peer_id, Hash::random(), best_number); } @@ -897,13 +895,13 @@ mod test { .once() .return_const(AuthorityList::default()); let config = WarpSyncConfig::WithProvider(Arc::new(provider)); - let peer_pool = Arc::new(Mutex::new(Default::default())); + let peer_pool = PeerPool::default(); let mut warp_sync = WarpSync::new(Arc::new(client), config, peer_pool.clone()); // Make sure we have enough peers to make a request. for best_number in 1..11 { let peer_id = PeerId::random(); - peer_pool.lock().add_peer(peer_id); + peer_pool.add_peer(peer_id); warp_sync.add_peer(peer_id, Hash::random(), best_number); } @@ -923,13 +921,13 @@ mod test { .once() .return_const(AuthorityList::default()); let config = WarpSyncConfig::WithProvider(Arc::new(provider)); - let peer_pool = Arc::new(Mutex::new(Default::default())); + let peer_pool = PeerPool::default(); let mut warp_sync = WarpSync::new(Arc::new(client), config, peer_pool.clone()); // Make sure we have enough peers to make a request. for best_number in 1..11 { let peer_id = PeerId::random(); - peer_pool.lock().add_peer(peer_id); + peer_pool.add_peer(peer_id); warp_sync.add_peer(peer_id, Hash::random(), best_number); } assert!(matches!(warp_sync.phase, Phase::WarpProof { .. })); @@ -957,13 +955,13 @@ mod test { .once() .return_const(AuthorityList::default()); let config = WarpSyncConfig::WithProvider(Arc::new(provider)); - let peer_pool = Arc::new(Mutex::new(Default::default())); + let peer_pool = PeerPool::default(); let mut warp_sync = WarpSync::new(Arc::new(client), config, peer_pool.clone()); // Make sure we have enough peers to make requests. for best_number in 1..11 { let peer_id = PeerId::random(); - peer_pool.lock().add_peer(peer_id); + peer_pool.add_peer(peer_id); warp_sync.add_peer(peer_id, Hash::random(), best_number); } assert!(matches!(warp_sync.phase, Phase::WarpProof { .. })); @@ -983,13 +981,13 @@ mod test { .once() .return_const(AuthorityList::default()); let config = WarpSyncConfig::WithProvider(Arc::new(provider)); - let peer_pool = Arc::new(Mutex::new(Default::default())); + let peer_pool = PeerPool::default(); let mut warp_sync = WarpSync::new(Arc::new(client), config, peer_pool.clone()); // Make sure we have enough peers to make a request. for best_number in 1..11 { let peer_id = PeerId::random(); - peer_pool.lock().add_peer(peer_id); + peer_pool.add_peer(peer_id); warp_sync.add_peer(peer_id, Hash::random(), best_number); } assert!(matches!(warp_sync.phase, Phase::WarpProof { .. 
})); @@ -1013,13 +1011,13 @@ mod test { Err(Box::new(std::io::Error::new(ErrorKind::Other, "test-verification-failure"))) }); let config = WarpSyncConfig::WithProvider(Arc::new(provider)); - let peer_pool = Arc::new(Mutex::new(Default::default())); + let peer_pool = PeerPool::default(); let mut warp_sync = WarpSync::new(Arc::new(client), config, peer_pool.clone()); // Make sure we have enough peers to make a request. for best_number in 1..11 { let peer_id = PeerId::random(); - peer_pool.lock().add_peer(peer_id); + peer_pool.add_peer(peer_id); warp_sync.add_peer(peer_id, Hash::random(), best_number); } assert!(matches!(warp_sync.phase, Phase::WarpProof { .. })); @@ -1053,13 +1051,13 @@ mod test { Err(Box::new(std::io::Error::new(ErrorKind::Other, "test-verification-failure"))) }); let config = WarpSyncConfig::WithProvider(Arc::new(provider)); - let peer_pool = Arc::new(Mutex::new(Default::default())); + let peer_pool = PeerPool::default(); let mut warp_sync = WarpSync::new(Arc::new(client), config, peer_pool.clone()); // Make sure we have enough peers to make a request. for best_number in 1..11 { let peer_id = PeerId::random(); - peer_pool.lock().add_peer(peer_id); + peer_pool.add_peer(peer_id); warp_sync.add_peer(peer_id, Hash::random(), best_number); } assert!(matches!(warp_sync.phase, Phase::WarpProof { .. })); @@ -1097,13 +1095,13 @@ mod test { Ok(VerificationResult::Partial(set_id, authorities, Hash::random())) }); let config = WarpSyncConfig::WithProvider(Arc::new(provider)); - let peer_pool = Arc::new(Mutex::new(Default::default())); + let peer_pool = PeerPool::default(); let mut warp_sync = WarpSync::new(Arc::new(client), config, peer_pool.clone()); // Make sure we have enough peers to make a request. for best_number in 1..11 { let peer_id = PeerId::random(); - peer_pool.lock().add_peer(peer_id); + peer_pool.add_peer(peer_id); warp_sync.add_peer(peer_id, Hash::random(), best_number); } assert!(matches!(warp_sync.phase, Phase::WarpProof { .. })); @@ -1144,13 +1142,13 @@ mod test { Ok(VerificationResult::Complete(set_id, authorities, target_header)) }); let config = WarpSyncConfig::WithProvider(Arc::new(provider)); - let peer_pool = Arc::new(Mutex::new(Default::default())); + let peer_pool = PeerPool::default(); let mut warp_sync = WarpSync::new(client, config, peer_pool.clone()); // Make sure we have enough peers to make a request. for best_number in 1..11 { let peer_id = PeerId::random(); - peer_pool.lock().add_peer(peer_id); + peer_pool.add_peer(peer_id); warp_sync.add_peer(peer_id, Hash::random(), best_number); } assert!(matches!(warp_sync.phase, Phase::WarpProof { .. })); @@ -1180,13 +1178,13 @@ mod test { .once() .return_const(AuthorityList::default()); let config = WarpSyncConfig::WithProvider(Arc::new(provider)); - let peer_pool = Arc::new(Mutex::new(Default::default())); + let peer_pool = PeerPool::default(); let mut warp_sync = WarpSync::new(Arc::new(client), config, peer_pool.clone()); // Make sure we have enough peers to make a request. 
for best_number in 1..11 { let peer_id = PeerId::random(); - peer_pool.lock().add_peer(peer_id); + peer_pool.add_peer(peer_id); warp_sync.add_peer(peer_id, Hash::random(), best_number); } // We are not in `Phase::TargetBlock` @@ -1218,13 +1216,13 @@ mod test { Ok(VerificationResult::Complete(set_id, authorities, target_header)) }); let config = WarpSyncConfig::WithProvider(Arc::new(provider)); - let peer_pool = Arc::new(Mutex::new(Default::default())); + let peer_pool = PeerPool::default(); let mut warp_sync = WarpSync::new(client, config, peer_pool.clone()); // Make sure we have enough peers to make a request. for best_number in 1..11 { let peer_id = PeerId::random(); - peer_pool.lock().add_peer(peer_id); + peer_pool.add_peer(peer_id); warp_sync.add_peer(peer_id, Hash::random(), best_number); } @@ -1253,13 +1251,13 @@ mod test { .block; let target_header = target_block.header().clone(); let config = WarpSyncConfig::WaitForTarget; - let peer_pool = Arc::new(Mutex::new(Default::default())); + let peer_pool = PeerPool::default(); let mut warp_sync = WarpSync::new(client, config, peer_pool.clone()); // Make sure we have enough peers to make a request. for best_number in 1..11 { let peer_id = PeerId::random(); - peer_pool.lock().add_peer(peer_id); + peer_pool.add_peer(peer_id); warp_sync.add_peer(peer_id, Hash::random(), best_number); } @@ -1295,13 +1293,13 @@ mod test { .unwrap() .block; let config = WarpSyncConfig::WithProvider(Arc::new(provider)); - let peer_pool = Arc::new(Mutex::new(Default::default())); + let peer_pool = PeerPool::default(); let mut warp_sync = WarpSync::new(client, config, peer_pool.clone()); // Make sure we have enough peers to make a request. for best_number in 1..11 { let peer_id = PeerId::random(); - peer_pool.lock().add_peer(peer_id); + peer_pool.add_peer(peer_id); warp_sync.add_peer(peer_id, Hash::random(), best_number); } @@ -1331,13 +1329,13 @@ mod test { .unwrap() .block; let config = WarpSyncConfig::WithProvider(Arc::new(provider)); - let peer_pool = Arc::new(Mutex::new(Default::default())); + let peer_pool = PeerPool::default(); let mut warp_sync = WarpSync::new(client, config, peer_pool.clone()); // Make sure we have enough peers to make a request. for best_number in 1..11 { let peer_id = PeerId::random(); - peer_pool.lock().add_peer(peer_id); + peer_pool.add_peer(peer_id); warp_sync.add_peer(peer_id, Hash::random(), best_number); } @@ -1383,13 +1381,13 @@ mod test { let extra_block = extra_block_builder.build().unwrap().block; let config = WarpSyncConfig::WithProvider(Arc::new(provider)); - let peer_pool = Arc::new(Mutex::new(Default::default())); + let peer_pool = PeerPool::default(); let mut warp_sync = WarpSync::new(client, config, peer_pool.clone()); // Make sure we have enough peers to make a request. for best_number in 1..11 { let peer_id = PeerId::random(); - peer_pool.lock().add_peer(peer_id); + peer_pool.add_peer(peer_id); warp_sync.add_peer(peer_id, Hash::random(), best_number); } @@ -1458,13 +1456,13 @@ mod test { let wrong_block = wrong_block_builder.build().unwrap().block; let config = WarpSyncConfig::WithProvider(Arc::new(provider)); - let peer_pool = Arc::new(Mutex::new(Default::default())); + let peer_pool = PeerPool::default(); let mut warp_sync = WarpSync::new(client, config, peer_pool.clone()); // Make sure we have enough peers to make a request. 
for best_number in 1..11 { let peer_id = PeerId::random(); - peer_pool.lock().add_peer(peer_id); + peer_pool.add_peer(peer_id); warp_sync.add_peer(peer_id, Hash::random(), best_number); } @@ -1509,13 +1507,13 @@ mod test { .unwrap(); let target_block = target_block_builder.build().unwrap().block; let config = WarpSyncConfig::WithProvider(Arc::new(provider)); - let peer_pool = Arc::new(Mutex::new(Default::default())); + let peer_pool = PeerPool::default(); let mut warp_sync = WarpSync::new(client, config, peer_pool.clone()); // Make sure we have enough peers to make a request. for best_number in 1..11 { let peer_id = PeerId::random(); - peer_pool.lock().add_peer(peer_id); + peer_pool.add_peer(peer_id); warp_sync.add_peer(peer_id, Hash::random(), best_number); } @@ -1565,13 +1563,13 @@ mod test { .block; let target_header = target_block.header().clone(); let config = WarpSyncConfig::WaitForTarget; - let peer_pool = Arc::new(Mutex::new(Default::default())); + let peer_pool = PeerPool::default(); let mut warp_sync = WarpSync::new(client, config, peer_pool.clone()); // Make sure we have enough peers to make a request. for best_number in 1..11 { let peer_id = PeerId::random(); - peer_pool.lock().add_peer(peer_id); + peer_pool.add_peer(peer_id); warp_sync.add_peer(peer_id, Hash::random(), best_number); } @@ -1604,13 +1602,13 @@ mod test { .unwrap() .block; let config = WarpSyncConfig::WithProvider(Arc::new(provider)); - let peer_pool = Arc::new(Mutex::new(Default::default())); + let peer_pool = PeerPool::default(); let mut warp_sync = WarpSync::new(client, config, peer_pool.clone()); // Make sure we have enough peers to make a request. for best_number in 1..11 { let peer_id = PeerId::random(); - peer_pool.lock().add_peer(peer_id); + peer_pool.add_peer(peer_id); warp_sync.add_peer(peer_id, Hash::random(), best_number); } From 62dea9287d5b1519deaaf9b1a993ce896eb3fad0 Mon Sep 17 00:00:00 2001 From: Dmitry Markin Date: Thu, 1 Feb 2024 12:28:54 +0200 Subject: [PATCH 31/31] Simplify `schedule_next_peer()` --- .../client/network/sync/src/strategy/state.rs | 21 +++++-------------- .../client/network/sync/src/strategy/warp.rs | 21 +++++-------------- 2 files changed, 10 insertions(+), 32 deletions(-) diff --git a/substrate/client/network/sync/src/strategy/state.rs b/substrate/client/network/sync/src/strategy/state.rs index c56f2e242504..6bb1e8b4ef03 100644 --- a/substrate/client/network/sync/src/strategy/state.rs +++ b/substrate/client/network/sync/src/strategy/state.rs @@ -26,7 +26,7 @@ use crate::{ LOG_TARGET, }; use libp2p::PeerId; -use log::{debug, error, trace, warn}; +use log::{debug, error, trace}; use sc_client_api::ProofProvider; use sc_consensus::{BlockImportError, BlockImportStatus, IncomingBlock}; use sc_network_common::sync::message::BlockAnnounce; @@ -305,21 +305,10 @@ impl StateStrategy { let threshold = std::cmp::max(median, min_best_number); // Find a random peer that is synced as much as peer majority and is above // `min_best_number`. 
- for mut available_peer in self.peer_pool.lock().available_peers() { - let peer_id = available_peer.peer_id(); - if let Some(peer) = self.peers.get_mut(peer_id) { - if peer.best_number >= threshold { - available_peer.reserve(); - peer.state = new_state; - return Some(*peer_id) - } - } else { - warn!( - target: LOG_TARGET, - "State inconsistency: peer {peer_id} is in the pool of connected peers, \ - but not known to `StateStrategy`.", - ); - debug_assert!(false); + for (peer_id, peer) in self.peers.iter_mut() { + if peer.best_number >= threshold && self.peer_pool.try_reserve_peer(peer_id) { + peer.state = new_state; + return Some(*peer_id) } } None diff --git a/substrate/client/network/sync/src/strategy/warp.rs b/substrate/client/network/sync/src/strategy/warp.rs index c78b7106d9f9..ae87d8409d3b 100644 --- a/substrate/client/network/sync/src/strategy/warp.rs +++ b/substrate/client/network/sync/src/strategy/warp.rs @@ -29,7 +29,7 @@ use crate::{ use codec::{Decode, Encode}; use futures::channel::oneshot; use libp2p::PeerId; -use log::{debug, error, trace, warn}; +use log::{debug, error, trace}; use sc_network_common::sync::message::{ BlockAnnounce, BlockAttributes, BlockData, BlockRequest, Direction, FromBlock, }; @@ -493,21 +493,10 @@ where let threshold = std::cmp::max(median, min_best_number.unwrap_or(Zero::zero())); // Find a random available peer that is synced as much as peer majority and is above // `min_best_number`. - for mut available_peer in self.peer_pool.lock().available_peers() { - let peer_id = available_peer.peer_id(); - if let Some(peer) = self.peers.get_mut(peer_id) { - if peer.best_number >= threshold { - available_peer.reserve(); - peer.state = new_state; - return Some(*peer_id) - } - } else { - warn!( - target: LOG_TARGET, - "State inconsistency: peer {peer_id} is in the pool of connected peers, \ - but not known to `WarpSync`.", - ); - debug_assert!(false); + for (peer_id, peer) in self.peers.iter_mut() { + if peer.best_number >= threshold && self.peer_pool.try_reserve_peer(peer_id) { + peer.state = new_state; + return Some(*peer_id) } } None
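
The net effect of the `PeerPool` rework in this series is easier to see outside the diffs. Below is a minimal, self-contained sketch of the pattern, not the crate's actual API: `u32` stands in for `libp2p::PeerId`, `std::sync::Mutex` for `parking_lot::Mutex`, and the `warn!` branches of the real `free_peer()` are omitted. It shows why every strategy can now hold a plain `PeerPool` clone instead of threading an `Arc<Mutex<PeerPool>>` through the constructors.

use std::collections::HashMap;
use std::sync::{Arc, Mutex};

type PeerId = u32; // stand-in for `libp2p::PeerId`

#[derive(PartialEq)]
enum PeerStatus {
    Available,
    Reserved,
}

#[derive(Default, Clone)]
struct PeerPool {
    // Clones are cheap handles onto the same shared map, so every sync
    // strategy can hold a `PeerPool` directly instead of an
    // `Arc<Mutex<PeerPool>>`.
    inner: Arc<Mutex<HashMap<PeerId, PeerStatus>>>,
}

impl PeerPool {
    fn add_peer(&self, peer_id: PeerId) {
        self.inner.lock().unwrap().insert(peer_id, PeerStatus::Available);
    }

    fn remove_peer(&self, peer_id: &PeerId) {
        self.inner.lock().unwrap().remove(peer_id);
    }

    // Atomically flip `Available` -> `Reserved`. Unknown or already
    // reserved peers yield `false`, so concurrent callers cannot both
    // win the same peer.
    fn try_reserve_peer(&self, peer_id: &PeerId) -> bool {
        match self.inner.lock().unwrap().get_mut(peer_id) {
            Some(status) if *status == PeerStatus::Available => {
                *status = PeerStatus::Reserved;
                true
            },
            _ => false,
        }
    }

    // Return a reserved peer to the pool once its request has completed.
    fn free_peer(&self, peer_id: &PeerId) {
        if let Some(status) = self.inner.lock().unwrap().get_mut(peer_id) {
            *status = PeerStatus::Available;
        }
    }
}

fn main() {
    let pool = PeerPool::default();
    let handle = pool.clone(); // e.g. the clone handed to a strategy

    pool.add_peer(7);
    assert!(handle.try_reserve_peer(&7)); // request scheduled
    assert!(!pool.try_reserve_peer(&7)); // peer is busy, reservation fails
    handle.free_peer(&7); // response processed
    assert!(pool.try_reserve_peer(&7)); // peer can be reserved again
    pool.remove_peer(&7); // peer disconnected
}

The real type additionally exposes `lock()` returning a `MutexGuard<'_, PeerPoolInner>`, so callers such as the tests above can iterate `available_peers()` inside a single critical section.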
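
The same reservation primitive is what makes the `schedule_next_peer()` simplification in PATCH 31/31 possible: since `try_reserve_peer()` atomically arbitrates who gets a peer, `StateStrategy` and `WarpSync` can iterate their own peer maps and no longer need the `available_peers()` cross-check that produced the "State inconsistency" warning and `debug_assert!`. A sketch of the new loop, extending the stand-ins above (`PeerInfo` and the flat `threshold` are hypothetical simplifications of the strategies' real per-peer state and median-derived threshold):

// Extends the previous sketch (same `PeerPool` and `PeerId` stand-ins).
struct PeerInfo {
    best_number: u64,
    reserved_for_request: bool, // stand-in for `PeerState`
}

// Mirrors the new loop: skip peers below the sync threshold and let the
// pool's atomic reservation decide whether a peer is actually free,
// instead of cross-checking two maps.
fn schedule_next_peer(
    pool: &PeerPool,
    peers: &mut HashMap<PeerId, PeerInfo>,
    threshold: u64,
) -> Option<PeerId> {
    for (peer_id, peer) in peers.iter_mut() {
        if peer.best_number >= threshold && pool.try_reserve_peer(peer_id) {
            peer.reserved_for_request = true;
            return Some(*peer_id);
        }
    }
    None
}

With this shape the strategy's own map is the single source of truth about which peers it knows, and the pool only arbitrates busy/free, so the inconsistency case the old code warned about can no longer arise.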