From 570370fc8175830bc90c26b49a04d790240df8e9 Mon Sep 17 00:00:00 2001
From: Max Inden
Date: Wed, 29 Jan 2020 10:59:06 +0100
Subject: [PATCH] client/network-gossip: Integrate GossipEngine tasks into Future impl

`GossipEngine` spawns two tasks: one for a periodic tick, and one to forward messages from the network to subscribers. These tasks hold an `Arc` to a `GossipEngineInner`. To reduce the amount of shared ownership (and thereby locking), this patch integrates the two tasks into a `Future` implementation on the `GossipEngine` struct. This `Future` implementation can now be polled by a single owner, e.g. the `finality-grandpa` `NetworkBridge`. As a side effect, this removes the requirement on the `network-gossip` crate to spawn tasks, and thereby removes the requirement on the `finality-grandpa` crate to spawn any tasks. This is part of a broader effort to reduce the number of owners of components within `finality-grandpa`, `network` and `network-gossip`, as well as to reduce the number of unbounded channels. For details see d4fbb897c, f0c18520a and 5afc7777e.
---
 bin/node-template/src/service.rs | 2 -
 bin/node/cli/src/service.rs | 2 -
 .../finality-grandpa/src/communication/mod.rs | 14 +-
 .../src/communication/tests.rs | 32 ++--
 client/finality-grandpa/src/lib.rs | 16 +-
 client/finality-grandpa/src/observer.rs | 19 +--
 client/finality-grandpa/src/tests.rs | 56 +++----
 client/network-gossip/src/bridge.rs | 137 +++++++++---------
 client/network-gossip/src/state_machine.rs | 2 +
 9 files changed, 126 insertions(+), 154 deletions(-)

diff --git a/bin/node-template/src/service.rs b/bin/node-template/src/service.rs index 8a5c660b23c7a..0687eae109d9b 100644 --- a/bin/node-template/src/service.rs +++ b/bin/node-template/src/service.rs @@ -160,7 +160,6 @@ pub fn new_full(config: Configuration { @@ -173,7 +172,6 @@ pub fn new_full(config: Configuration { @@ -229,7 +228,6 @@ macro_rules! new_full { on_exit: service.on_exit(), telemetry_on_connect: Some(service.telemetry_on_connect_stream()), voting_rule: grandpa::VotingRulesBuilder::default().build(), - executor: service.spawn_task_handle(), }; // the GRANDPA voter task is considered infallible, i.e. // if it fails we take down the service with it.
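The core of the change is moving from executor-spawned background tasks to responsibilities that are polled in place by a single owner. The sketch below illustrates the shape of that pattern with made-up types — it is not code from this patch — assuming `futures` 0.3 and `futures-timer` 3:

```rust
use std::pin::Pin;
use std::task::{Context, Poll};
use std::time::Duration;

use futures::{Future, FutureExt, Stream, StreamExt};

/// Illustrative stand-in for `GossipEngineInner`: it owns its event source
/// and its maintenance timer instead of handing them to spawned tasks.
struct Engine<S> {
    events: S,
    tick: futures_timer::Delay,
    processed: u64,
}

impl<S: Stream<Item = u32> + Unpin> Future for Engine<S> {
    type Output = ();

    fn poll(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll<Self::Output> {
        let this = &mut *self;

        // Drain every event that is ready before yielding, so the waker is
        // registered on a genuinely pending stream.
        while let Poll::Ready(Some(_event)) = this.events.poll_next_unpin(cx) {
            this.processed += 1;
        }

        // Re-arm the timer each time it fires, mirroring the periodic tick
        // that used to live in its own spawned task.
        while let Poll::Ready(()) = this.tick.poll_unpin(cx) {
            this.tick.reset(Duration::from_millis(1100));
            // periodic maintenance work would go here
        }

        Poll::Pending
    }
}
```

Because one owner drives the future directly, nothing else needs a handle to the inner state — which is what lets the patch drop the `Arc` clones handed to the spawned tasks.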
diff --git a/client/finality-grandpa/src/communication/mod.rs b/client/finality-grandpa/src/communication/mod.rs index 42f26aa77e2ca..5afb6f3717efe 100644 --- a/client/finality-grandpa/src/communication/mod.rs +++ b/client/finality-grandpa/src/communication/mod.rs @@ -178,7 +178,6 @@ impl<B: BlockT, N: Network<B>> NetworkBridge<B, N> { service: N, config: crate::Config, set_state: crate::environment::SharedVoterSetState<B>, - executor: &impl futures::task::Spawn, ) -> Self { let (validator, report_stream) = GossipValidator::new( config, @@ -186,7 +185,7 @@ impl<B: BlockT, N: Network<B>> NetworkBridge<B, N> { ); let validator = Arc::new(validator); - let gossip_engine = GossipEngine::new(service.clone(), executor, GRANDPA_ENGINE_ID, validator.clone()); + let gossip_engine = GossipEngine::new(service.clone(), GRANDPA_ENGINE_ID, validator.clone()); { // register all previous votes with the gossip service so that they're @@ -374,10 +373,9 @@ impl<B: BlockT, N: Network<B>> NetworkBridge<B, N> { |to, neighbor| self.neighbor_sender.send(to, neighbor), ); - let service = self.gossip_engine.clone(); let topic = global_topic::<B>(set_id.0); let incoming = incoming_global( - service, + self.gossip_engine.clone(), topic, voters, self.validator.clone(), @@ -419,7 +417,7 @@ impl<B: BlockT, N: Network<B>> NetworkBridge<B, N> { impl<B: BlockT, N: Network<B>> Future for NetworkBridge<B, N> { type Output = Result<(), Error>; - fn poll(self: Pin<&mut Self>, cx: &mut Context) -> Poll<Self::Output> { + fn poll(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll<Self::Output> { loop { match self.neighbor_packet_worker.lock().poll_next_unpin(cx) { Poll::Ready(Some((to, packet))) => { @@ -444,6 +442,12 @@ impl<B: BlockT, N: Network<B>> Future for NetworkBridge<B, N> { } } + match self.gossip_engine.poll_unpin(cx) { + // The gossip engine future finished. We should do the same. + Poll::Ready(()) => return Poll::Ready(Ok(())), + Poll::Pending => {}, + } + Poll::Pending } } diff --git a/client/finality-grandpa/src/communication/tests.rs b/client/finality-grandpa/src/communication/tests.rs index a5435bdfdce55..80134097e6dea 100644 --- a/client/finality-grandpa/src/communication/tests.rs +++ b/client/finality-grandpa/src/communication/tests.rs @@ -156,7 +156,7 @@ fn voter_set_state() -> SharedVoterSetState<Block> { } // needs to run in a tokio runtime. -fn make_test_network(executor: &impl futures::task::Spawn) -> ( +fn make_test_network() -> ( impl Future<Output = Tester>, TestNetwork, ) { @@ -178,7 +178,6 @@ fn make_test_network(executor: &impl futures::task::Spawn) -> ( net.clone(), config(), voter_set_state(), - executor, ); ( @@ -252,8 +251,7 @@ fn good_commit_leads_to_relay() { let id = sc_network::PeerId::random(); let global_topic = super::global_topic::<Block>(set_id); - let threads_pool = futures::executor::ThreadPool::new().unwrap(); - let test = make_test_network(&threads_pool).0 + let test = make_test_network().0 .then(move |tester| { // register a peer. tester.gossip_validator.new_peer(&mut NoopContext, &id, sc_network::config::Roles::FULL); @@ -272,6 +270,7 @@ fn good_commit_leads_to_relay() { } let commit_to_send = encoded_commit.clone(); + let network_bridge = tester.net_handle.clone(); // asking for global communication will cause the test network // to send us an event asking us for a stream. use it to @@ -316,7 +315,7 @@ fn good_commit_leads_to_relay() { // once the message is sent and commit is "handled" we should have // a repropagation event coming from the network.
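The new `gossip_engine` arm in `NetworkBridge::poll` above is the standard pattern of a parent future folding a child future's completion into its own output. A free-standing sketch of just that pattern, with illustrative types rather than the real ones:

```rust
use std::pin::Pin;
use std::task::{Context, Poll};

use futures::{Future, FutureExt};

/// Stand-in for the gossip engine: a sub-future the parent owns and drives.
struct Child;

impl Future for Child {
    type Output = ();
    fn poll(self: Pin<&mut Self>, _cx: &mut Context) -> Poll<()> {
        Poll::Pending
    }
}

struct Parent {
    child: Child,
}

impl Future for Parent {
    type Output = Result<(), ()>;

    fn poll(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll<Self::Output> {
        // ... the parent's own work would be polled here ...

        // Drive the child; if it ever finishes, finish with it. Polling it
        // with `cx` registers the parent's waker for the child's progress.
        match self.child.poll_unpin(cx) {
            Poll::Ready(()) => return Poll::Ready(Ok(())),
            Poll::Pending => {}
        }

        Poll::Pending
    }
}
```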
- future::join(send_message, handle_commit).then(move |(tester, ())| { + let fut = future::join(send_message, handle_commit).then(move |(tester, ())| { tester.filter_network_events(move |event| match event { Event::WriteNotification(_, data) => { data == encoded_commit @@ -324,7 +323,11 @@ fn good_commit_leads_to_relay() { _ => false, }) }) - .map(|_| ()) + .map(|_| ()); + + // Poll both the future sending and handling the commit, as well as the underlying + // NetworkBridge. Complete once the former completes. + future::select(fut, network_bridge) }); futures::executor::block_on(test); @@ -376,8 +379,7 @@ fn bad_commit_leads_to_report() { let id = sc_network::PeerId::random(); let global_topic = super::global_topic::(set_id); - let threads_pool = futures::executor::ThreadPool::new().unwrap(); - let test = make_test_network(&threads_pool).0 + let test = make_test_network().0 .map(move |tester| { // register a peer. tester.gossip_validator.new_peer(&mut NoopContext, &id, sc_network::config::Roles::FULL); @@ -396,6 +398,7 @@ fn bad_commit_leads_to_report() { } let commit_to_send = encoded_commit.clone(); + let network_bridge = tester.net_handle.clone(); // asking for global communication will cause the test network // to send us an event asking us for a stream. use it to @@ -418,7 +421,7 @@ fn bad_commit_leads_to_report() { _ => false, }); - // when the commit comes in, we'll tell the callback it was good. + // when the commit comes in, we'll tell the callback it was bad. let handle_commit = commits_in.into_future() .map(|(item, _)| { match item.unwrap() { @@ -431,7 +434,7 @@ fn bad_commit_leads_to_report() { // once the message is sent and commit is "handled" we should have // a report event coming from the network. - future::join(send_message, handle_commit).then(move |(tester, ())| { + let fut = future::join(send_message, handle_commit).then(move |(tester, ())| { tester.filter_network_events(move |event| match event { Event::Report(who, cost_benefit) => { who == id && cost_benefit == super::cost::INVALID_COMMIT @@ -439,7 +442,11 @@ fn bad_commit_leads_to_report() { _ => false, }) }) - .map(|_| ()) + .map(|_| ()); + + // Poll both the future sending and handling the commit, as well as the underlying + // NetworkBridge. Complete once the former completes. + future::select(fut, network_bridge) }); futures::executor::block_on(test); @@ -449,8 +456,7 @@ fn bad_commit_leads_to_report() { fn peer_with_higher_view_leads_to_catch_up_request() { let id = sc_network::PeerId::random(); - let threads_pool = futures::executor::ThreadPool::new().unwrap(); - let (tester, mut net) = make_test_network(&threads_pool); + let (tester, mut net) = make_test_network(); let test = tester .map(move |tester| { // register a peer with authority role. diff --git a/client/finality-grandpa/src/lib.rs b/client/finality-grandpa/src/lib.rs index a86a97c0da7cc..105ae36152341 100644 --- a/client/finality-grandpa/src/lib.rs +++ b/client/finality-grandpa/src/lib.rs @@ -525,7 +525,7 @@ fn register_finality_tracker_inherent_data_provider( } /// Parameters used to run Grandpa. -pub struct GrandpaParams { +pub struct GrandpaParams { /// Configuration for the GRANDPA service. pub config: Config, /// A link to the block import worker. @@ -540,14 +540,12 @@ pub struct GrandpaParams { pub telemetry_on_connect: Option>, /// A voting rule used to potentially restrict target votes. pub voting_rule: VR, - /// How to spawn background tasks. - pub executor: Sp, } /// Run a GRANDPA voter as a task. 
Provide configuration and a link to a /// block import worker that has already been instantiated with `block_import`. -pub fn run_grandpa_voter( - grandpa_params: GrandpaParams, +pub fn run_grandpa_voter( + grandpa_params: GrandpaParams, ) -> sp_blockchain::Result<impl Future<Output = ()> + Unpin + Send + 'static> where Block::Hash: Ord, B: Backend<Block> + 'static, @@ -560,7 +558,6 @@ pub fn run_grandpa_voter( RA: Send + Sync + 'static, X: futures::Future<Output = ()> + Clone + Send + Unpin + 'static, Client<B, E, Block, RA>: AuxStore, - Sp: futures::task::Spawn + 'static, { let GrandpaParams { config, @@ -570,7 +567,6 @@ pub fn run_grandpa_voter( on_exit, telemetry_on_connect, voting_rule, - executor, } = grandpa_params; let LinkHalf { @@ -584,7 +580,6 @@ pub fn run_grandpa_voter( network, config.clone(), persistent_data.set_state.clone(), - &executor, ); register_finality_tracker_inherent_data_provider(client.clone(), &inherent_data_providers)?; @@ -872,8 +867,8 @@ where } #[deprecated(since = "1.1.0", note = "Please switch to run_grandpa_voter.")] -pub fn run_grandpa( - grandpa_params: GrandpaParams, +pub fn run_grandpa( + grandpa_params: GrandpaParams, ) -> sp_blockchain::Result<impl Future<Output = ()> + Send + 'static> where Block::Hash: Ord, B: Backend<Block> + 'static, @@ -886,7 +881,6 @@ pub fn run_grandpa( VR: VotingRule<Block, Client<B, E, Block, RA>> + Clone + 'static, X: futures::Future<Output = ()> + Clone + Send + Unpin + 'static, Client<B, E, Block, RA>: AuxStore, - Sp: futures::task::Spawn + 'static, { run_grandpa_voter(grandpa_params) } diff --git a/client/finality-grandpa/src/observer.rs b/client/finality-grandpa/src/observer.rs index 418bd570c0b86..2e50a555df35f 100644 --- a/client/finality-grandpa/src/observer.rs +++ b/client/finality-grandpa/src/observer.rs @@ -150,12 +150,11 @@ fn grandpa_observer( /// listening for and validating GRANDPA commits instead of following the full /// protocol. Provide configuration and a link to a block import worker that has /// already been instantiated with `block_import`. -pub fn run_grandpa_observer( +pub fn run_grandpa_observer( config: Config, link: LinkHalf<B, E, Block, RA, SC>, network: N, on_exit: impl futures::Future<Output = ()> + Clone + Send + Unpin + 'static, - executor: Sp, ) -> sp_blockchain::Result<impl Future<Output = ()> + Unpin + Send + 'static> where B: Backend<Block> + 'static, E: CallExecutor<Block> + Send + Sync + 'static, @@ -163,7 +162,6 @@ pub fn run_grandpa_observer( SC: SelectChain<Block> + 'static, NumberFor<Block>: BlockNumberOps, RA: Send + Sync + 'static, - Sp: futures::task::Spawn + 'static, Client<B, E, Block, RA>: AuxStore, { let LinkHalf { @@ -177,7 +175,6 @@ pub fn run_grandpa_observer( network, config.clone(), persistent_data.set_state.clone(), - &executor, ); let observer_work = ObserverWork::new( @@ -336,10 +333,8 @@ where { type Output = Result<(), Error>; - fn poll(self: Pin<&mut Self>, cx: &mut Context) -> Poll<Self::Output> { - let this = Pin::into_inner(self); - - match Future::poll(Pin::new(&mut this.observer), cx) { + fn poll(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll<Self::Output> { + match Future::poll(Pin::new(&mut self.observer), cx) { Poll::Pending => {} Poll::Ready(Ok(())) => { // observer commit stream doesn't conclude naturally; this could reasonably be an error. @@ -351,12 +346,12 @@ where } Poll::Ready(Err(CommandOrError::VoterCommand(command))) => { // some command issued internally - this.handle_voter_command(command)?; + self.handle_voter_command(command)?; cx.waker().wake_by_ref(); } } - match Stream::poll_next(Pin::new(&mut this.voter_commands_rx), cx) { + match Stream::poll_next(Pin::new(&mut self.voter_commands_rx), cx) { Poll::Pending => {} Poll::Ready(None) => { // the `voter_commands_rx` stream should never conclude since it's never closed.
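The `cx.waker().wake_by_ref()` calls in `ObserverWork::poll` deserve a note: after consuming a command, the task wakes itself so it gets polled again promptly, rather than waiting for the next wakeup from the stream. A toy sketch of that pattern (hypothetical `Worker` type, not the patch's code):

```rust
use std::pin::Pin;
use std::task::{Context, Poll};

use futures::{Future, Stream, StreamExt};

/// Stand-in for `ObserverWork`: handles at most one command per poll.
struct Worker<S> {
    commands: S,
}

impl<S: Stream<Item = String> + Unpin> Future for Worker<S> {
    type Output = ();

    fn poll(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll<Self::Output> {
        if let Poll::Ready(Some(_command)) = self.commands.poll_next_unpin(cx) {
            // Handle the command here. Waking ourselves guarantees another
            // poll in case further commands are already queued; returning
            // Pending without a registered waker would stall the task.
            cx.waker().wake_by_ref();
        }
        Poll::Pending
    }
}
```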
@@ -364,11 +359,11 @@ where } Poll::Ready(Some(command)) => { // some command issued externally - this.handle_voter_command(command)?; + self.handle_voter_command(command)?; cx.waker().wake_by_ref(); } } - Poll::Pending + Future::poll(Pin::new(&mut self.network), cx) } } diff --git a/client/finality-grandpa/src/tests.rs b/client/finality-grandpa/src/tests.rs index 0d7cf0541f355..cf340c695451c 100644 --- a/client/finality-grandpa/src/tests.rs +++ b/client/finality-grandpa/src/tests.rs @@ -387,7 +387,6 @@ fn block_until_complete(future: impl Future + Unpin, net: &Arc( runtime: &mut current_thread::Runtime, - threads_pool: &futures::executor::ThreadPool, blocks: u64, net: Arc>, peers: &[Ed25519Keyring], @@ -454,7 +453,6 @@ fn run_to_completion_with( on_exit: Exit, telemetry_on_connect: None, voting_rule: (), - executor: threads_pool.clone(), }; let voter = run_grandpa_voter(grandpa_params).expect("all in order with client and network"); @@ -473,12 +471,11 @@ fn run_to_completion_with( fn run_to_completion( runtime: &mut current_thread::Runtime, - threads_pool: &futures::executor::ThreadPool, blocks: u64, net: Arc>, peers: &[Ed25519Keyring] ) -> u64 { - run_to_completion_with(runtime, threads_pool, blocks, net, peers, |_| None) + run_to_completion_with(runtime, blocks, net, peers, |_| None) } fn add_scheduled_change(block: &mut Block, change: ScheduledChange) { @@ -503,7 +500,6 @@ fn add_forced_change( fn finalize_3_voters_no_observers() { let _ = env_logger::try_init(); let mut runtime = current_thread::Runtime::new().unwrap(); - let threads_pool = futures::executor::ThreadPool::new().unwrap(); let peers = &[Ed25519Keyring::Alice, Ed25519Keyring::Bob, Ed25519Keyring::Charlie]; let voters = make_ids(peers); @@ -517,7 +513,7 @@ fn finalize_3_voters_no_observers() { } let net = Arc::new(Mutex::new(net)); - run_to_completion(&mut runtime, &threads_pool, 20, net.clone(), peers); + run_to_completion(&mut runtime, 20, net.clone(), peers); // normally there's no justification for finalized blocks assert!( @@ -529,7 +525,6 @@ fn finalize_3_voters_no_observers() { #[test] fn finalize_3_voters_1_full_observer() { let mut runtime = current_thread::Runtime::new().unwrap(); - let threads_pool = futures::executor::ThreadPool::new().unwrap(); let peers = &[Ed25519Keyring::Alice, Ed25519Keyring::Bob, Ed25519Keyring::Charlie]; let voters = make_ids(peers); @@ -589,7 +584,6 @@ fn finalize_3_voters_1_full_observer() { on_exit: Exit, telemetry_on_connect: None, voting_rule: (), - executor: threads_pool.clone(), }; voters.push(run_grandpa_voter(grandpa_params).expect("all in order with client and network")); @@ -635,7 +629,6 @@ fn transition_3_voters_twice_1_full_observer() { let net = Arc::new(Mutex::new(GrandpaTestNet::new(api, 8))); let mut runtime = current_thread::Runtime::new().unwrap(); - let threads_pool = futures::executor::ThreadPool::new().unwrap(); net.lock().peer(0).push_blocks(1, false); net.lock().block_until_sync(&mut runtime); @@ -754,7 +747,6 @@ fn transition_3_voters_twice_1_full_observer() { on_exit: Exit, telemetry_on_connect: None, voting_rule: (), - executor: threads_pool.clone(), }; let voter = run_grandpa_voter(grandpa_params).expect("all in order with client and network"); @@ -770,7 +762,6 @@ fn transition_3_voters_twice_1_full_observer() { #[test] fn justification_is_emitted_when_consensus_data_changes() { let mut runtime = current_thread::Runtime::new().unwrap(); - let threads_pool = futures::executor::ThreadPool::new().unwrap(); let peers = &[Ed25519Keyring::Alice, Ed25519Keyring::Bob, 
Ed25519Keyring::Charlie]; let mut net = GrandpaTestNet::new(TestApi::new(make_ids(peers)), 3); @@ -779,7 +770,7 @@ fn justification_is_emitted_when_consensus_data_changes() { net.peer(0).push_authorities_change_block(new_authorities); net.block_until_sync(&mut runtime); let net = Arc::new(Mutex::new(net)); - run_to_completion(&mut runtime, &threads_pool, 1, net.clone(), peers); + run_to_completion(&mut runtime, 1, net.clone(), peers); // ... and check that there's justification for block#1 assert!(net.lock().peer(0).client().justification(&BlockId::Number(1)).unwrap().is_some(), @@ -789,7 +780,6 @@ fn justification_is_emitted_when_consensus_data_changes() { #[test] fn justification_is_generated_periodically() { let mut runtime = current_thread::Runtime::new().unwrap(); - let threads_pool = futures::executor::ThreadPool::new().unwrap(); let peers = &[Ed25519Keyring::Alice, Ed25519Keyring::Bob, Ed25519Keyring::Charlie]; let voters = make_ids(peers); @@ -798,7 +788,7 @@ fn justification_is_generated_periodically() { net.block_until_sync(&mut runtime); let net = Arc::new(Mutex::new(net)); - run_to_completion(&mut runtime, &threads_pool, 32, net.clone(), peers); + run_to_completion(&mut runtime, 32, net.clone(), peers); // when block#32 (justification_period) is finalized, justification // is required => generated @@ -829,7 +819,6 @@ fn consensus_changes_works() { #[test] fn sync_justifications_on_change_blocks() { let mut runtime = current_thread::Runtime::new().unwrap(); - let threads_pool = futures::executor::ThreadPool::new().unwrap(); let peers_a = &[Ed25519Keyring::Alice, Ed25519Keyring::Bob, Ed25519Keyring::Charlie]; let peers_b = &[Ed25519Keyring::Alice, Ed25519Keyring::Bob]; let voters = make_ids(peers_b); @@ -861,7 +850,7 @@ fn sync_justifications_on_change_blocks() { } let net = Arc::new(Mutex::new(net)); - run_to_completion(&mut runtime, &threads_pool, 25, net.clone(), peers_a); + run_to_completion(&mut runtime, 25, net.clone(), peers_a); // the first 3 peers are grandpa voters and therefore have already finalized // block 21 and stored a justification @@ -884,7 +873,6 @@ fn sync_justifications_on_change_blocks() { fn finalizes_multiple_pending_changes_in_order() { let _ = env_logger::try_init(); let mut runtime = current_thread::Runtime::new().unwrap(); - let threads_pool = futures::executor::ThreadPool::new().unwrap(); let peers_a = &[Ed25519Keyring::Alice, Ed25519Keyring::Bob, Ed25519Keyring::Charlie]; let peers_b = &[Ed25519Keyring::Dave, Ed25519Keyring::Eve, Ed25519Keyring::Ferdie]; @@ -938,14 +926,13 @@ fn finalizes_multiple_pending_changes_in_order() { } let net = Arc::new(Mutex::new(net)); - run_to_completion(&mut runtime, &threads_pool, 30, net.clone(), all_peers); + run_to_completion(&mut runtime, 30, net.clone(), all_peers); } #[test] fn force_change_to_new_set() { let _ = env_logger::try_init(); let mut runtime = current_thread::Runtime::new().unwrap(); - let threads_pool = futures::executor::ThreadPool::new().unwrap(); // two of these guys are offline. let genesis_authorities = &[ Ed25519Keyring::Alice, @@ -996,7 +983,7 @@ fn force_change_to_new_set() { // it will only finalize if the forced transition happens. 
// we add_blocks after the voters are spawned because otherwise // the link-halfs have the wrong AuthoritySet - run_to_completion(&mut runtime, &threads_pool, 25, net, peers_a); + run_to_completion(&mut runtime, 25, net, peers_a); } #[test] @@ -1126,7 +1113,6 @@ fn voter_persists_its_votes() { let _ = env_logger::try_init(); let mut runtime = current_thread::Runtime::new().unwrap(); - let threads_pool = futures::executor::ThreadPool::new().unwrap(); // we have two authorities but we'll only be running the voter for alice // we are going to be listening for the prevotes it casts @@ -1165,7 +1151,6 @@ fn voter_persists_its_votes() { net: Arc>, client: PeersClient, keystore: KeyStorePtr, - threads_pool: futures::executor::ThreadPool, } impl Future for ResettableVoter { @@ -1204,7 +1189,6 @@ fn voter_persists_its_votes() { on_exit: Exit, telemetry_on_connect: None, voting_rule: VotingRulesBuilder::default().build(), - executor: this.threads_pool.clone(), }; let voter = run_grandpa_voter(grandpa_params) @@ -1236,7 +1220,6 @@ fn voter_persists_its_votes() { net: net.clone(), client: client.clone(), keystore, - threads_pool: threads_pool.clone(), }.unit_error().compat()); } @@ -1272,7 +1255,6 @@ fn voter_persists_its_votes() { net.lock().peers[1].network_service().clone(), config.clone(), set_state, - &threads_pool, ); let (round_rx, round_tx) = network.round_communication( @@ -1283,6 +1265,11 @@ fn voter_persists_its_votes() { HasVoted::No, ); + runtime.spawn( + network.map_err(|e| panic!("network bridge should not error: {:?}", e)) + .compat(), + ); + let round_tx = Arc::new(Mutex::new(round_tx)); let exit_tx = Arc::new(Mutex::new(Some(exit_tx))); @@ -1383,7 +1370,6 @@ fn voter_persists_its_votes() { fn finalize_3_voters_1_light_observer() { let _ = env_logger::try_init(); let mut runtime = current_thread::Runtime::new().unwrap(); - let threads_pool = futures::executor::ThreadPool::new().unwrap(); let authorities = &[Ed25519Keyring::Alice, Ed25519Keyring::Bob, Ed25519Keyring::Charlie]; let voters = make_ids(authorities); @@ -1400,10 +1386,12 @@ fn finalize_3_voters_1_light_observer() { let link = net.lock().peer(3).data.lock().take().expect("link initialized on startup; qed"); let finality_notifications = net.lock().peer(3).client().finality_notification_stream() - .take_while(|n| future::ready(n.header.number() < &20)) + .take_while(|n| { + future::ready(n.header.number() < &20) + }) .collect::>(); - run_to_completion_with(&mut runtime, &threads_pool, 20, net.clone(), authorities, |executor| { + run_to_completion_with(&mut runtime, 20, net.clone(), authorities, |executor| { executor.spawn( run_grandpa_observer( Config { @@ -1417,7 +1405,6 @@ fn finalize_3_voters_1_light_observer() { link, net.lock().peers[3].network_service().clone(), Exit, - threads_pool.clone(), ).unwrap().unit_error().compat() ).unwrap(); @@ -1429,7 +1416,6 @@ fn finalize_3_voters_1_light_observer() { fn finality_proof_is_fetched_by_light_client_when_consensus_data_changes() { let _ = ::env_logger::try_init(); let mut runtime = current_thread::Runtime::new().unwrap(); - let threads_pool = futures::executor::ThreadPool::new().unwrap(); let peers = &[Ed25519Keyring::Alice]; let mut net = GrandpaTestNet::new(TestApi::new(make_ids(peers)), 1); @@ -1439,7 +1425,7 @@ fn finality_proof_is_fetched_by_light_client_when_consensus_data_changes() { // && instead fetches finality proof for block #1 net.peer(0).push_authorities_change_block(vec![sp_consensus_babe::AuthorityId::from_slice(&[42; 32])]); let net = Arc::new(Mutex::new(net)); 
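The `runtime.spawn(network.map_err(..).compat())` addition in `voter_persists_its_votes` above bridges a `futures` 0.3 future back to `futures` 0.1 so the old tokio `current_thread` runtime can drive the rewritten `NetworkBridge`. A standalone sketch of that conversion — not code from this patch, and assuming the `futures` crate is built with its `compat` feature alongside the `futures01` crate:

```rust
use futures::future::{FutureExt, TryFutureExt};

fn main() {
    // A std::future whose output is a Result, as `compat()` requires.
    let fut03 = async { Ok::<(), ()>(()) };

    // Box it to obtain an Unpin future, then convert it into a futures 0.1
    // future with Item = () and Error = (), spawnable on an old tokio runtime.
    let fut01 = fut03.boxed().compat();

    // Drive it with the futures 0.1 blocking combinator just to show it runs.
    let _ = futures01::future::Future::wait(fut01);
}
```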
- run_to_completion(&mut runtime, &threads_pool, 1, net.clone(), peers); + run_to_completion(&mut runtime, 1, net.clone(), peers); net.lock().block_until_sync(&mut runtime); // check that the block#1 is finalized on light client @@ -1461,7 +1447,6 @@ fn empty_finality_proof_is_returned_to_light_client_when_authority_set_is_differ let _ = ::env_logger::try_init(); let mut runtime = current_thread::Runtime::new().unwrap(); - let threads_pool = futures::executor::ThreadPool::new().unwrap(); // two of these guys are offline. let genesis_authorities = if FORCE_CHANGE { @@ -1509,7 +1494,7 @@ fn empty_finality_proof_is_returned_to_light_client_when_authority_set_is_differ net.lock().block_until_sync(&mut runtime); // finalize block #11 on full clients - run_to_completion(&mut runtime, &threads_pool, 11, net.clone(), peers_a); + run_to_completion(&mut runtime, 11, net.clone(), peers_a); // request finalization by light client net.lock().add_light_peer(&GrandpaTestNet::default_config()); @@ -1526,7 +1511,6 @@ fn voter_catches_up_to_latest_round_when_behind() { let _ = env_logger::try_init(); let mut runtime = current_thread::Runtime::new().unwrap(); - let threads_pool = futures::executor::ThreadPool::new().unwrap(); let peers = &[Ed25519Keyring::Alice, Ed25519Keyring::Bob]; let voters = make_ids(peers); @@ -1554,7 +1538,6 @@ fn voter_catches_up_to_latest_round_when_behind() { on_exit: Exit, telemetry_on_connect: None, voting_rule: (), - executor: threads_pool.clone(), }; Box::pin(run_grandpa_voter(grandpa_params).expect("all in order with client and network")) @@ -1646,8 +1629,6 @@ fn grandpa_environment_respects_voting_rules() { use finality_grandpa::Chain; use sc_network_test::TestClient; - let threads_pool = futures::executor::ThreadPool::new().unwrap(); - let peers = &[Ed25519Keyring::Alice]; let voters = make_ids(peers); @@ -1678,7 +1659,6 @@ fn grandpa_environment_respects_voting_rules() { network_service.clone(), config.clone(), set_state.clone(), - &threads_pool, ); Environment { diff --git a/client/network-gossip/src/bridge.rs b/client/network-gossip/src/bridge.rs index d6d6805b3e56c..dceb122082328 100644 --- a/client/network-gossip/src/bridge.rs +++ b/client/network-gossip/src/bridge.rs @@ -15,16 +15,17 @@ // along with Substrate. If not, see <http://www.gnu.org/licenses/>. use crate::{Network, Validator}; -use crate::state_machine::{ConsensusGossip, TopicNotification}; +use crate::state_machine::{ConsensusGossip, TopicNotification, PERIODIC_MAINTENANCE_INTERVAL}; use sc_network::message::generic::ConsensusMessage; use sc_network::{Event, ReputationChange}; -use futures::{prelude::*, channel::mpsc, compat::Compat01As03, task::SpawnExt as _}; +use futures::{prelude::*, channel::mpsc, compat::Compat01As03}; +use futures01::stream::Stream as Stream01; use libp2p::PeerId; use parking_lot::Mutex; use sp_runtime::{traits::Block as BlockT, ConsensusEngineId}; -use std::{sync::Arc, time::Duration}; +use std::{pin::Pin, sync::Arc, task::{Context, Poll}}; /// Wraps around an implementation of the `Network` trait and provides gossiping capabilities on /// top of it. @@ -36,13 +37,17 @@ pub struct GossipEngine<B: BlockT> { struct GossipEngineInner<B: BlockT> { state_machine: ConsensusGossip<B>, network: Box<dyn Network<B> + Send>, + periodic_maintenance_interval: futures_timer::Delay, + network_event_stream: Compat01As03<Box<dyn Stream01<Item = Event, Error = ()> + Send>>, + engine_id: ConsensusEngineId, } +impl<B: BlockT> Unpin for GossipEngineInner<B> {} + impl<B: BlockT> GossipEngine<B> { /// Create a new instance.
pub fn new + Send + Clone + 'static>( mut network: N, - executor: &impl futures::task::Spawn, engine_id: ConsensusEngineId, validator: Arc>, ) -> Self where B: 'static { @@ -50,7 +55,7 @@ impl GossipEngine { // We grab the event stream before registering the notifications protocol, otherwise we // might miss events. - let event_stream = network.event_stream(); + let network_event_stream = network.event_stream(); network.register_notifications_protocol(engine_id); state_machine.register_validator(&mut network, engine_id, validator); @@ -58,6 +63,9 @@ impl GossipEngine { let inner = Arc::new(Mutex::new(GossipEngineInner { state_machine, network: Box::new(network), + periodic_maintenance_interval: futures_timer::Delay::new(PERIODIC_MAINTENANCE_INTERVAL), + network_event_stream: Compat01As03::new(network_event_stream), + engine_id, })); let gossip_engine = GossipEngine { @@ -65,72 +73,6 @@ impl GossipEngine { engine_id, }; - let res = executor.spawn({ - let inner = Arc::downgrade(&inner); - async move { - loop { - let _ = futures_timer::Delay::new(Duration::from_millis(1100)).await; - if let Some(inner) = inner.upgrade() { - let mut inner = inner.lock(); - let inner = &mut *inner; - inner.state_machine.tick(&mut *inner.network); - } else { - // We reach this branch if the `Arc` has no reference - // left. We can now let the task end. - break; - } - } - } - }); - - // Note: we consider the chances of an error to spawn a background task almost null. - if res.is_err() { - log::error!(target: "gossip", "Failed to spawn background task"); - } - - let res = executor.spawn(async move { - let mut stream = Compat01As03::new(event_stream); - while let Some(Ok(event)) = stream.next().await { - match event { - Event::NotificationStreamOpened { remote, engine_id: msg_engine_id, roles } => { - if msg_engine_id != engine_id { - continue; - } - let mut inner = inner.lock(); - let inner = &mut *inner; - inner.state_machine.new_peer(&mut *inner.network, remote, roles); - } - Event::NotificationStreamClosed { remote, engine_id: msg_engine_id } => { - if msg_engine_id != engine_id { - continue; - } - let mut inner = inner.lock(); - let inner = &mut *inner; - inner.state_machine.peer_disconnected(&mut *inner.network, remote); - }, - Event::NotificationsReceived { remote, messages } => { - let mut inner = inner.lock(); - let inner = &mut *inner; - inner.state_machine.on_incoming( - &mut *inner.network, - remote, - messages.into_iter() - .filter_map(|(engine, data)| if engine == engine_id { - Some(ConsensusMessage { engine_id: engine, data: data.to_vec() }) - } else { None }) - .collect() - ); - }, - Event::Dht(_) => {} - } - } - }); - - // Note: we consider the chances of an error to spawn a background task almost null. 
- if res.is_err() { - log::error!(target: "gossip", "Failed to spawn background task"); - } - gossip_engine } @@ -222,6 +164,59 @@ impl<B: BlockT> GossipEngine<B> { } } +impl<B: BlockT> Future for GossipEngine<B> { + type Output = (); + + fn poll(self: Pin<&mut Self>, cx: &mut Context) -> Poll<Self::Output> { + self.inner.lock().poll_unpin(cx) + } +} + +impl<B: BlockT> Future for GossipEngineInner<B> { + type Output = (); + + fn poll(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll<Self::Output> { + let this = &mut *self; + + while let Poll::Ready(Some(Ok(event))) = this.network_event_stream.poll_next_unpin(cx) { + match event { + Event::NotificationStreamOpened { remote, engine_id: msg_engine_id, roles } => { + if msg_engine_id != this.engine_id { + continue; + } + this.state_machine.new_peer(&mut *this.network, remote, roles); + } + Event::NotificationStreamClosed { remote, engine_id: msg_engine_id } => { + if msg_engine_id != this.engine_id { + continue; + } + this.state_machine.peer_disconnected(&mut *this.network, remote); + }, + Event::NotificationsReceived { remote, messages } => { + let engine_id = this.engine_id.clone(); + this.state_machine.on_incoming( + &mut *this.network, + remote, + messages.into_iter() + .filter_map(|(engine, data)| if engine == engine_id { + Some(ConsensusMessage { engine_id: engine, data: data.to_vec() }) + } else { None }) + .collect() + ); + }, + Event::Dht(_) => {} + } + } + + while let Poll::Ready(Ok(())) = this.periodic_maintenance_interval.poll_unpin(cx) { + this.periodic_maintenance_interval.reset(PERIODIC_MAINTENANCE_INTERVAL); + this.state_machine.tick(&mut *this.network); + } + + Poll::Pending + } +} + impl<B: BlockT> Clone for GossipEngine<B> { fn clone(&self) -> Self { GossipEngine { diff --git a/client/network-gossip/src/state_machine.rs b/client/network-gossip/src/state_machine.rs index d1931b1bd29d8..5cc230221f548 100644 --- a/client/network-gossip/src/state_machine.rs +++ b/client/network-gossip/src/state_machine.rs @@ -34,6 +34,8 @@ const KNOWN_MESSAGES_CACHE_SIZE: usize = 4096; const REBROADCAST_INTERVAL: time::Duration = time::Duration::from_secs(30); +pub(crate) const PERIODIC_MAINTENANCE_INTERVAL: time::Duration = time::Duration::from_millis(1100); + mod rep { use sc_network::ReputationChange as Rep; /// Reputation change when a peer sends us a gossip message that we didn't know about.
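A note on the maintenance loop in `GossipEngineInner::poll`: the `futures_timer::Delay` is re-armed in place via `reset` each time it fires, rather than a fresh `Delay` being allocated per tick as the removed spawned task did. The behaviour can be sanity-checked in isolation; this sketch assumes `futures-timer` 3, where `Delay` yields `()` and `reset` takes a `Duration` (the version the patch builds against yields `io::Result<()>`, hence the `Ok(())` pattern above):

```rust
use std::time::{Duration, Instant};

use futures_timer::Delay;

fn main() {
    futures::executor::block_on(async {
        let mut tick = Delay::new(Duration::from_millis(10));
        let start = Instant::now();

        (&mut tick).await;                     // first firing
        tick.reset(Duration::from_millis(10)); // re-arm in place
        (&mut tick).await;                     // fires again after the reset interval

        assert!(start.elapsed() >= Duration::from_millis(20));
    });
}
```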