This repository has been archived by the owner on Nov 15, 2023. It is now read-only.

Companion PR to splitting Roles (#960)
* Companion PR to splitting Roles

* Fix network tests

* Fix service build

* Even more fixing

* Oops, quick fix

* use is_network_authority in grandpa service config

Co-authored-by: André Silva <andre.beat@gmail.com>
tomaka and andresilva authored Apr 3, 2020
1 parent 3a17550 · commit dddf83f
Showing 7 changed files with 41 additions and 51 deletions.
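
The substance of the change: upstream Substrate split the old `Roles` bitflags into two distinct types, a `Role` describing what the local node runs as and an `ObservedRole` describing what a remote peer claims to be, and this companion PR adapts Polkadot to that split. Below is a minimal sketch of the shapes the diffs rely on; the variant payload types (`Vec<String>`), the `Light` variant of `ObservedRole`, and the helper bodies are assumptions inferred from usage in this diff, not copied from Substrate:

```rust
// Sketch only: reconstructed from how this diff uses the types.

/// What this node runs as (replaces the `Roles` bitflags on `Configuration`).
#[derive(Clone)]
pub enum Role {
    Full,
    Light,
    /// Sentry node guarding an authority (payload type assumed).
    Sentry { validators: Vec<String> },
    /// Actively validating node, carrying its sentry node addresses.
    Authority { sentry_nodes: Vec<String> },
}

impl Role {
    /// True only for a real authority that holds keys and authors blocks.
    pub fn is_authority(&self) -> bool {
        matches!(self, Role::Authority { .. })
    }

    /// True for authorities *and* their sentries: both must run the
    /// authority networking protocols (see the grandpa config below).
    pub fn is_network_authority(&self) -> bool {
        matches!(self, Role::Authority { .. } | Role::Sentry { .. })
    }
}

/// What a remote peer reported itself as; handed to gossip and
/// protocol handlers in place of the old bitflags.
pub enum ObservedRole {
    Full,
    Light,
    Authority,
    OurSentry,
    OurGuardedAuthority,
}
```

Most of what follows is mechanical fallout from that split; the service changes at the bottom are the one place where the `Authority` payload actually replaces separate sentry bookkeeping.
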
cli/src/command.rs (4 changes: 2 additions & 2 deletions)

```diff
@@ -151,8 +151,8 @@ where
         TLightClient<R, D>
     >,
 {
-    match config.roles {
-        service::Roles::LIGHT =>
+    match config.role {
+        service::Role::Light =>
             sc_cli::run_service_until_exit(
                 config,
                 |config| service::new_light::<R, D, E>(config),
```
collator/src/lib.rs (10 changes: 5 additions & 5 deletions)

```diff
@@ -64,7 +64,7 @@ use polkadot_primitives::{
 };
 use polkadot_cli::{
     ProvideRuntimeApi, AbstractService, ParachainHost, IsKusama,
-    service::{self, Roles}
+    service::{self, Role}
 };
 pub use polkadot_cli::{VersionInfo, load_spec, service::Configuration};
 pub use polkadot_validation::SignedStatement;
@@ -344,8 +344,8 @@ where
     <P::ParachainContext as ParachainContext>::ProduceCandidate: Send,
 {
     let is_kusama = config.expect_chain_spec().is_kusama();
-    match (is_kusama, config.roles) {
-        (_, Roles::LIGHT) => return Err(
+    match (is_kusama, &config.role) {
+        (_, Role::Light) => return Err(
             polkadot_service::Error::Other("light nodes are unsupported as collator".into())
         ).into(),
         (true, _) =>
@@ -389,8 +389,8 @@ pub fn run_collator<P>(
     P::ParachainContext: Send + 'static,
     <P::ParachainContext as ParachainContext>::ProduceCandidate: Send,
 {
-    match (config.expect_chain_spec().is_kusama(), config.roles) {
-        (_, Roles::LIGHT) => return Err(
+    match (config.expect_chain_spec().is_kusama(), &config.role) {
+        (_, Role::Light) => return Err(
             polkadot_cli::Error::Input("light nodes are unsupported as collator".into())
         ).into(),
         (true, _) =>
```
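
One detail worth noting in the collator hunks: the match is now over `&config.role` rather than `config.roles`. The old bitflags were `Copy`, so matching by value was free; the new enum owns data (its `Authority` variant carries sentry addresses), so matching by value would move the role out of the config. A self-contained sketch of the borrow, using a stand-in enum:

```rust
// Stand-in enum; the real `Role` is re-exported from the service crate.
enum Role { Full, Light, Authority { sentry_nodes: Vec<String> } }

fn reject_light(role: &Role) -> Result<(), String> {
    // Matching through a reference leaves `config.role` in place.
    match role {
        Role::Light => Err("light nodes are unsupported as collator".into()),
        _ => Ok(()),
    }
}

fn main() {
    let role = Role::Authority { sentry_nodes: vec![] };
    assert!(reject_light(&role).is_ok());
    assert!(reject_light(&Role::Light).is_err());
}
```
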
network/src/legacy/gossip/mod.rs (14 changes: 7 additions & 7 deletions)

```diff
@@ -51,7 +51,7 @@
 use sp_runtime::traits::{BlakeTwo256, Hash as HashT};
 use sp_blockchain::Error as ClientError;
-use sc_network::{config::Roles, PeerId, ReputationChange};
+use sc_network::{ObservedRole, PeerId, ReputationChange};
 use sc_network::NetworkService;
 use sc_network_gossip::{
     ValidationResult as GossipValidationResult,
@@ -635,7 +635,7 @@ impl<C: ChainContext + ?Sized> MessageValidator<C> {
 }
 
 impl<C: ChainContext + ?Sized> sc_network_gossip::Validator<Block> for MessageValidator<C> {
-    fn new_peer(&self, _context: &mut dyn ValidatorContext<Block>, who: &PeerId, _roles: Roles) {
+    fn new_peer(&self, _context: &mut dyn ValidatorContext<Block>, who: &PeerId, _roles: ObservedRole) {
         let mut inner = self.inner.write();
         inner.peers.insert(who.clone(), PeerData::default());
     }
@@ -833,7 +833,7 @@ mod tests {
     let peer_a = PeerId::random();
 
     let mut validator_context = MockValidatorContext::default();
-    validator.new_peer(&mut validator_context, &peer_a, Roles::FULL);
+    validator.new_peer(&mut validator_context, &peer_a, ObservedRole::Full);
     assert!(validator_context.events.is_empty());
     validator_context.clear();
 
@@ -911,7 +911,7 @@ mod tests {
     let peer_a = PeerId::random();
 
     let mut validator_context = MockValidatorContext::default();
-    validator.new_peer(&mut validator_context, &peer_a, Roles::FULL);
+    validator.new_peer(&mut validator_context, &peer_a, ObservedRole::Full);
     assert!(validator_context.events.is_empty());
     validator_context.clear();
 
@@ -953,7 +953,7 @@ mod tests {
     let peer_a = PeerId::random();
 
     let mut validator_context = MockValidatorContext::default();
-    validator.new_peer(&mut validator_context, &peer_a, Roles::FULL);
+    validator.new_peer(&mut validator_context, &peer_a, ObservedRole::Full);
     assert!(validator_context.events.is_empty());
     validator_context.clear();
 
@@ -1007,7 +1007,7 @@ mod tests {
     let peer_a = PeerId::random();
 
     let mut validator_context = MockValidatorContext::default();
-    validator.new_peer(&mut validator_context, &peer_a, Roles::FULL);
+    validator.new_peer(&mut validator_context, &peer_a, ObservedRole::Full);
     assert!(validator_context.events.is_empty());
     validator_context.clear();
 
@@ -1099,7 +1099,7 @@ mod tests {
     let peer_a = PeerId::random();
 
     let mut validator_context = MockValidatorContext::default();
-    validator.new_peer(&mut validator_context, &peer_a, Roles::FULL);
+    validator.new_peer(&mut validator_context, &peer_a, ObservedRole::Full);
     assert!(validator_context.events.is_empty());
     validator_context.clear();
 
```
network/src/protocol/mod.rs (12 changes: 6 additions & 6 deletions)

```diff
@@ -41,7 +41,7 @@ use polkadot_validation::{
     SharedTable, TableRouter, Network as ParachainNetwork, Validated, GenericStatement, Collators,
     SignedStatement,
 };
-use sc_network::{config::Roles, Event, PeerId};
+use sc_network::{ObservedRole, Event, PeerId};
 use sp_api::ProvideRuntimeApi;
 use sp_runtime::ConsensusEngineId;
@@ -72,7 +72,7 @@ mod tests;
 // Messages from the service API or network adapter.
 enum ServiceToWorkerMsg {
     // basic peer messages.
-    PeerConnected(PeerId, Roles),
+    PeerConnected(PeerId, ObservedRole),
     PeerMessage(PeerId, Vec<bytes::Bytes>),
     PeerDisconnected(PeerId),
@@ -255,11 +255,11 @@ pub fn start<C, Api, SP>(
     Event::NotificationStreamOpened {
         remote,
         engine_id,
-        roles,
+        role,
     } => {
         if engine_id != POLKADOT_ENGINE_ID { continue }
 
-        worker_sender.send(ServiceToWorkerMsg::PeerConnected(remote, roles)).await
+        worker_sender.send(ServiceToWorkerMsg::PeerConnected(remote, role)).await
     },
     Event::NotificationStreamClosed {
         remote,
@@ -496,8 +496,8 @@ impl ProtocolHandler {
         }
     }
 
-    fn on_connect(&mut self, peer: PeerId, roles: Roles) {
-        let claimed_validator = roles.contains(Roles::AUTHORITY);
+    fn on_connect(&mut self, peer: PeerId, role: ObservedRole) {
+        let claimed_validator = matches!(role, ObservedRole::OurSentry | ObservedRole::OurGuardedAuthority | ObservedRole::Authority);
 
         self.peers.insert(peer.clone(), PeerData {
             claimed_validator,
```
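
The `on_connect` hunk is the one place this protocol actually inspects the new type: a bitflag test becomes a `matches!` over every variant that can plausibly back a validator, which now includes our own sentry and guarded-authority peers. The same predicate, isolated as a runnable sketch with a stand-in enum:

```rust
// Stand-in for sc_network's ObservedRole, variants as used in the diff.
enum ObservedRole { Full, Light, Authority, OurSentry, OurGuardedAuthority }

fn claims_validator(role: &ObservedRole) -> bool {
    // Before the split: roles.contains(Roles::AUTHORITY).
    // A sentry fronts an authority, so it counts as one here too.
    matches!(
        role,
        ObservedRole::OurSentry | ObservedRole::OurGuardedAuthority | ObservedRole::Authority
    )
}

fn main() {
    assert!(claims_validator(&ObservedRole::Authority));
    assert!(claims_validator(&ObservedRole::OurSentry));
    assert!(!claims_validator(&ObservedRole::Full));
    assert!(!claims_validator(&ObservedRole::Light));
}
```
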
network/src/protocol/tests.rs (6 changes: 3 additions & 3 deletions)

```diff
@@ -189,7 +189,7 @@ sp_api::mock_impl_runtime_apis! {
 }
 
 impl super::Service<MockNetworkOps> {
-    async fn connect_peer(&mut self, peer: PeerId, roles: Roles) {
+    async fn connect_peer(&mut self, peer: PeerId, roles: ObservedRole) {
         self.sender.send(ServiceToWorkerMsg::PeerConnected(peer, roles)).await.unwrap();
     }
@@ -373,7 +373,7 @@ fn validator_peer_cleaned_up() {
     pool.spawner().spawn_local(worker_task).unwrap();
     pool.run_until(async move {
-        service.connect_peer(peer.clone(), Roles::AUTHORITY).await;
+        service.connect_peer(peer.clone(), ObservedRole::Authority).await;
         service.peer_message(peer.clone(), Message::Status(Status {
             version: VERSION,
             collating_for: None,
@@ -433,7 +433,7 @@ fn validator_key_spillover_cleaned() {
     pool.spawner().spawn_local(worker_task).unwrap();
     pool.run_until(async move {
-        service.connect_peer(peer.clone(), Roles::AUTHORITY).await;
+        service.connect_peer(peer.clone(), ObservedRole::Authority).await;
         service.peer_message(peer.clone(), Message::Status(Status {
             version: VERSION,
             collating_for: None,
```
network/test/src/lib.rs (27 changes: 11 additions & 16 deletions)

```diff
@@ -22,7 +22,7 @@ mod block_import;
 use std::{collections::HashMap, pin::Pin, sync::Arc, marker::PhantomData, task::{Poll, Context as FutureContext}};
 
 use log::trace;
-use sc_network::config::{build_multiaddr, FinalityProofProvider};
+use sc_network::config::{build_multiaddr, FinalityProofProvider, Role};
 use sp_blockchain::{
     Result as ClientResult, well_known_cache_keys::{self, Id as CacheKeyId}, Info as BlockchainInfo,
 };
@@ -35,7 +35,6 @@ use sc_client_api::{
 };
 use sc_block_builder::{BlockBuilder, BlockBuilderProvider};
 use sc_client::LongestChain;
-use sc_network::config::Roles;
 use sp_consensus::block_validation::DefaultBlockAnnounceValidator;
 use sp_consensus::import_queue::{
     BasicQueue, BoxJustificationImport, Verifier, BoxFinalityProofImport,
@@ -524,22 +523,21 @@ pub trait TestNetFactory: Sized {
     /// Create new test network with this many peers.
     fn new(n: usize) -> Self {
         trace!(target: "test_network", "Creating test network");
-        let config = Self::default_config();
-        let mut net = Self::from_config(&config);
+        let mut net = Self::from_config(&Default::default());
 
         for i in 0..n {
             trace!(target: "test_network", "Adding peer {}", i);
-            net.add_full_peer(&config);
+            net.add_full_peer();
         }
         net
     }
 
-    fn add_full_peer(&mut self, config: &ProtocolConfig) {
-        self.add_full_peer_with_states(config, None)
+    fn add_full_peer(&mut self,) {
+        self.add_full_peer_with_states(None)
     }
 
     /// Add a full peer.
-    fn add_full_peer_with_states(&mut self, config: &ProtocolConfig, keep_blocks: Option<u32>) {
+    fn add_full_peer_with_states(&mut self, keep_blocks: Option<u32>) {
         let test_client_builder = match keep_blocks {
             Some(keep_blocks) => TestClientBuilder::with_pruning_window(keep_blocks),
             None => TestClientBuilder::with_default_backend(),
@@ -558,7 +556,7 @@
         let verifier = self.make_verifier(
             PeersClient::Full(client.clone(), backend.clone()),
-            config,
+            &Default::default(),
             &data,
         );
         let verifier = VerifierAdapter::new(Arc::new(Mutex::new(Box::new(verifier) as Box<_>)));
@@ -573,7 +571,7 @@
         let listen_addr = build_multiaddr![Memory(rand::random::<u64>())];
 
         let network = NetworkWorker::new(sc_network::config::Params {
-            roles: config.roles,
+            role: Role::Full,
             executor: None,
             network_config: NetworkConfiguration {
                 listen_addresses: vec![listen_addr.clone()],
@@ -616,10 +614,7 @@
     }
 
     /// Add a light peer.
-    fn add_light_peer(&mut self, config: &ProtocolConfig) {
-        let mut config = config.clone();
-        config.roles = Roles::LIGHT;
-
+    fn add_light_peer(&mut self) {
         let (c, backend) = polkadot_test_runtime_client::new_light();
         let client = Arc::new(c);
         let (
@@ -632,7 +627,7 @@
         let verifier = self.make_verifier(
             PeersClient::Light(client.clone(), backend.clone()),
-            &config,
+            &Default::default(),
             &data,
         );
         let verifier = VerifierAdapter::new(Arc::new(Mutex::new(Box::new(verifier) as Box<_>)));
@@ -647,7 +642,7 @@
         let listen_addr = build_multiaddr![Memory(rand::random::<u64>())];
 
         let network = NetworkWorker::new(sc_network::config::Params {
-            roles: config.roles,
+            role: Role::Full,
             executor: None,
             network_config: NetworkConfiguration {
                 listen_addresses: vec![listen_addr.clone()],
```
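
The test-harness churn all follows from one design shift: the peer's role used to ride along in a `ProtocolConfig` that every helper had to thread through, and it is now fixed at the point where the `NetworkWorker` is built. A contrived stand-in for that shape (the real `sc_network::config::Params` has many more fields; nothing below is its actual definition):

```rust
// Contrived stand-in types; not sc_network's real API.
enum Role { Full, Light }

struct Params {
    role: Role, // was `roles: Roles`, copied out of a threaded ProtocolConfig
}

fn full_peer_params() -> Params {
    // The role is decided at network construction time...
    Params { role: Role::Full }
}

fn main() {
    // ...which is why `add_full_peer()` lost its `config` parameter.
    assert!(matches!(full_peer_params().role, Role::Full));
}
```
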
service/src/lib.rs (19 changes: 7 additions & 12 deletions)

```diff
@@ -31,7 +31,7 @@ use inherents::InherentDataProviders;
 use sc_executor::native_executor_instance;
 use log::info;
 pub use service::{
-    AbstractService, Roles, PruningMode, TransactionPoolOptions, Error, RuntimeGenesis, ServiceBuilderCommand,
+    AbstractService, Role, PruningMode, TransactionPoolOptions, Error, RuntimeGenesis, ServiceBuilderCommand,
     TFullClient, TLightClient, TFullBackend, TLightBackend, TFullCallExecutor, TLightCallExecutor,
     Configuration, ChainSpec,
 };
@@ -314,7 +314,8 @@ pub fn new_full<Runtime, Dispatch, Extrinsic>(
     use futures::stream::StreamExt;
 
     let is_collator = collating_for.is_some();
-    let is_authority = config.roles.is_authority() && !is_collator;
+    let role = config.role.clone();
+    let is_authority = role.is_authority() && !is_collator;
     let force_authoring = config.force_authoring;
     let max_block_data_size = max_block_data_size;
     let db_path = if let DatabaseConfig::Path { ref path, .. } = config.expect_database() {
@@ -325,14 +326,8 @@
     let disable_grandpa = config.disable_grandpa;
     let name = config.name.clone();
     let authority_discovery_enabled = authority_discovery_enabled;
-    let sentry_nodes = config.network.sentry_nodes.clone();
     let slot_duration = slot_duration;
 
-    // sentry nodes announce themselves as authorities to the network
-    // and should run the same protocols authorities do, but it should
-    // never actively participate in any consensus process.
-    let participates_in_consensus = is_authority && !config.sentry_mode;
-
     let (builder, mut import_setup, inherent_data_providers) = new_full_start!(config, Runtime, Dispatch);
 
     let backend = builder.backend().clone();
@@ -388,7 +383,7 @@
         service.spawn_task_handle(),
     ).map_err(|e| format!("Could not spawn network worker: {:?}", e))?;
 
-    if participates_in_consensus {
+    if let Role::Authority { sentry_nodes } = &role {
         let availability_store = {
             use std::path::PathBuf;
 
@@ -470,7 +465,7 @@
         let authority_discovery = authority_discovery::AuthorityDiscovery::new(
             service.client(),
             network,
-            sentry_nodes,
+            sentry_nodes.clone(),
             service.keystore(),
             dht_event_stream,
             service.prometheus_registry(),
@@ -481,7 +476,7 @@
 
     // if the node isn't actively participating in consensus then it doesn't
     // need a keystore, regardless of which protocol we use below.
-    let keystore = if participates_in_consensus {
+    let keystore = if is_authority {
         Some(service.keystore())
     } else {
         None
@@ -494,7 +489,7 @@
         name: Some(name),
         observer_enabled: false,
         keystore,
-        is_authority,
+        is_authority: role.is_network_authority(),
     };
 
     let enable_grandpa = !disable_grandpa;
```
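
The service hunks carry the real semantic change. The old code tracked `participates_in_consensus = is_authority && !config.sentry_mode` alongside a separately cloned `sentry_nodes` list; both now fall out of destructuring one enum variant, and grandpa's `is_authority` flag switches to the broader `is_network_authority()` so that sentries keep running the authority networking protocols (the last commit-message bullet). A minimal sketch of that gate, with a stand-in enum and the `Role` helpers assumed as in the sketch at the top of this page:

```rust
// Stand-in enum, just enough to show the `if let` that replaced the
// `participates_in_consensus` boolean.
enum Role { Full, Authority { sentry_nodes: Vec<String> } }

fn main() {
    // Hypothetical address string, for illustration only.
    let role = Role::Authority { sentry_nodes: vec!["example-sentry-addr".into()] };

    // One pattern answers both questions the old code tracked separately:
    // "are we an authority?" and "what are our sentries?". A sentry node
    // is not `Authority`, so the explicit `!config.sentry_mode` escape
    // hatch is no longer needed.
    if let Role::Authority { sentry_nodes } = &role {
        assert_eq!(sentry_nodes.len(), 1);
    }
}
```
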
